I have a QThreadPool that runs some tasks.
I also need to create new tasks from within other tasks.
To do that, I was thinking of implementing something like the following:
#include <QRunnable>
#include <QThread>
#include <QThreadPool>
#include <QDebug>

#include <atomic>

class Task : public QRunnable
{
    QThreadPool *pool;
public:
    Task(QThreadPool *p) : pool(p) {}

    void run()
    {
        // shared across all Task instances: how many extra tasks have been spawned
        static std::atomic<int> counter{0};
        qDebug() << QThread::currentThreadId();
        // fetch_add returns the previous value in one atomic read-modify-write,
        // so two tasks cannot both observe the same count and overshoot the limit
        if (counter.fetch_add(1) < 10) {
            pool->start(new Task(pool));
        }
    }
};
int main(int argc, char *argv[])
{
    //...
    QThreadPool threadPool;
    threadPool.start(new Task(&threadPool));
    //...
}
But I'm not sure that this is the right approach.
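For reference, here is a minimal sketch of how main() could wait for everything (the original main() is elided, so this is an assumption): QThreadPool::waitForDone() blocks until the queue is empty and all workers are idle, which also covers tasks started from inside other tasks, as long as they are queued before their parent task returns.

#include <QThreadPool>

int main()
{
    QThreadPool threadPool;
    threadPool.start(new Task(&threadPool)); // pool deletes the task after run(), autoDelete() is true by default
    threadPool.waitForDone();                // blocks until all queued and running tasks have finished
    return 0;
}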
I am new to C++ and multithreading applications. I want to process a long list of data (potentially several thousands of entries) by dividing its entries among a few threads. I have retrieved a ThreadPool class and a Queue class from the web (it is my first time tackling the subject). I construct the threads and populate the queue in the following way (definitions at the end of the post):
ThreadPool *pool = new ThreadPool(8);
std::vector<std::function<void(int)>> *caller =
    new std::vector<std::function<void(int)>>;
for (size_t i = 0; i < Nentries; ++i)
{
    caller->push_back(
        [=](int j){ func(entries[i], j); });
    pool->PushTask((*caller)[i]);
}
delete pool;
The problem is that only as many entries as there are threads get processed, as if the program did not wait for the queue to be empty. Indeed, if I put
while (pool->GetWorkQueueLength()) {}
just before the pool destructor, the whole list is correctly processed. However, I am afraid the busy-wait loop consumes too many resources. Moreover, I have not found anyone doing anything like this, so I think it is the wrong approach and the classes I use contain some error. Can anyone find the error (if present) or suggest another solution?
Here are the classes I use. I suppose the problem is in the implementation of the destructor, but I am not sure.
SynchronizedQueue.hh
#ifndef SYNCQUEUE_H
#define SYNCQUEUE_H

#include <list>
#include <mutex>
#include <condition_variable>

template<typename T>
class SynchronizedQueue
{
public:
    SynchronizedQueue();
    void Put(T const & data);
    T Get();
    size_t Size();

private:
    SynchronizedQueue(SynchronizedQueue const &) = delete;
    SynchronizedQueue & operator=(SynchronizedQueue const &) = delete;

    std::list<T> queue;
    std::mutex mut;
    std::condition_variable condvar;
};

template<typename T>
SynchronizedQueue<T>::SynchronizedQueue()
{}

template<typename T>
void SynchronizedQueue<T>::Put(T const & data)
{
    std::unique_lock<std::mutex> lck(mut);
    queue.push_back(data);
    condvar.notify_one();
}

template<typename T>
T SynchronizedQueue<T>::Get()
{
    std::unique_lock<std::mutex> lck(mut);
    while (queue.empty())
    {
        condvar.wait(lck);
    }
    T result = queue.front();
    queue.pop_front();
    return result;
}

template<typename T>
size_t SynchronizedQueue<T>::Size()
{
    std::unique_lock<std::mutex> lck(mut);
    return queue.size();
}

#endif
ThreadPool.hh
#ifndef THREADPOOL_H
#define THREADPOOL_H

#include "SynchronizedQueue.hh"

#include <atomic>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

class ThreadPool
{
public:
    ThreadPool(int nThreads = 0);
    virtual ~ThreadPool();
    void PushTask(std::function<void(int)> func);
    size_t GetWorkQueueLength();

private:
    void WorkerThread(int i);

    std::atomic<bool> done;
    unsigned int threadCount;
    SynchronizedQueue<std::function<void(int)>> workQueue;
    std::vector<std::thread> threads;
};

#endif
ThreadPool.cc
#include "ThreadPool.hh"
#include "SynchronizedQueue.hh"
void doNothing(int i)
{}
ThreadPool::ThreadPool(int nThreads)
: done(false)
{
if (nThreads <= 0)
{
threadCount = std::thread::hardware_concurrency();
}
else
{
threadCount = nThreads;
}
for (unsigned int i = 0; i < threadCount; ++i)
{
threads.push_back(std::thread(&ThreadPool::WorkerThread, this, i));
}
}
ThreadPool::~ThreadPool()
{
done = true;
for (unsigned int i = 0; i < threadCount; ++i)
{
PushTask(&doNothing);
}
for (auto& th : threads)
{
if (th.joinable())
{
th.join();
}
}
}
void ThreadPool::PushTask(std::function<void(int)> func)
{
workQueue.Put(func);
}
void ThreadPool::WorkerThread(int i)
{
while (!done)
{
workQueue.Get()(i);
}
}
size_t ThreadPool::GetWorkQueueLength()
{
return workQueue.Size();
}
You can push tasks that say "done" instead of setting done via an atomic variable. Each thread then exits by itself when it sees a "done" task, and not earlier. In the destructor you only need to push these tasks and join the threads. This is called a "poison pill".
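A minimal sketch of that idea, assuming the ThreadPool and SynchronizedQueue from the question and using an empty std::function as the pill (the done flag is then no longer needed):

// Sketch only: the destructor queues one empty function per worker; a worker
// exits when it pops an empty function and not before, so every real task
// queued earlier still runs.
ThreadPool::~ThreadPool()
{
    for (unsigned int i = 0; i < threadCount; ++i)
    {
        PushTask(std::function<void(int)>()); // poison pill
    }
    for (auto& th : threads)
    {
        if (th.joinable())
        {
            th.join();
        }
    }
}

void ThreadPool::WorkerThread(int i)
{
    for (;;)
    {
        std::function<void(int)> task = workQueue.Get();
        if (!task)     // empty function == poison pill
            return;
        task(i);
    }
}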
Alternatively, if you insist on your current design with the done variable, you can wait on the same condition variable you already have:
std::unique_lock<std::mutex> lck(mut);
while (!queue.empty())
{
    condvar.wait(lck);
}
But then something has to notify that waiter once the queue drains (the existing notify_one in Put only fires when items are added), and the notification has to become notify_all, which may be sub-optimal.
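For completeness, a rough sketch (my addition, not part of the original answer) of where that notification would come from: Get() has to wake the waiter once it drains the queue.

// Sketch only: notify when the queue becomes empty, so a destructor can block
// on "queue drained" instead of spinning on GetWorkQueueLength().
template<typename T>
T SynchronizedQueue<T>::Get()
{
    std::unique_lock<std::mutex> lck(mut);
    while (queue.empty())
    {
        condvar.wait(lck);
    }
    T result = queue.front();
    queue.pop_front();
    if (queue.empty())
    {
        condvar.notify_all(); // wake anyone waiting for the queue to drain
    }
    return result;
}

Keep in mind that an empty queue only means the last task has been dequeued, not that it has finished running, which is another reason to prefer the poison pill.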
I want to process a long list of data (potentially several thousands of entries) by dividing its entries among a few threads.
You can do that with parallel algorithms, like tbb::parallel_for:
#include <tbb/parallel_for.h>
#include <vector>

void func(int entry);

int main()
{
    std::vector<int> entries(1000000);
    tbb::parallel_for(size_t{0}, entries.size(),
                      [&](size_t i) { func(entries[i]); });
}
If you need sequential thread ids, you can do:
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <atomic>
#include <vector>

void func(int element, int thread_id);

template<class C>
inline auto make_range(C& c) -> decltype(tbb::blocked_range<decltype(c.begin())>(c.begin(), c.end())) {
    return tbb::blocked_range<decltype(c.begin())>(c.begin(), c.end());
}

int main()
{
    std::vector<int> entries(1000000);
    std::atomic<int> thread_counter{0};
    tbb::parallel_for(make_range(entries), [&](auto sub_range) {
        // one id per worker thread, handed out the first time that thread runs a chunk
        static thread_local int const thread_id =
            thread_counter.fetch_add(1, std::memory_order_relaxed);
        for (auto& element : sub_range)
            func(element, thread_id);
    });
}
Alternatively, there is std::this_thread::get_id.
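If you go that route, one possible way (a sketch of my own, not taken from the answer above) to map std::this_thread::get_id() onto small sequential indices is a mutex-protected table:

#include <mutex>
#include <thread>
#include <unordered_map>

// Sketch only: hands out 0, 1, 2, ... the first time each thread asks;
// the mutex serializes concurrent first-time calls.
int sequential_thread_id()
{
    static std::mutex m;
    static std::unordered_map<std::thread::id, int> ids;
    std::lock_guard<std::mutex> lock(m);
    auto inserted = ids.emplace(std::this_thread::get_id(), static_cast<int>(ids.size()));
    return inserted.first->second;
}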
I have a function that takes a callback, and I used it to do work on 10 separate threads. However, it is often the case that not all of the work is needed. For example, if the desired result is obtained on the third thread, it should stop all work being done on the remaining live threads.
This answer here suggests that it is not possible unless you have the callback functions take an additional std::atomic_bool argument that signals whether the function should terminate prematurely.
This solution does not work for me. The workers are spun up inside a base class, and the whole point of this base class is to abstract away the details of multithreading. How can I do this? I anticipate that I will have to ditch std::async for something more involved.
#include <iostream>
#include <future>
#include <vector>

class ABC{
public:
    std::vector<std::future<int> > m_results;
    ABC() {};
    ~ABC(){};
    virtual int callback(int a) = 0;
    void doStuffWithCallBack();
};

void ABC::doStuffWithCallBack(){
    // start working
    for(int i = 0; i < 10; ++i)
        m_results.push_back(std::async(&ABC::callback, this, i));

    // analyze results and cancel all threads when you get the 1
    for(int j = 0; j < 10; ++j){
        double foo = m_results[j].get();
        if ( foo == 1){
            break; // but threads continue running
        }
    }

    std::cout << m_results[9].get() << " <- this shouldn't have ever been computed\n";
}
class Derived : public ABC {
public:
    Derived() : ABC() {};
    ~Derived() {};
    int callback(int a){
        std::cout << a << "!\n";
        if (a == 3)
            return 1;
        else
            return 0;
    };
};

int main(int argc, char **argv)
{
    Derived myObj;
    myObj.doStuffWithCallBack();
    return 0;
}
I'll just say that this should probably not be a part of a 'normal' program, since it could leak resources and/or leave your program in an unstable state, but in the interest of science...
If you have control of the thread loop, and you don't mind using platform features, you can inject an exception into the thread. With POSIX you can use signals for this; on Windows you would have to use SetThreadContext(). Though the exception will generally unwind the stack and call destructors, your thread may be in a system call or some other non-exception-safe place when the exception occurs.
Disclaimer: I only have Linux at the moment, so I did not test the Windows code.
#if defined(_WIN32)
# define ITS_WINDOWS
#else
# define ITS_POSIX
#endif

#if defined(ITS_WINDOWS)
#include <windows.h>
#elif defined(ITS_POSIX)
#include <signal.h>
#include <pthread.h>
#endif

#include <chrono>
#include <cstdio>
#include <string>
#include <thread>
void throw_exception()
{
    throw std::string();
}

void init_exceptions()
{
    // i is always 0, so this never actually throws; the call site just has to exist
    volatile int i = 0;
    if (i)
        throw_exception();
}
bool abort_thread(std::thread &t)
{
#if defined(ITS_WINDOWS)
    bool bSuccess = false;
    HANDLE h = t.native_handle();
    if (INVALID_HANDLE_VALUE == h)
        return false;
    if (INFINITE == SuspendThread(h))
        return false;
    CONTEXT ctx;
    ctx.ContextFlags = CONTEXT_CONTROL;
    if (GetThreadContext(h, &ctx))
    {
#if defined( _WIN64 )
        ctx.Rip = (DWORD64)(DWORD_PTR)throw_exception; // Rip is 64-bit, don't truncate the address
#else
        ctx.Eip = (DWORD)(DWORD_PTR)throw_exception;
#endif
        bSuccess = SetThreadContext(h, &ctx) ? true : false;
    }
    ResumeThread(h);
    return bSuccess;
#elif defined(ITS_POSIX)
    // report success only if the signal was actually delivered
    return 0 == pthread_kill(t.native_handle(), SIGUSR2);
#else
    return false;
#endif
}
#if defined(ITS_POSIX)
void worker_thread_sig(int sig)
{
    if (SIGUSR2 == sig)
        throw std::string();
}
#endif

void init_threads()
{
#if defined(ITS_POSIX)
    struct sigaction sa;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sa.sa_handler = worker_thread_sig;
    sigaction(SIGUSR2, &sa, 0);
#endif
}

class tracker
{
public:
    tracker() { printf("tracker()\n"); }
    ~tracker() { printf("~tracker()\n"); }
};
int main(int argc, char *argv[])
{
    init_threads();

    printf("main: starting thread...\n");
    std::thread t([]()
    {
        try
        {
            tracker a;
            init_exceptions();
            printf("thread: started...\n");
            std::this_thread::sleep_for(std::chrono::minutes(1000));
            printf("thread: stopping...\n");
        }
        catch (std::string s)
        {
            printf("thread: exception caught...\n");
        }
    });

    printf("main: sleeping...\n");
    std::this_thread::sleep_for(std::chrono::seconds(2));

    printf("main: aborting...\n");
    abort_thread(t);

    printf("main: joining...\n");
    t.join();

    printf("main: exiting...\n");
    return 0;
}
Output:
main: starting thread...
main: sleeping...
tracker()
thread: started...
main: aborting...
main: joining...
~tracker()
thread: exception caught...
main: exiting...
I have an attempt at a producer/consumer implementation below.
Producer
#pragma once
#ifndef PRODUCER_H
#define PRODUCER_H

#include <thread>
#include "Mailbox.h"

class Producer
{
private:
    std::thread producer;
    Mailbox& mailbox;

public:
    Producer(Mailbox& newMailbox);
    ~Producer();
    void start();
    void run();
};

Producer::Producer(Mailbox& newMailbox) : mailbox(newMailbox) {}

Producer::~Producer() {}

void Producer::start()
{
    producer = std::thread(&Producer::run, this);
}

void Producer::run()
{
    mailbox.inc();
}

#endif
Consumer
#pragma once
#ifndef CONSUMER_H
#define CONSUMER_H

#include "Mailbox.h"

#include <thread>
#include <iostream>

class Consumer
{
private:
    Mailbox& mailbox;
    std::thread consumer;

public:
    Consumer(Mailbox& newMailbox);
    ~Consumer();
    void start();
    void run();
};

Consumer::Consumer(Mailbox& newMailbox) : mailbox(newMailbox) {}

Consumer::~Consumer() {}

void Consumer::start()
{
    consumer = std::thread(&Consumer::run, this);
}

void Consumer::run()
{
    mailbox.read();
}

#endif
Mailbox
#pragma once
#ifndef MAILBOX_H
#define MAILBOX_H

#include <condition_variable>
#include <mutex>
#include <iostream>

class Mailbox
{
private:
    int& mailbox;
    int init_val;
    std::mutex mmutex;
    std::condition_variable condition;

public:
    Mailbox();
    ~Mailbox();
    void inc();
    void read();
};

Mailbox::Mailbox() : mailbox(init_val), init_val(0) {}

Mailbox::~Mailbox()
{
}

void Mailbox::inc()
{
    int count = 0;
    while (count < 10)
    {
        std::unique_lock<std::mutex> lock(mmutex);
        std::cout << "Producer increment\n";
        mailbox += 1;
        lock.unlock();
        count += 1;
    }
}

void Mailbox::read()
{
    int count = 0;
    while (count < 10)
    {
        std::unique_lock<std::mutex> lock(mmutex);
        condition.wait(lock, [this](){ return get_cflag(); });
        condition.notify_one();
        count += 1;
    }
}

#endif
Main
int main()
{
    Mailbox* mailbox = new Mailbox();
    Consumer* consumer = new Consumer(*mailbox);
    Producer* producer = new Producer(*mailbox);
    consumer->start();
    producer->start();
    return 0;
}
Mutex locking works, albeit asynchronously, because I have no control over when a std::thread will start, so I decided to implement a semi-synchronous approach using std::unique_lock in addition to std::mutex.
The problem is that the Consumer waits while the Producer flies on ahead with no notification, at least that is what the debugger is telling me, and the last Producer iteration results in an abort(), so something is going wrong here.
Based upon David Schwartz's comment, insight from Mike Strobel, and additional research, I changed the producer and consumer functions:
Producer
void Mailbox::inc()
{
    int count = 0;
    while (count < 10)
    {
        std::unique_lock<std::mutex> lock(mmutex);
        std::cout << "Producer increment\n";
        mailbox += 1;
        lock.unlock();
        set_cflag(true); // signal to the consumer data is ready
        condition.notify_one();
        {
            std::unique_lock<std::mutex> lock(mmutex);
            condition.wait(lock, [this]() { return get_pflag(); });
        }
        set_pflag(false);
        count += 1;
    }
}
Consumer
void Mailbox::read()
{
    int count = 0;
    while (count < 10)
    {
        std::unique_lock<std::mutex> lock(mmutex);
        condition.wait(lock, [this](){ return get_cflag(); });
        std::cout << "Consumer: " << mailbox << "\n";
        lock.unlock();
        set_pflag(true);
        condition.notify_one();
        count += 1;
        set_cflag(false);
    }
}
Mailbox
class Mailbox
{
private:
    int& mailbox;
    int cflag, pflag;
    int init_val;
    std::mutex mmutex;
    std::condition_variable condition;

public:
    Mailbox();
    ~Mailbox();
    int get_cflag() { return cflag; }
    void set_cflag(int newFlag) { cflag = newFlag; }
    int get_pflag() { return pflag; }
    void set_pflag(int newFlag) { pflag = newFlag; }
    void inc();
    void read();
};

Mailbox::Mailbox() : mailbox(init_val), init_val(0), cflag(0), pflag(0) {}

Mailbox::~Mailbox()
{
}
With the updated main below, the output upon execution is as I desired:
int main()
{
    Mailbox* mailbox = new Mailbox();
    Consumer* consumer = new Consumer(*mailbox);
    Producer* producer = new Producer(*mailbox);
    consumer->start();
    producer->start();
    fgetc(stdin);
    return 0;
}
Producer increment
Consumer: 1
Producer increment
Consumer: 2
Producer increment
Consumer: 3
Producer increment
Consumer: 4
Producer increment
Consumer: 5
Producer increment
Consumer: 6
Producer increment
Consumer: 7
Producer increment
Consumer: 8
Producer increment
Consumer: 9
Producer increment
Consumer: 10
I’m not a C++ guy, but if these condition variables work the way I think they do, you’ll only get notified if a signal arrives while you’re waiting. If the signal arrived before you started waiting, you’ll block indefinitely.
After you acquire the lock in Mailbox::read, you should check whether an item is available, and only wait on the condition variable if there isn't one. If there is, go ahead and take it:
int Mailbox::read()
{
    std::unique_lock<std::mutex> lock(mmutex);
    while (mailbox <= 0)
        condition.wait(lock);
    return mailbox--;
}
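The matching producer would then update the mailbox under the lock and notify afterwards; a sketch along the same lines, assuming the same Mailbox members as in the question:

// Sketch only: increment under the lock, release it, then wake the consumer,
// which re-checks "mailbox > 0" when it wakes up.
void Mailbox::inc()
{
    for (int count = 0; count < 10; ++count)
    {
        {
            std::unique_lock<std::mutex> lock(mmutex);
            mailbox += 1;
        }
        condition.notify_one();
    }
}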
When we compile and run the code below, it does not invoke the thread. According to chapter 2 of C++ Concurrency in Action by Anthony Williams (page 50, Listing 2.6), it should work.
The line
ScopedThread t(std::thread(func(some_local_state)));
or
ScopedThread t(std::thread((my_func)));
does not seem to invoke the thread. Why is the thread variable treated as a temporary, and why is execution never invoked?
#include <iostream>
#include <stdexcept>
#include <thread>

using namespace std;

class ScopedThread
{
private:
    std::thread t;

public:
    explicit ScopedThread(std::thread t_) : t(std::move(t_))
    {
        if (!t.joinable())
        {
            cout << "Not throwable" << endl;
            throw std::logic_error("No Thread Error!");
        }
    }
    ~ScopedThread()
    {
        t.join();
    }
    ScopedThread(const ScopedThread&) = delete;
    ScopedThread operator=(const ScopedThread&) = delete;
};

void do_something(const int& ref)
{
    int temp = ref;
    cout << "inside do_something at id = " << ref << endl;
}

struct func
{
    int& i;
    func(int& i_) : i(i_) { }
    void operator()()
    {
        for (unsigned j = 0; j < 100; ++j)
        {
            do_something(i);
        }
    }
};

void some_func()
{
    int some_local_state = 42;
    func my_func(some_local_state);
    // Both lines below [ uncomment each one at a time ]
    // ScopedThread t(std::thread(func(some_local_state)));
    // ScopedThread t(std::thread((my_func)));
}

int main(int argc, char* argv[])
{
    some_func();
}
The problem is that both lines you commented out are actually function declarations (see e.g., this question).
The solution is to write any of the following instead:
ScopedThread t{std::thread(func(some_local_state))};
ScopedThread t((std::thread(func(some_local_state))));
ScopedThread t{std::thread(my_func)};
ScopedThread t((std::thread(my_func)));
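For illustration (my reading of the parse, not text from the original answer), the reason the braces or the extra parentheses help is that the original statement is itself a valid declaration:

// What the compiler sees in
//     ScopedThread t(std::thread((my_func)));
// is not the definition of a variable t but the declaration of a function
// named t that returns a ScopedThread and takes one parameter of type
// std::thread named my_func, so no thread is ever constructed or joined:
ScopedThread t(std::thread my_func);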
I have an object MainWorker that is run in a separate thread thanks to the moveToThread method.
MainWorker has a member SubWorker, which is also run in a separate thread. Both threads work in infinite loops.
The idea is that MainWorker and SubWorker both perform some separate computations. Whenever SubWorker is done computing, it should notify MainWorker with the result.
I therefore intuitively made the first connection, between a signal emitted by SubWorker and a slot of MainWorker, but it wasn't working, so I made two more connections to rule out some potential problems:
connect(subWorker, &SubWorker::stuffDid, this, &MainWorker::reportStuff); //1
connect(subWorker, &SubWorker::stuffDid, subWorker, &SubWorker::reportStuff); //2
connect(this, &MainWorker::stuffDid, this, &MainWorker::reportStuffSelf); //3
It seems that what is not working is exactly what I need, cross-thread communication, because connections 2 and 3 work as expected. My question is: how do I make connection 1 work?
Edit: After Karsten's explanation, it is clear that the infinite loop blocks the event loop. So the new question is: how can I send messages (signals, whatever) from an infinite-loop thread to its parent thread? One possible direction is sketched below.
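One way to do that without blocking, sketched here under assumptions (it presumes the periodic work can run as a timer slot and needs #include <QTimer>), is to let a QTimer drive MainWorker instead of the while loop, so the thread's event loop stays free to deliver the queued SubWorker::stuffDid signal; calling QCoreApplication::processEvents() inside the loop is another option.

// Sketch only: the end of MainWorker::doStuff with the blocking loop replaced
// by a timer that lives in MainWorker's thread.
void MainWorker::doStuff()
{
    // ... create subWorker / subWorkerThread and make the connections as before ...
    subWorkerThread->start();

    QTimer *timer = new QTimer(this);   // child of MainWorker, so it lives in its thread
    connect(timer, &QTimer::timeout, this, [this]() {
        emit stuffDid();                // the periodic work from the old while loop
    });
    timer->start(200);                  // same 200 ms period as before

    // Returning hands control back to this thread's event loop, which can now
    // run MainWorker::reportStuff for signals queued from the SubWorker thread.
}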
I am new to Qt, so there is a high chance that I got it completely wrong. Here is the minimal (not)working example:
MainWorker.h
#include <QObject>
#include <QThread>
#include <QDebug>

#include "SubWorker.h"

class MainWorker : public QObject
{
    Q_OBJECT
public:
    MainWorker() : run(false) {}

    void doStuff()
    {
        subWorker = new SubWorker;
        subWorkerThread = new QThread;
        subWorker->moveToThread(subWorkerThread);

        connect(subWorkerThread, &QThread::started, subWorker, &SubWorker::doStuff);
        if (!connect(subWorker, &SubWorker::stuffDid, this, &MainWorker::reportStuff)) qDebug() << "connect failed";
        connect(subWorker, &SubWorker::stuffDid, subWorker, &SubWorker::reportStuff);
        connect(this, &MainWorker::stuffDid, this, &MainWorker::reportStuffSelf);

        subWorkerThread->start();

        run = true;
        while (run)
        {
            QThread::currentThread()->msleep(200);
            emit stuffDid();
        }
    }

private:
    bool run;
    QThread* subWorkerThread;
    SubWorker* subWorker;

signals:
    void stuffDid();

public slots:
    void reportStuff()
    {
        qDebug() << "MainWorker: SubWorker did stuff";
    }
    void reportStuffSelf()
    {
        qDebug() << "MainWorker: MainWorker did stuff (EventLoop is not blocked)";
    }
};
SubWorker.h
#include <QObject>
#include <QThread>
#include <QDebug>

class SubWorker : public QObject
{
    Q_OBJECT
public:
    SubWorker() : run(false) {}

    void doStuff()
    {
        run = true;
        while (run)
        {
            qDebug() << "SubWorker: Doing stuff...";
            QThread::currentThread()->msleep(1000);
            emit stuffDid();
        }
    }

private:
    bool run;

public slots:
    void reportStuff()
    {
        qDebug() << "SubWorker: SubWorker did stuff";
    }

signals:
    void stuffDid();
};
main.cpp
#include <QCoreApplication>
#include <QThread>

#include "MainWorker.h"

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    MainWorker *mainWorker = new MainWorker;
    QThread *mainWorkerThread = new QThread;
    mainWorker->moveToThread(mainWorkerThread);

    QObject::connect(mainWorkerThread, &QThread::started, mainWorker, &MainWorker::doStuff);

    mainWorkerThread->start();

    return a.exec();
}