#include <bits/stdc++.h>
#define fastio cin.tie(0)->sync_with_stdio(0)
using namespace std;

struct cmp {
    bool operator()(const int& a, const int& b) const {
        return a > b;
    }
};

int main()
{
    fastio;
    priority_queue<int, vector<int>, cmp> pq;
    pq.push(2); pq.push(1);
    while (pq.size()) {
        auto cur = pq.top(); pq.pop();
        cout << cur << ' ';
    }
}
It is a priority_queue with reverse order (a min-heap), but I'm not sure how it works.
If I push 2 and then 1, which value becomes a and which becomes b? And what does the bool returned by the comparator mean to the priority_queue?
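For what it's worth, here is a minimal standard-library-only sketch (my own, equivalent to your cmp): the queue treats cmp(a, b) == true as "a is ordered before b", and top() always yields the element ordered last. Which pushed value gets bound to a or b in any particular call is an implementation detail of the underlying heap operations; only the ordering relation matters. With return a > b; larger values are ordered earlier, so the smallest value sits on top.
#include <iostream>
#include <queue>
#include <vector>
using namespace std;

int main() {
    // greater<int> behaves exactly like the hand-written cmp above:
    // comparator(a, b) == true means "a is ordered before b", and top()
    // is the element ordered last, so "a > b" puts the smallest value on top.
    priority_queue<int, vector<int>, greater<int>> pq;
    pq.push(2);
    pq.push(1);
    while (!pq.empty()) {
        cout << pq.top() << ' ';  // prints "1 2"
        pq.pop();
    }
}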
I don't understand why the CLion IDE underlines "pthread_create" and "pthread_join" in red and says "No matching function for call to...". I used similar code before, without passing a pointer to an object into the thread as an argument, and it worked.
#include <iostream>
#include <pthread.h>

#define NUM_THREADS 4

using namespace std;

class Animal {
private:
    float x, y;
public:
    Animal(float x, float y) {
        this->x = x;
        this->y = y;
    }
    void print() {
        cout << x << "," << y << endl;
    }
};

void *function(Animal *p) {
    Animal animal = *p;
    animal.print();
}

int main() {
    pthread_t thread[NUM_THREADS];
    Animal dog[] = {Animal(2, 3), Animal(-1, 2), Animal(5, 2), Animal(5, 10)};

    for (int i = 0; i < NUM_THREADS; i++) {
        pthread_create(&thread[i], NULL, function, &dog[i]);
    }
    for (int i = 0; i < NUM_THREADS; i++) {
        pthread_join(thread[i]);
    }
    return 0;
}
I changed the argument of the childThread function from Animal * to void *:
void *childThread(void *p) {
    Animal *animal = (Animal *)p;
    animal->print();
    return NULL;  // a function declared to return void * must return a value
}
and added a second argument, NULL, to pthread_join:
pthread_join(thread[i], NULL);
and now it works.
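For reference, here is a complete corrected sketch that consolidates both fixes described above, keeping the original class and names; the only changes are the void * signature, the cast inside the thread function, and the two-argument pthread_join:
#include <iostream>
#include <pthread.h>

#define NUM_THREADS 4

using namespace std;

class Animal {
private:
    float x, y;
public:
    Animal(float x, float y) {
        this->x = x;
        this->y = y;
    }
    void print() {
        cout << x << "," << y << endl;
    }
};

// pthread_create requires the exact signature void *(*)(void *).
void *childThread(void *p) {
    Animal *animal = (Animal *)p;   // recover the Animal that was passed in
    animal->print();
    return NULL;                    // a void * function must return something
}

int main() {
    pthread_t thread[NUM_THREADS];
    Animal dog[] = {Animal(2, 3), Animal(-1, 2), Animal(5, 2), Animal(5, 10)};

    for (int i = 0; i < NUM_THREADS; i++) {
        pthread_create(&thread[i], NULL, childThread, &dog[i]);
    }
    for (int i = 0; i < NUM_THREADS; i++) {
        pthread_join(thread[i], NULL);  // second argument receives the return value; NULL ignores it
    }
    return 0;
}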
I am new to using condition_variable and unique_lock in C++. I am working on creating an event loop that polls two custom event queues and a "boolean" (an integer acting as a boolean), all of which can be acted upon by multiple sources.
I have a demo (below) that appears to work, and I would greatly appreciate a review confirming whether it follows best practices for unique_lock and condition_variable, and pointing out any problems you foresee (race conditions, thread blocking, etc.).
In ThreadSafeQueue::enqueue(...): are we unlocking twice, once by calling notify and once when the unique_lock goes out of scope? (A small comparison sketch follows these questions.)
In ThreadSafeQueue::dequeueAll(): we assume it is called by a method that has been notified (cond.notify) and therefore already holds the lock. Is there a better way to encapsulate this to keep the caller cleaner?
Do we need to make our class members volatile, similar to this?
Is there a better way to mock up our situation so we can test whether we've implemented the locking correctly, perhaps without the sleep statements and with an automated check?
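Regarding the first question, a small comparison sketch (my own, not part of the demo below): notify_one never unlocks anything, it only wakes a waiter, so there is no double unlock. A common pattern, shown here, is to let the lock go out of scope before notifying so the woken thread does not immediately block on a still-held mutex:
#include <condition_variable>
#include <mutex>
#include <queue>

std::mutex m;
std::condition_variable cond;
std::queue<int> q;

void enqueue(int value) {
    {
        std::lock_guard<std::mutex> lock(m);  // lock only around the shared state
        q.push(value);
    }                                         // the mutex is released here, exactly once
    cond.notify_one();                        // notify_one never unlocks; it only wakes a waiter
}

int main() { enqueue(42); }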
ThreadSafeQueue.h:
#include <condition_variable>
#include <cstdint>
#include <cstdlib>  // malloc / free
#include <cstring>  // memcpy
#include <iostream>
#include <mutex>
#include <vector>
template <class T>
class ThreadSafeQueue {
 public:
  ThreadSafeQueue(std::condition_variable* cond, std::mutex* unvrsl_m)
      : ThreadSafeQueue(cond, unvrsl_m, 1) {}

  ThreadSafeQueue(std::condition_variable* cond, std::mutex* unvrsl_m,
                  uint32_t capacity)
      : cond(cond),
        m(unvrsl_m),
        head(0),
        tail(0),
        capacity(capacity),
        buffer((T*)malloc(get_size() * sizeof(T))),
        scratch_space((T*)malloc(get_size() * sizeof(T))) {}

  std::condition_variable* cond;

  ~ThreadSafeQueue() {
    free(scratch_space);
    free(buffer);
  }

  void resize(uint32_t new_cap) {
    std::unique_lock<std::mutex> lock(*m);
    check_params_resize(new_cap);
    free(scratch_space);
    scratch_space = buffer;
    buffer = (T*)malloc(sizeof(T) * new_cap);
    copy_cyclical_queue();
    free(scratch_space);
    scratch_space = (T*)malloc(new_cap * sizeof(T));
    tail = get_size();
    head = 0;
    capacity = new_cap;
  }

  void enqueue(const T& value) {
    std::unique_lock<std::mutex> lock(*m);
    resize();
    buffer[tail++] = value;
    if (tail == get_capacity()) {
      tail = 0;
    } else if (tail > get_capacity())
      throw("Something went horribly wrong TSQ: 75");
    cond->notify_one();
  }

  // Assuming m has already been locked by the caller...
  void dequeueAll(std::vector<T>* vOut) {
    if (get_size() == 0) return;
    scratch_space = buffer;
    copy_cyclical_queue();
    vOut->insert(vOut->end(), buffer, buffer + get_size());
    head = tail = 0;
  }

  // Const functions because they shouldn't be modifying the internal variables
  // of the object
  bool is_empty() const { return get_size() == 0; }

  uint32_t get_size() const {
    if (head == tail)
      return 0;
    else if (head < tail) {
      // 1 2 3
      // 0 1 2
      // 1
      // 0
      return tail - head;
    } else {
      // 3 _ 1 2
      // 0 1 2 3
      // capacity-head + tail+1 = 4-2+0+1 = 2 + 1
      return get_capacity() - head + tail + 1;
    }
  }

  uint32_t get_capacity() const { return capacity; }

  //---------------------------------------------------------------------------

 private:
  std::mutex* m;
  uint32_t head;
  uint32_t tail;
  uint32_t capacity;
  T* buffer;
  T* scratch_space;

  uint32_t get_next_empty_spot();

  void copy_cyclical_queue() {
    uint32_t size = get_size();
    uint32_t cap = get_capacity();
    if (size == 0) {
      return;  // because we have nothing to copy
    }
    if (head + size <= cap) {
      // _ 1 2 3 ... index = 1, size = 3, 1+3 = 4 = capacity... only need 1 copy
      memcpy(buffer, scratch_space + head, sizeof(T) * size);
    } else {
      // 5 1 2 3 4 ... index = 1, size = 5, 1+5 = 6 = capacity... need to copy
      // 1-4 then 0-1
      // copy number of bytes: front = 1, to (5-1 = 4 elements)
      memcpy(buffer, scratch_space + head, sizeof(T) * (cap - head));
      // just copy the bytes from the front up to the first element in the old
      // array
      memcpy(buffer + (cap - head), scratch_space, sizeof(T) * tail);
    }
  }

  void check_params_resize(uint32_t new_cap) {
    if (new_cap < get_size()) {
      std::cerr << "ThreadSafeQueue: check_params_resize: size(" << get_size()
                << ") > new_cap(" << new_cap
                << ")... data "
                   "loss will occur if this happens. Prevented."
                << std::endl;
    }
  }

  void resize() {
    uint32_t new_cap;
    uint32_t size = get_size();
    uint32_t cap = get_capacity();
    if (size + 1 >= cap - 1) {
      std::cout << "RESIZE CALLED --- BAD" << std::endl;
      new_cap = 2 * cap;
      check_params_resize(new_cap);
      free(scratch_space);     // free existing (too small) scratch space
      scratch_space = buffer;  // transfer pointer over
      buffer = (T*)malloc(sizeof(T) * new_cap);  // allocate a bigger buffer
      copy_cyclical_queue();
      // move over everything with memcpy from scratch_space to buffer
      free(scratch_space);  // free what used to be the too-small buffer
      scratch_space =
          (T*)malloc(sizeof(T) * new_cap);  // recreate scratch space
      tail = size;
      head = 0;
      // since we're done with the old array... delete for memory management->
      capacity = new_cap;
    }
  }
};
// Event Types
// keyboard/mouse
// network
// dirty flag
Main.cpp:
#include <unistd.h>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>
#include <sstream>
#include <thread>
#include "ThreadSafeQueue.h"
using namespace std;
void write_to_threadsafe_queue(ThreadSafeQueue<uint32_t> *q,
                               uint32_t startVal) {
  uint32_t count = startVal;
  while (true) {
    q->enqueue(count);
    cout << "Successfully enqueued: " << count << endl;
    count += 2;
    sleep(count);
  }
}

void sleep_and_set_redraw(int *redraw, condition_variable *cond) {
  while (true) {
    sleep(3);
    __sync_fetch_and_or(redraw, 1);
    cond->notify_one();
  }
}

void process_events(vector<uint32_t> *qOut, condition_variable *cond,
                    ThreadSafeQueue<uint32_t> *q1,
                    ThreadSafeQueue<uint32_t> *q2, int *redraw, mutex *m) {
  while (true) {
    unique_lock<mutex> lck(*m);
    cond->wait(lck);
    q1->dequeueAll(qOut);
    q2->dequeueAll(qOut);
    if (__sync_fetch_and_and(redraw, 0)) {
      cout << "FLAG SET" << endl;
      qOut->push_back(0);
    }
    for (auto a : *qOut) cout << a << "\t";
    cout << endl;
    cout << "PROCESSING: " << qOut->size() << endl;
    qOut->clear();
  }
}

void test_2_queues_and_bool() {
  try {
    condition_variable cond;
    mutex m;
    ThreadSafeQueue<uint32_t> q1(&cond, &m, 1024);
    ThreadSafeQueue<uint32_t> q2(&cond, &m, 1024);
    int redraw = 0;
    vector<uint32_t> qOut;

    thread t1(write_to_threadsafe_queue, &q1, 2);
    thread t2(write_to_threadsafe_queue, &q2, 1);
    thread t3(sleep_and_set_redraw, &redraw, &cond);
    thread t4(process_events, &qOut, &cond, &q1, &q2, &redraw, &m);

    t1.join();
    t2.join();
    t3.join();
    t4.join();
  } catch (system_error &e) {
    cout << "MAIN TEST CRASHED" << e.what();
  }
}
int main() { test_2_queues_and_bool(); }
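One pattern worth comparing the process_events loop above against (a sketch of mine, not the posted code): waiting with a predicate re-checks the condition while the mutex is held, so a notification that arrives before the consumer reaches wait(), or a spurious wakeup, is handled correctly. A self-contained illustration:
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

std::mutex m;
std::condition_variable cond;
std::queue<int> events;
bool redraw_flag = false;

void consumer() {
    std::unique_lock<std::mutex> lck(m);
    // The lambda is evaluated under the lock, before and after each wait,
    // so a notify sent before we reached wait() is not lost and spurious
    // wakeups are ignored.
    cond.wait(lck, [] { return !events.empty() || redraw_flag; });
    while (!events.empty()) {
        std::cout << events.front() << "\n";
        events.pop();
    }
    redraw_flag = false;
}

void producer() {
    {
        std::lock_guard<std::mutex> lock(m);
        events.push(7);
    }
    cond.notify_one();
}

int main() {
    std::thread c(consumer);
    std::thread p(producer);
    p.join();
    c.join();
}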
Why doesn't creating threads like this work inside an abstract base class? I'm trying to abstract away all of the multithreading details for users who derive from this base class. I don't understand why it says "no type named 'type'" when callbackSquare clearly returns int.
#include <iostream>
#include <future>
#include <vector>

class ABC{
public:
    std::vector<std::future<int> > m_results;

    ABC(){};
    ~ABC(){};

    virtual int callbackSquare(int& a) = 0;
    void doStuffWithCallBack();
};

void ABC::doStuffWithCallBack(){
    for(int i = 0; i < 10; ++i)
        m_results.push_back(std::async(&ABC::callbackSquare, this, i));
    for(int j = 0; j < 10; ++j)
        std::cout << m_results[j].get() << "\n";
}

class Derived : public ABC {
    Derived() : ABC() {};
    ~Derived(){};

    int callbackSquare(int& a) {return a * a;};
};

int main(int argc, char **argv)
{
    std::cout << "testing\n";
    return 0;
}
The strange errors I'm getting are:
/usr/include/c++/5/future:1709:67: required from 'std::future<typename std::result_of<_Functor(_ArgTypes ...)>::type> std::async(std::launch, _Fn&&, _Args&& ...) [with _Fn = int (ABC::*)(int&); _Args = {ABC*, int&}; typename std::result_of<_Functor(_ArgTypes ...)>::type = int]'
/usr/include/c++/5/future:1725:19: required from 'std::future<typename std::result_of<_Functor(_ArgTypes ...)>::type> std::async(_Fn&&, _Args&& ...) [with _Fn = int (ABC::*)(int&); _Args = {ABC*, int&}; typename std::result_of<_Functor(_ArgTypes ...)>::type = int]'
/home/taylor/Documents/ssmworkspace/callbacktest/main.cpp:16:69: required from here
/usr/include/c++/5/functional:1505:61: error: no type named 'type' in 'class std::result_of<std::_Mem_fn<int (ABC::*)(int&)>(ABC*, int)>'
typedef typename result_of<_Callable(_Args...)>::type result_type;
^
/usr/include/c++/5/functional:1526:9: error: no type named 'type' in 'class std::result_of<std::_Mem_fn<int (ABC::*)(int&)>(ABC*, int)>'
_M_invoke(_Index_tuple<_Indices...>)
Your problem can be reproduced with any function that accepts a reference:
#include <future>

int f(int& a)
{
    return a * a;
}

int main()
{
    int i = 42;
    auto r = std::async(f, i);
}
std::async copies (decays) its arguments and refuses to silently bind those copies to a non-const lvalue reference, which is why result_of has no type and you get that error. Accepting a reference is also risky in your code: the loop keeps modifying the variable while the asynchronously called function may be accessing it, creating a data race.
Change the function to accept the parameter by value, or, if you acknowledge that risk, call std::async passing std::ref(i) (or std::cref(i) if the function accepts a const reference).
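For illustration, a minimal sketch of both options (the function names here are mine, not from the question):
#include <future>
#include <functional>  // std::ref
#include <iostream>

int square_by_value(int a) { return a * a; }  // option 1: take the parameter by value
int square_by_ref(int& a) { return a * a; }   // option 2: keep the reference parameter

int main() {
    int i = 42;
    auto r1 = std::async(square_by_value, i);          // i is copied into the task
    auto r2 = std::async(square_by_ref, std::ref(i));  // explicit opt-in; i must outlive the task
    std::cout << r1.get() << " " << r2.get() << "\n";
}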
I have a shared object of type:
struct A {
    int x;
    int y;
    int z;
};

A obj;
There are two threads running in parallel.
Thread #1 modifies the object's members:
// .. some code before
obj.x = 42;
obj.y = 42;
obj.z = 42;
// .. now thread #2 can read obj
Thread #2 reads the object's members:
// .. some code before
int x = obj.x;
int y = obj.y;
int z = obj.z;
// .. some code after
How can I synchronize this flow most efficiently, so that thread #2 reads the object's members only after thread #1 has modified them all?
Use std::atomic here.
std::atomic<int> x { INT_MAX };

// thread1:
while (x.load(std::memory_order_acquire) == INT_MAX)
    ;

// thread2:
x.store(42, std::memory_order_release);
EDIT: added a runnable example.
main.cpp
#include <iostream>
#include <chrono>
#include <thread>
#include <atomic>

struct Foo {
    Foo(): x_(0) {}
    std::atomic<int> x_;
};

using namespace std;

int main() {
    Foo foo;
    thread th1(
        [&]() {
            cout << "thread1 Waiting for thread2 setting value for x" << endl;
            while (foo.x_.load(memory_order_acquire) == 0);
            int current = foo.x_.load(memory_order_acquire);
            cout << "thread 1 print current value of x is " << current << endl;
        });
    thread th2(
        [&]() {
            std::chrono::milliseconds dura( 2000 );
            std::this_thread::sleep_for( dura );
            cout << "thread2 set up value for x" << endl;
            foo.x_.store(42,memory_order_release);
        });
    th1.join();
    th2.join();
    return 0;
}
CMakeLists.txt
cmake_minimum_required(VERSION 2.8.4)
project(atomicExample)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -std=c++11")
set(SOURCE_FILES main.cpp)
add_executable(atomicExample ${SOURCE_FILES})
You can use std::mutex to synchronize critical sections of your code. std::lock_guard locks a mutex and releases it when the lock_guard goes out of scope. I've also added a condition variable to ensure that thread2() waits for thread1() to finish assigning values to obj before continuing.
Example:
#include <mutex>
#include <condition_variable>

struct A {
    int x;
    int y;
    int z;
};

A obj;
std::mutex m;
std::condition_variable cv;
bool thread1Done = false;

void thread1()
{
    std::lock_guard<std::mutex> lock( m );
    obj.x = 42;
    obj.y = 42;
    obj.z = 42;
    thread1Done = true;
    cv.notify_all();
}

void thread2()
{
    std::unique_lock<std::mutex> lock( m );
    while ( !thread1Done ) {
        cv.wait( lock );
    }
    int x = obj.x;
    int y = obj.y;
    int z = obj.z;
}
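A small driver (my own sketch, assuming the snippet above is in the same file) shows the intended usage. Starting thread2 first is fine because it blocks on the condition variable until thread1Done is set:
#include <thread>
#include <iostream>

int main()
{
    std::thread reader( thread2 );  // blocks until thread1Done becomes true
    std::thread writer( thread1 );  // fills obj, sets the flag, notifies
    writer.join();
    reader.join();
    std::cout << obj.x << " " << obj.y << " " << obj.z << std::endl;  // prints 42 42 42
}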
I am reading a file which has variable-sized columns:
0 1 3 0
0 2 0
0 4 0
My code reads the file, but after the last "0" it outputs two numbers which are garbage. The output is: 0130020040-8457888-85648454 (something like that). Please help me, thank you.
int **routes = new int *[3];  // create route matrix
for (int i = 0; i < 3; i++)
{
    routes[i] = new int[sizeof(routes)];
}

ifstream routefile;
routefile.open("Sweep_routes.txt");
if (routefile.fail()) {
    cout << "ERROR";
    exit(1);
}

for (int i = 0; i < 3; i++) {
    for (int j = 0; j < sizeof(routes); j++) {
        routefile >> routes[i][j];  // read
    }
}
for (int i = 0; i < 3; i++) {
    for (int j = 0; j < sizeof(routes); j++) {
        cout << routes[i][j];  // display
    }
}

system("PAUSE");
return 0;
}
You can use this alternative to your code:
#include <iostream>
#include <string>
#include <array>
#include <fstream>

int main()
{
    std::array<std::array<std::string, 3>, 3> routes;
    std::ifstream routefile("Sweep_routes.txt");
    if (routefile)
    {
        for (auto &x : routes)      // iterate by reference so routes is actually filled
        {
            for (auto &y : x)
            {
                if (routefile >> y)
                    std::cout << y;
            }
        }
    }
    std::cin.get();
}
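Since the rows in your file have different lengths, a variant worth considering (a sketch of my own, not part of the answer above, assuming the same Sweep_routes.txt) reads each line into its own row, so no fixed column count is needed:
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    std::ifstream routefile("Sweep_routes.txt");
    std::vector<std::vector<int>> routes;   // one inner vector per line, any length

    std::string line;
    while (std::getline(routefile, line))   // read one whole line at a time
    {
        std::istringstream iss(line);
        std::vector<int> row;
        int value;
        while (iss >> value)                // pull out however many numbers the line has
            row.push_back(value);
        routes.push_back(row);
    }

    for (const auto &row : routes)
    {
        for (int v : row)
            std::cout << v << ' ';
        std::cout << '\n';
    }
}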