Why is my timer not periodic but expires only one time? (Linux)

I have created a timer using the POSIX timerfd facility.
The intention is that the timer should be periodic, and the timer expiry is observed from a separate function called myFunc().
I am calling this function multiple times, so that the timer expiry can be observed periodically after a wait of 5 seconds.
The problem is that as soon as it expires the first time, after 5 seconds, it doesn't expire again: no delay of 5 seconds is observed from the second iteration onwards.
Can someone tell me what I am missing?
#include <stdio.h>
#include <iostream>
#include <errno.h>
#include <dlfcn.h>
#include <assert.h>
#include <sys/mman.h>
#include <new>
#include <limits.h>
#include <sys/epoll.h>
#include <sys/timerfd.h>

using namespace std;

struct epoll_event event;
int timer_fd, efd, no_of_fd;

void myFunc( int i );

int main()
{
    struct itimerspec its;
    its.it_value.tv_sec = 5;
    its.it_value.tv_nsec = 0;
    its.it_interval.tv_sec = 3;   // Every 3 seconds interval
    its.it_interval.tv_nsec = 0;

    efd = epoll_create(2);

    timer_fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK);
    if ( timer_fd == -1 )
    {
        fprintf(stderr, "timerfd_create error in start timer");
        return 1;
    }

    event.data.fd = timer_fd;
    event.events = EPOLLIN|EPOLLPRI;
    if ( epoll_ctl(efd, EPOLL_CTL_ADD, timer_fd, &event) == -1 )
    {
        fprintf(stderr, "epoll_ctl error in start timer");
        return 1;
    }

    if ( timerfd_settime(timer_fd, 0, &its, NULL) == -1 )
    {
        fprintf(stderr, "timerfd_settime error in start timer");
        return 1;
    }

    myFunc( 10 );
    myFunc( 20 );
    myFunc( 30 );
}

void myFunc( int i )
{
    printf("Inside myFunc %d\n", i);
    no_of_fd = 0;
    struct epoll_event revent;
    errno = 0;
    do {
        no_of_fd = epoll_wait(efd, &revent, 1, -1);
    } while ( no_of_fd < 0 && errno == EINTR );
    if ( no_of_fd < 0 )
    {
        fprintf(stderr, "epoll_wait error in start timer");
    }
    if ( revent.data.fd == timer_fd ) {
        printf("Timer expired \n");
    }
}

When using epoll with level triggering, you should read 8 bytes from the timerfd on every EPOLLIN. The value read is a uint64_t that tells you the number of timer expirations since the last read. Reading it effectively "clears" this count, so that the next EPOLLIN is the result of a new expiration.
The manual page tells you about reading:
If the timer has already expired one or more times since its
settings were last modified using timerfd_settime(), or since
the last successful read(2), then the buffer given to read(2)
returns an unsigned 8-byte integer (uint64_t) containing the
number of expirations that have occurred. (The returned value
is in host byte order—that is, the native byte order for
integers on the host machine.)
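For example, here is a minimal sketch of how myFunc() from the question could consume the expiration count (the early return on error and the extra <unistd.h> and <stdint.h> includes are additions for illustration, not part of the original code):

void myFunc( int i )
{
    printf("Inside myFunc %d\n", i);
    struct epoll_event revent;
    int n;
    errno = 0;
    do {
        n = epoll_wait(efd, &revent, 1, -1);
    } while ( n < 0 && errno == EINTR );
    if ( n < 0 )
    {
        fprintf(stderr, "epoll_wait error in start timer");
        return;
    }
    if ( revent.data.fd == timer_fd )
    {
        uint64_t expirations = 0;
        // Consume the expiration count so the timerfd stops being readable
        // until the timer fires again; otherwise level-triggered epoll keeps
        // reporting EPOLLIN immediately.
        if ( read(timer_fd, &expirations, sizeof(expirations)) == sizeof(expirations) )
            printf("Timer expired %llu time(s)\n", (unsigned long long)expirations);
    }
}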

Related

How could futex_wake return 0

I implemented semaphore using futex. The following program often fails at the assertion in sem_post(). While the return value is supposed to be 1, it sometimes returns 0. How can this happen?
When I use POSIX semaphore the program always finishes successfully.
I'm using Linux 2.6.32-642.6.1.el6.x86_64
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <ctime>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
using namespace std;
#if 0
#include <semaphore.h>
#else
typedef volatile int sem_t;
void sem_init(sem_t* sem, int shared, int value)
{
*sem = value;
}
void sem_post(sem_t* sem)
{
while (1)
{
int value = *sem;
if (__sync_bool_compare_and_swap(sem, value, value >= 0 ? value+1 : 1))
{
if (value < 0) // had contender
{
int r = syscall(SYS_futex, sem, FUTEX_WAKE, 1, NULL, 0, 0);
if (r != 1)
fprintf(stderr, "post r=%d err=%d sem=%d %d\n", r,errno,value,*sem);
assert(r == 1);
}
return;
}
}
}
int sem_wait(sem_t* sem)
{
while (1)
{
int value = *sem;
if (value > 0 // positive means no contender
&& __sync_bool_compare_and_swap(sem, value, value-1))
return 0;
if (value <= 0
&& __sync_bool_compare_and_swap(sem, value, -1))
{
int r= syscall(SYS_futex, sem, FUTEX_WAIT, -1, NULL, 0, 0);
if (!r) {
assert(__sync_fetch_and_sub(sem, 1) > 0);
return 0;
}
printf("wait r=%d errno=%d sem=%d %d\n", r,errno, value,*sem);
}
}
}
void sem_getvalue(sem_t* sem, int* value)
{
*value = *sem;
}
#endif
// return current time in ns
unsigned long GetTime()
{
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return ts.tv_sec*1000000000ul + ts.tv_nsec;
}
void Send(sem_t* sem, unsigned count)
{
while (count--)
sem_post(sem);
}
void Receive(sem_t* sem, unsigned count)
{
while (count--)
sem_wait(sem);
}
int main()
{
sem_t* sem = reinterpret_cast<sem_t*>(mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_SHARED, -1, 0));
assert(sem != MAP_FAILED);
sem_init(sem, 1, 0);
unsigned count = 10485760;
int pid = fork();
assert(pid != -1);
if (!pid) // child
{
Send(sem, count);
_exit(EXIT_SUCCESS);
}
else // parent
{
unsigned long t0 = GetTime();
Receive(sem, count);
printf("t=%g ms\n", (GetTime()-t0)*1e-6);
wait(NULL);
int v;
sem_getvalue(sem, &v);
assert(v == 0);
}
}
The call to syscall(SYS_futex, sem, FUTEX_WAKE, 1, NULL, 0, 0) will return 0 when there is no thread waiting on sem. In your code this is possible because sem_post calls that futex line whenever *sem is negative, which can be the case without any thread actually sleeping:
If *sem is zero when sem_wait is called, you go on to execute __sync_bool_compare_and_swap(sem, value, -1), which sets *sem to -1. At that point, however, this thread is not yet sleeping. So when another thread calls sem_post at that moment (before the thread calling sem_wait enters the futex syscall), your assertion failure will happen.
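A minimal sketch of one way to avoid that particular failure, assuming the single-producer/single-consumer test from the question, is simply to treat a FUTEX_WAKE return of 0 as benign instead of asserting: the waiter that wrote -1 will see *sem != -1 when it finally calls FUTEX_WAIT and return immediately with EAGAIN, so no wakeup is lost. (The multi-waiter problems discussed next are a separate issue.)

void sem_post(sem_t* sem)
{
    while (1)
    {
        int value = *sem;
        if (__sync_bool_compare_and_swap(sem, value, value >= 0 ? value+1 : 1))
        {
            if (value < 0) // a contender announced itself, but may not be asleep yet
            {
                // FUTEX_WAKE may legitimately return 0 here: the waiter has
                // written -1 but has not entered the futex syscall yet.
                syscall(SYS_futex, sem, FUTEX_WAKE, 1, NULL, 0, 0);
            }
            return;
        }
    }
}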
It also seems that __sync_bool_compare_and_swap(sem, value, -1) and __sync_fetch_and_sub(sem, 1) are problematic in their own right. We need to keep in mind that sem_wait may be called concurrently by multiple threads (although in your test case only one thread calls it).
If we can afford the overhead of busy polling, we can remove the futex entirely, resulting in the following code. It is also faster than the futex version (t=347 ms versus t=914 ms).
void sem_post(sem_t* sem)
{
    int value = __sync_fetch_and_add(sem, 1);
}

int sem_wait(sem_t* sem)
{
    while (1)
    {
        int value = *sem;
        if (value > 0) // positive means no contention
        {
            if (__sync_bool_compare_and_swap(sem, value, value-1)) {
                return 0; // success
            }
        }
        // yield the processor to avoid deadlock
        sched_yield();
    }
}
The code works as follows: the shared variable *sem is always non-negative. When a thread posts the semaphore from 0 to 1, all threads waiting on the semaphore may try to take it, but exactly one will succeed in the compare-and-swap.

Linux: POSIX Timers ignore Thread Priority

I have a thread which has to run every millisecond. When no other thread of the program is active, everything is fine. But if more than 3 other threads are running, the timer thread is called fewer than 100 times per second (on my test machine).
It seems that the priority settings of the timer are ignored.
I have tested this with Kernel Versions 3.12 and 3.18.
Test code, which prints some values after 10000 calls of the timer thread (so normally after 10 seconds):
#define NTHREADS 3

#include <stdio.h>
#include <sched.h>
#include <pthread.h>
#include <signal.h>
#include <time.h>

timer_t timer;
unsigned long long val = 0;
pthread_attr_t attrHigh, attrLow;

void TimerTestThread(union sigval sv)
{
    val++;
    if(val >= 10000)
        printf("%llu ", val);
}

void *BusyThread(void *arg)
{
    int a = 0;
    while(1)
    {
        a++;
    }
    return NULL;
}

int main()
{
    pthread_attr_init(&attrHigh);
    pthread_attr_setinheritsched(&attrHigh, PTHREAD_EXPLICIT_SCHED);
    pthread_attr_setschedpolicy(&attrHigh, SCHED_FIFO);
    struct sched_param paramHigh;
    paramHigh.sched_priority = 90;
    pthread_attr_setschedparam(&attrHigh, &paramHigh);

    pthread_attr_init(&attrLow);
    pthread_attr_setinheritsched(&attrLow, PTHREAD_EXPLICIT_SCHED);
    pthread_attr_setschedpolicy(&attrLow, SCHED_FIFO);
    struct sched_param paramLow;
    paramLow.sched_priority = 1;
    pthread_attr_setschedparam(&attrLow, &paramLow);

    struct sigevent evp;
    evp.sigev_notify = SIGEV_THREAD;
    evp.sigev_notify_function = TimerTestThread;
    evp.sigev_notify_attributes = &attrHigh;

    struct itimerspec value;
    value.it_interval.tv_sec = 0;   // Interval
    value.it_interval.tv_nsec = 1000000;
    value.it_value.tv_sec = 0;      // Initial expiration
    value.it_value.tv_nsec = 1000000;

    int i;
    pthread_t threads[NTHREADS];
    for(i=0; i<NTHREADS; i++)
    {
        pthread_create(&(threads[i]), &attrLow, BusyThread, NULL);
    }

    if(timer_create(CLOCK_MONOTONIC, &evp, &timer) != 0)
    {
        i = 5;
    }
    if(timer_settime(timer, 0, &value, NULL) != 0)
    {
        i = 6;
    }
    while(1);
}
I do not understand why the behavior is like this. Maybe you can see something I missed.
EDIT: Corrected a silly source copy error.

Differences between POSIX threads on OSX and LINUX?

Can anyone shed light on why, when the code below is compiled and run on OS X, the 'bartender' thread skips through sem_wait() in what seems like a random manner, yet when compiled and run on a Linux machine, sem_wait() holds the thread until the corresponding sem_post() is made, as would be expected?
I am currently learning not only POSIX threads but concurrency as a whole, so absolutely any comments, tips, and insights are warmly welcomed...
Thanks in advance.
#include <stdio.h>
#include <stdlib.h>
#include <semaphore.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
//using namespace std;
#define NSTUDENTS 30
#define MAX_SERVINGS 100
void* student(void* ptr);
void get_serving(int id);
void drink_and_think();
void* bartender(void* ptr);
void refill_barrel();
// This shared variable gives the number of servings currently in the barrel
int servings = 10;
// Define here your semaphores and any other shared data
sem_t *mutex_stu;
sem_t *mutex_bar;
int main() {
static const char *semname1 = "Semaphore1";
static const char *semname2 = "Semaphore2";
pthread_t tid;
mutex_stu = sem_open(semname1, O_CREAT, 0777, 0);
if (mutex_stu == SEM_FAILED)
{
fprintf(stderr, "%s\n", "ERROR creating semaphore semname1");
exit(EXIT_FAILURE);
}
mutex_bar = sem_open(semname2, O_CREAT, 0777, 1);
if (mutex_bar == SEM_FAILED)
{
fprintf(stderr, "%s\n", "ERROR creating semaphore semname2");
exit(EXIT_FAILURE);
}
pthread_create(&tid, NULL, bartender, &tid);
for(int i=0; i < NSTUDENTS; ++i) {
pthread_create(&tid, NULL, student, &tid);
}
pthread_join(tid, NULL);
sem_unlink(semname1);
sem_unlink(semname2);
printf("Exiting the program...\n");
}
//Called by a student process. Do not modify this.
void drink_and_think() {
// Sleep time in milliseconds
int st = rand() % 10;
sleep(st);
}
// Called by a student process. Do not modify this.
void get_serving(int id) {
if (servings > 0) {
servings -= 1;
} else {
servings = 0;
}
printf("ID %d got a serving. %d left\n", id, servings);
}
// Called by the bartender process.
void refill_barrel()
{
servings = 1 + rand() % 10;
printf("Barrel refilled up to -> %d\n", servings);
}
//-- Implement a synchronized version of the student
void* student(void* ptr) {
int id = *(int*)ptr;
printf("Started student %d\n", id);
while(1) {
sem_wait(mutex_stu);
if(servings > 0) {
get_serving(id);
} else {
sem_post(mutex_bar);
continue;
}
sem_post(mutex_stu);
drink_and_think();
}
return NULL;
}
//-- Implement a synchronized version of the bartender
void* bartender(void* ptr) {
int id = *(int*)ptr;
printf("Started bartender %d\n", id);
//sleep(5);
while(1) {
sem_wait(mutex_bar);
if(servings <= 0) {
refill_barrel();
} else {
printf("Bar skipped sem_wait()!\n");
}
sem_post(mutex_stu);
}
return NULL;
}
The first time you run the program, you're creating named semaphores with initial values, but since your threads never exit (they're infinite loops), you never get to the sem_unlink calls to delete those semaphores. If you kill the program (with ctrl-C or any other way), the semaphores will still exist in whatever state they are in. So if you run the program again, the sem_open calls will succeed (because you don't use O_EXCL), but they won't reset the semaphore value or state, so they might be in some odd state.
So you should make sure to call sem_unlink when the program STARTS, before calling sem_open. Better yet, don't use named semaphores at all -- use sem_init to initialize a couple of unnamed semaphores instead.
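For example, here is a minimal sketch of that first suggestion, reusing semname1 and semname2 from the question (the O_EXCL flag is added here so a leftover semaphore would be reported instead of silently reused):

/* Remove any stale named semaphores from a previous (killed) run,
   then create them fresh with known initial values. */
sem_unlink(semname1);   /* it is fine if these fail with ENOENT */
sem_unlink(semname2);
mutex_stu = sem_open(semname1, O_CREAT | O_EXCL, 0777, 0);
mutex_bar = sem_open(semname2, O_CREAT | O_EXCL, 0777, 1);
if (mutex_stu == SEM_FAILED || mutex_bar == SEM_FAILED)
{
    perror("sem_open");
    exit(EXIT_FAILURE);
}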

Arguments were passed wrong in pthread

I wrote some code to print out the strings "Thread 0" to "Thread 4" using pthreads.
Here is my code:
Case 1:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

void *print_message_function(void* parameter) {
    long *i = (long *)parameter;
    printf("Thread %ld\n", *i);
    pthread_exit(0);
}

int main(int argc, char *argv[]) {
    pthread_t threads[5];
    long i = 0;
    for (i = 0; i < 5; i++) {
        pthread_create(&threads[i], 0, print_message_function, (void *)&i);
    }
    pthread_exit(NULL);
}
But the result is:
Thread 2
Thread 3
Thread 3
Thread 4
Thread 5
or:
Thread 0
Thread 0
Thread 0
Thread 0
Thread 0
The output changes when I run it again, so I don't know why the values printed come out as 2 to 5, or all 0, or various other combinations. I think the arguments I passed are 0 to 4.
Case 2:
When I change to the new code:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

void *print_message_function(void *parameter);

int main(int argc, char *argv[]) {
    pthread_t threads[5];
    int i = 0;
    for (i = 0; i < 5; i++) {
        char *msg = (char*)malloc(sizeof(char));
        sprintf(msg, "Thread %d", i);
        pthread_create(&threads[i], 0, print_message_function, (void *)msg);
    }
}

void *print_message_function(void *parameter) {
    printf("%s\n", (char *)parameter);
}
The result is:
Thread 1
Thread 0
Thread 3
Thread 2
Thread 4
Thread 4
It looks as though the loop ran 6 times! Why?
Change Case 1 to this:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

void *print_message_function(void* parameter) {
    long i = (long)parameter;   // <<<
    printf("Thread %ld\n", i);  // <<<
    pthread_exit(0);
}

int main(int argc, char *argv[]) {
    pthread_t threads[5];
    long i = 0;
    for (i = 0; i < 5; i++) {
        pthread_create(&threads[i], 0, print_message_function, (void *)i); // <<<
    }
    pthread_exit(NULL);
}
The reason that you were seeing inconsistent results before was because you were passing a pointer to each thread where each pointer was pointing at the same local variable, which you were then modifying.
In Case 2 you are mallocing only a single char and then trying to write a string to it. It should be fairly easy to fix.
Your Case 2 approach is valid; however, you need to fix the malloc part to allocate enough bytes. Change it to
char *msg = (char*)malloc(sizeof(char) * (strlen("Thread ") + 10));
// assuming i takes at most 9 digits (an unlikely case)
For your Case 1, you are passing the address of i, but the value of i keeps changing; the thread function will get whatever value happens to be at that location when it tries to print. Also note that the address of i may not be valid by the time the thread function executes, as i is allocated on main's stack and will go away when main returns.
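Putting those fixes together, a corrected Case 2 might look like this sketch (the 32-byte buffer size, the free() in the thread, and the pthread_join loop are choices made here for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

void *print_message_function(void *parameter) {
    printf("%s\n", (char *)parameter);
    free(parameter);                    /* the thread owns the heap buffer */
    return NULL;
}

int main(int argc, char *argv[]) {
    pthread_t threads[5];
    int i;
    for (i = 0; i < 5; i++) {
        char *msg = (char*)malloc(32);  /* enough room for "Thread " + any int */
        snprintf(msg, 32, "Thread %d", i);
        pthread_create(&threads[i], 0, print_message_function, msg);
    }
    for (i = 0; i < 5; i++)
        pthread_join(threads[i], NULL); /* wait so no output is lost */
    return 0;
}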

Keeping number of threads constant with pthread in C

I tried to find a solution to keep the number of working threads constant under Linux in C using pthreads, but I seem to be unable to fully understand what's wrong with the following code:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#define MAX_JOBS 50
#define MAX_THREADS 5
pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
int jobs = MAX_JOBS;
int worker = 0;
int counter = 0;
void *functionC() {
pthread_mutex_lock(&mutex1);
worker++;
counter++;
printf("Counter value: %d\n",counter);
pthread_mutex_unlock(&mutex1);
// Do something...
sleep(4);
pthread_mutex_lock(&mutex1);
jobs--;
worker--;
printf(" >>> Job done: %d\n",jobs);
pthread_mutex_unlock(&mutex1);
}
int main(int argc, char *argv[]) {
int i=0, j=0;
pthread_t thread[MAX_JOBS];
// Create threads if the number of working threads doesn't exceed MAX_THREADS
while (1) {
if (worker > MAX_THREADS) {
printf(" +++ In queue: %d\n", worker);
sleep(1);
} else {
//printf(" +++ Creating new thread: %d\n", worker);
pthread_create(&thread[i], NULL, &functionC, NULL);
//printf("%d",worker);
i++;
}
if (i == MAX_JOBS) break;
}
// Wait all threads to finish
for (j=0;j<MAX_JOBS;j++) {
pthread_join(thread[j], NULL);
}
return(0);
}
A while (1) loop keeps creating threads as long as the number of working threads is under a certain threshold. A mutex is supposed to lock the critical sections every time the global counter of working threads is incremented (thread creation) or decremented (job done). I thought it would work fine, and for the most part it does, but weird things happen...
For instance, if I comment out the printf //printf(" +++ Creating new thread: %d\n", worker); (as in this snippet), the while (1) loop seems to create a random number of threads at a time (18-25 in my experience; functionC prints "Counter value:" from 1 up to 18-25) instead of respecting the if condition inside the loop. If I include the printf, the loop seems to behave "almost" the right way... This seems to hint that there's a missing "mutex" condition I should add to the loop in main() to effectively block thread creation when MAX_THREADS is reached, but after changing this code a LOT of times over the past few days I'm a bit lost now. What am I missing?
Please let me know what I should change in order to keep the number of threads constant; it doesn't seem that I'm too far from the solution... Hopefully... :-)
Thanks in advance!
Your problem is that worker is not incremented until the new thread actually starts and gets to run - in the meantime, the main thread loops around, checks workers, finds that it hasn't changed, and starts another thread. It can repeat this many times, creating far too many threads.
So, you need to increment worker in the main thread, when you've decided to create a new thread.
You have another problem - you should be using condition variables to let the main thread sleep until it should start another thread, not using a busy-wait loop with a sleep(1); in it. The complete fixed code would look like:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>

#define MAX_JOBS 50
#define MAX_THREADS 5

pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER;
int jobs = MAX_JOBS;
int workers = 0;
int counter = 0;

void *functionC() {
    pthread_mutex_lock(&mutex1);
    counter++;
    printf("Counter value: %d\n",counter);
    pthread_mutex_unlock(&mutex1);

    // Do something...
    sleep(4);

    pthread_mutex_lock(&mutex1);
    jobs--;
    printf(" >>> Job done: %d\n",jobs);
    /* Worker is about to exit, so decrement count and wake up main thread */
    workers--;
    pthread_cond_signal(&cond1);
    pthread_mutex_unlock(&mutex1);
    return NULL;
}

int main(int argc, char *argv[]) {
    int i=0, j=0;
    pthread_t thread[MAX_JOBS];

    // Create threads if the number of working threads doesn't exceed MAX_THREADS
    while (i < MAX_JOBS) {
        /* Block on condition variable until there are insufficient workers running */
        pthread_mutex_lock(&mutex1);
        while (workers >= MAX_THREADS)
            pthread_cond_wait(&cond1, &mutex1);
        /* Another worker will be running shortly */
        workers++;
        pthread_mutex_unlock(&mutex1);

        pthread_create(&thread[i], NULL, &functionC, NULL);
        i++;
    }

    // Wait for all threads to finish
    for (j=0; j<MAX_JOBS; j++) {
        pthread_join(thread[j], NULL);
    }
    return(0);
}
Note that even though this works, it isn't ideal - it's best to create the number of threads you want up-front, and have them loop around, waiting for work. This is because creating and destroying threads has significant overhead, and because it often simplifies resource management. A version of your code rewritten to work like this would look like:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>

#define MAX_JOBS 50
#define MAX_THREADS 5

pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
int jobs = MAX_JOBS;
int counter = 0;

void *functionC()
{
    int running_job;

    pthread_mutex_lock(&mutex1);
    counter++;
    printf("Counter value: %d\n",counter);

    while (jobs > 0) {
        running_job = jobs--;
        pthread_mutex_unlock(&mutex1);

        printf(" >>> Job starting: %d\n", running_job);
        // Do something...
        sleep(4);
        printf(" >>> Job done: %d\n", running_job);

        pthread_mutex_lock(&mutex1);
    }
    pthread_mutex_unlock(&mutex1);
    return NULL;
}

int main(int argc, char *argv[]) {
    int i;
    pthread_t thread[MAX_THREADS];

    for (i = 0; i < MAX_THREADS; i++)
        pthread_create(&thread[i], NULL, &functionC, NULL);

    // Wait for all threads to finish
    for (i = 0; i < MAX_THREADS; i++)
        pthread_join(thread[i], NULL);
    return 0;
}
