Proper implementation of inter-process communication (IPC) - Linux

Is the following a proper implementation of inter-process communication?
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <poll.h>
int main(int argc, char** argv) {
    if (argc > 1) {
        // Sending side
        struct stat buffer;
        if (stat("/tmp/PROCAtoPROCB", &buffer) != 0)
            mkfifo("/tmp/PROCAtoPROCB", (mode_t)0600);
        int fdFIFO = open("/tmp/PROCAtoPROCB", O_WRONLY | O_NONBLOCK);
        if (fdFIFO >= 0) {
            // send the whole string (sizeof(argv[1]) would only be the pointer size)
            write(fdFIFO, argv[1], strlen(argv[1]) + 1);
            close(fdFIFO);
        }
    } else {
        // Receiving side
        int fdFIFO = -1;
        struct stat buffer;
        if (stat("/tmp/PROCAtoPROCB", &buffer) != 0)
            mkfifo("/tmp/PROCAtoPROCB", (mode_t)0600);
        while (1) {
            struct pollfd pollfds[1];
            if (fdFIFO == -1)
                fdFIFO = open("/tmp/PROCAtoPROCB", O_RDONLY | O_NONBLOCK);
            pollfds[0].fd = fdFIFO;
            pollfds[0].events = POLLIN;
            poll(pollfds, 1, -1);
            if (pollfds[0].revents & POLLIN) {
                char buf[1024];
                ssize_t n = read(fdFIFO, buf, sizeof(buf) - 1);
                close(fdFIFO);
                fdFIFO = -1;
                if (n > 0) {
                    buf[n] = '\0'; // make sure the message is NUL-terminated
                    printf("Other process says %s\n", buf);
                }
            }
            printf("End of loop\n");
        }
    }
    return 0;
}
It seems to work, but I'm wondering whether there could be a race condition that leads to hanging. One constraint is that both processes must be started independently and in any order.

Some stress tests showed no problems, so the implementation seems OK if somebody wants to reuse the code.
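One caveat if the sender can start first: opening a FIFO with O_WRONLY | O_NONBLOCK fails with ENXIO while no reader has it open, so the message would be silently dropped. A hedged retry sketch for the sending side (the path and retry interval are illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// Try to open the write end of a FIFO, retrying while no reader exists
// (ENXIO). Returns an fd on success, or -1 after 'attempts' failures.
int open_fifo_writer(const char *path, int attempts) {
    while (attempts-- > 0) {
        int fd = open(path, O_WRONLY | O_NONBLOCK);
        if (fd >= 0)
            return fd;
        if (errno != ENXIO) {    // real error, not just "no reader yet"
            perror("open");
            return -1;
        }
        usleep(100 * 1000);      // wait 100 ms for a reader to appear
    }
    return -1;
}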

Related

Using eBPF to measure CPU mode switch overhead incurred by making a system call

As the title says, but the measurement result is unreasonable. Let me describe the current status.
I'm using the getuid syscall as the measurement target. I started by measuring the complete overhead with two clock_gettime calls bracketing the syscall, then measured the entry overhead (what the SYSCALL instruction does before executing the actual getuid code) and the leaving overhead separately (with an eBPF program hooked onto the entry and leaving points).
The result for the complete overhead is ~65ns; the entry and leaving overheads are ~77ns and ~70ns respectively.
Clearly my measurement includes some additional overhead beyond the typical cost. What's weird is that clock_gettime is a vDSO call, so it should barely add any noticeable overhead, and eBPF, which is a lightweight instrumentation tool (JIT-ed, etc.) in Linux these days, shouldn't add noticeable overhead either.
Does anyone have an idea what additional overhead my measurement incurs?
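One way to bound part of the instrumentation cost is to time the timer itself. A minimal, self-contained sketch (the iteration count is arbitrary):

#include <stdio.h>
#include <time.h>

// Estimate the cost of one clock_gettime(CLOCK_MONOTONIC) call by timing
// N back-to-back calls and averaging the elapsed time.
int main(void) {
    enum { N = 1000000 };
    struct timespec start, end, tmp;
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < N; i++)
        clock_gettime(CLOCK_MONOTONIC, &tmp);
    clock_gettime(CLOCK_MONOTONIC, &end);
    double ns = (end.tv_sec - start.tv_sec) * 1e9
              + (end.tv_nsec - start.tv_nsec);
    printf("avg clock_gettime cost: %.1f ns\n", ns / N);
    return 0;
}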
Following is my measurement code:
userland (measuring the return-from-kernel overhead):
#define _GNU_SOURCE
#include <bpf.h>
#include <libbpf.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <string.h>
#include <asm/errno.h>
#include <linux/if_link.h>
#include <errno.h>
#include <sys/resource.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <time.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sched.h>
#define likely(x) __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
#define TEST_CNT 1000000
#define BPF_FILE_NAME "mkern.o"
#define BPF_MAP_NAME "msys"
static inline int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
int cpu, int group_fd,
unsigned long flags)
{
attr->size = sizeof(*attr);
return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
static int attach_kprobe(int prog_fd)
{
int err, fd, id;
char buf[32];
struct perf_event_attr attr = {};
err = system("echo 'r:kp_sys_batch __x64_sys_getuid' > /sys/kernel/debug/tracing/kprobe_events");
if (err < 0) {
fprintf(stderr, "Failed to create kprobe, error '%s'\n", strerror(errno));
return -1;
}
fd = open("/sys/kernel/debug/tracing/events/kprobes/kp_sys_batch/id", O_RDONLY, 0);
if (fd < 0) {
fprintf(stderr, "Failed to open event %s\n", "sys_batch");
return -1;
}
err = read(fd, buf, sizeof(buf));
if (err < 0 || err >= sizeof(buf)) {
fprintf(stderr, "read from '%s' failed '%s'\n", "sys_batch", strerror(errno));
return -1;
}
close(fd);
buf[err] = 0;
id = atoi(buf);
attr.config = id;
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
attr.wakeup_events = 1;
fd = sys_perf_event_open(&attr, 0/*this process*/, -1/*any cpu*/, -1/*group leader*/, 0);
if (fd < 0) {
perror("sys_perf_event_open");
fprintf(stderr, "Failed to open perf_event (id: %llu)\n", attr.config);
return -1;
}
err = ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
if (err < 0) {
fprintf(stderr, "ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
strerror(errno));
return -1;
}
err = ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (err < 0) {
fprintf(stderr, "ioctl PERF_EVENT_IOC_SET_BPF failed: %s\n",
strerror(errno));
return -1;
}
return 0;
}
static void maxi_memlock_rlimit(void)
{
struct rlimit rlim_new = {
.rlim_cur = RLIM_INFINITY,
.rlim_max = RLIM_INFINITY,
};
if (setrlimit(RLIMIT_MEMLOCK, &rlim_new)) {
fprintf(stderr, "Failed to increase RLIMIT_MEMLOCK limit!\n");
exit(-1);
}
}
static int find_map_fd(struct bpf_object *bpf_obj, const char *mapname)
{
struct bpf_map *map;
int map_fd = -1;
map = bpf_object__find_map_by_name(bpf_obj, mapname);
if (!map) {
fprintf(stderr, "Failed finding map by name: %s\n", mapname);
exit(-1);
}
map_fd = bpf_map__fd(map);
return map_fd;
}
int main(int argc, char **argv)
{
int bpf_map_fd;
int bpf_prog_fd = -1;
int err;
int key = 0;
struct timespec tp;
struct bpf_object *bpf_obj;
struct reals map;
struct bpf_prog_load_attr xattr = {
.prog_type = BPF_PROG_TYPE_KPROBE,
.file = BPF_FILE_NAME,
};
maxi_memlock_rlimit();
err = bpf_prog_load_xattr(&xattr, &bpf_obj, &bpf_prog_fd);
if (err) {
fprintf(stderr, "Failed loading bpf object file\n");
exit(-1);
}
if (attach_kprobe(bpf_prog_fd)) {
fprintf(stderr, "Failed attaching kprobe\n");
exit(-1);
}
bpf_map_fd = find_map_fd(bpf_obj, BPF_MAP_NAME);
if (bpf_map_fd < 0) {
fprintf(stderr, "Failed finding map fd\n");
exit(-1);
}
/* warm up */
for (int i = 0; i < TEST_CNT; i++) {
syscall(__NR_getuid); /* dummy call */
clock_gettime(CLOCK_MONOTONIC, &tp);
if (unlikely(bpf_map_lookup_elem(bpf_map_fd, &key, &map))) {
fprintf(stderr, "Failed to lookup map element\n");
perror("lookup");
exit(-1);
}
}
uint64_t delta = 0;
for (int i = 0; i < TEST_CNT; i++) {
syscall(__NR_getuid); /* dummy call */
clock_gettime(CLOCK_MONOTONIC, &tp);
if (unlikely(bpf_map_lookup_elem(bpf_map_fd, &key, &map))) {
fprintf(stderr, "Failed to lookup map element\n");
perror("lookup");
exit(-1);
}
delta += (1000000000 * tp.tv_sec + tp.tv_nsec) - map.ts;
}
printf("avg: %fns\n", (double) delta / TEST_CNT);
return 0;
}
userland (measuring the enter-kernel overhead; almost the same as the above, except for the lines shown):
err = system("echo 'p:kp_sys_batch sys_batch' > /sys/kernel/debug/tracing/kprobe_events");
...
clock_gettime(CLOCK_MONOTONIC, &tp);
syscall(__NR_getuid); /* dummy call */
...
delta += map.ts - (1000000000 * tp.tv_sec + tp.tv_nsec);
kernel land:
SEC("getuid")
int kp_sys_batch(struct pt_regs *ctx)
{
__u32 i = 0;
struct reals *r;
r = bpf_map_lookup_elem(&reals, &i);
if (!r)
return 1;
r->ts = bpf_ktime_get_ns();
return 0;
}
Apart from the additional overhead mentioned above: inside the return-from-kernel measurement code, if the echo 'r:kp_sys_batch sys_batch' is changed to echo 'p:kp_sys_batch sys_batch' (which means the measurement would also take the syscall execution itself into account), the result becomes ~48ns. That result should include both the syscall execution and the return-from-kernel path, so how can it be only ~48ns?
Thanks!

How to use a socat proxy to intercept ancillary data sent over a Unix domain socket?

I have two programs, a server and a client. The server opens a file, writes data to it, and then sends its file descriptor to the client over a Unix domain socket. Everything works fine until I introduce a socat proxy in between.
socat -x -v UNIX-LISTEN:/tmp/unixSockSendFe,mode=775,reuseaddr,fork UNIX-CONNECT:/tmp/unixSockSendFd
Explanation
The server listens on /tmp/unixSockSendFd, socat connects to it (UNIX-CONNECT:/tmp/unixSockSendFd) and creates another Unix domain socket (UNIX-LISTEN:/tmp/unixSockSendFe,mode=775,reuseaddr,fork), to which the client connects. Any communication between the client and server gets relayed through socat, which prints the bytes sent in their binary (-x option) and ASCII (-v option) forms.
If I don't use socat and the client connects directly to the server (on the /tmp/unixSockSendFd socket), everything works fine, but when socat is used as a proxy, the client crashes with a segmentation fault.
Server
/*Server code - sendfd.c*/
#include <sys/socket.h>
#include <sys/un.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
char *socket_path = "/tmp/unixSockSendFd";
char *file="/tmp/abcd.txt" ;/*file whose fd is to be sent*/
int sendfd(int sock, int fd);
int recvfd(int s);
char data[]="sahil\0";
int main(int argc, char *argv[]) {
struct sockaddr_un addr;
char buf[100];
buf[0]='\n';
int fd,rc,confd;
int fd_to_send;
int temp,len;
temp=1;
fd_to_send=open(file,O_TRUNC|O_RDWR|O_CREAT,S_IRWXU|S_IRWXG|S_IRWXO);
if(fd_to_send==-1)
{
perror("file open error");
return -1;
}
if (argc > 1) socket_path=argv[1];
if ( (fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
perror("socket error");
exit(-1);
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
if (*socket_path == '\0') {
*addr.sun_path = '\0';
strncpy(addr.sun_path+1, socket_path+1, sizeof(addr.sun_path)-2);
} else {
strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path)-1);
}
unlink(socket_path);
if(bind(fd,(struct sockaddr*)&addr,sizeof(addr))==-1){
perror("bind error");
return -1;
}
/*Writing data to file before sending fd*/
len=write(fd_to_send,data,(int)strlen(data));
fsync(fd_to_send);
printf("(len=%d)data written in file(content between ## marks) ##%s##\n",len,data);
listen(fd,1);
for(;;){
confd=accept(fd,NULL,NULL);
if(confd==-1)
{
perror("accept error");
continue;
}
else{
printf("new client connected ... sending fd ... \n");
sendfd(confd,fd_to_send);
close(confd);
}
}
return 0;
}
int sendfd(int sock, int fd)
{
struct msghdr hdr;
struct iovec data;
char cmsgbuf[CMSG_SPACE(sizeof(int))];
char dummy = '*';
data.iov_base = &dummy;
data.iov_len = sizeof(dummy);
memset(&hdr, 0, sizeof(hdr));
hdr.msg_name = NULL;
hdr.msg_namelen = 0;
hdr.msg_iov = &data;
hdr.msg_iovlen = 1;
hdr.msg_flags = 0;
hdr.msg_control = cmsgbuf;
hdr.msg_controllen = CMSG_LEN(sizeof(int));
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
*(int*)CMSG_DATA(cmsg) = fd;
//memcpy((CMSG_DATA(cmsg)), &fd, sizeof(fd)); -- from ivshmem server code - this too works instead of previous line
int n = sendmsg(sock, &hdr, 0);
if(n == -1)
printf("sendmsg() failed: %s (socket fd = %d)\n", strerror(errno), sock);
return n;
}
int recvfd(int s)
{
int n;
int fd;
char buf[1];
struct iovec iov;
struct msghdr msg;
struct cmsghdr *cmsg;
char cms[CMSG_SPACE(sizeof(int))];
iov.iov_base = buf;
iov.iov_len = 1;
memset(&msg, 0, sizeof msg);
msg.msg_name = 0;
msg.msg_namelen = 0;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = (caddr_t)cms;
msg.msg_controllen = sizeof cms;
if((n=recvmsg(s, &msg, 0)) < 0)
return -1;
if(n == 0){
perror("unexpected EOF");
return -1;
}
cmsg = CMSG_FIRSTHDR(&msg);
memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
return fd;
}
Client
/*Client code - recvfd.c*/
#include <sys/socket.h>
#include <sys/un.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
char *socket_path = "/tmp/unixSockSendFe";
int sendfd(int sock, int fd);
int recvfd(int s);
int fd_received;
int main(int argc, char *argv[]) {
struct sockaddr_un addr;
char buf[100];
buf[0]='\n';
int fd,rc,confd;
int temp,len;
temp=1;
if (argc > 1) socket_path=argv[1];
if ( (fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
perror("socket error");
exit(-1);
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
if (*socket_path == '\0') {
*addr.sun_path = '\0';
strncpy(addr.sun_path+1, socket_path+1, sizeof(addr.sun_path)-2);
} else {
strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path)-1);
}
if(connect(fd,(struct sockaddr*)&addr,sizeof(addr))==-1)
{
perror("connect error");
exit(-1);
}
fd_received=recvfd(fd);
lseek(fd_received,0,SEEK_SET);
len=read(fd_received,buf,5);
if(len<0)
{
perror("read error");
}
printf("(fd_received=%d,len=%d) first %d characters read from the file whoes fd was received(content within ##) ##%.*s##\n",fd_received,len,5,5,buf);
return 0;
}
int sendfd(int sock, int fd)
{
struct msghdr hdr;
struct iovec data;
char cmsgbuf[CMSG_SPACE(sizeof(int))];
char dummy = '*';
data.iov_base = &dummy;
data.iov_len = sizeof(dummy);
memset(&hdr, 0, sizeof(hdr));
hdr.msg_name = NULL;
hdr.msg_namelen = 0;
hdr.msg_iov = &data;
hdr.msg_iovlen = 1;
hdr.msg_flags = 0;
hdr.msg_control = cmsgbuf;
hdr.msg_controllen = CMSG_LEN(sizeof(int));
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
*(int*)CMSG_DATA(cmsg) = fd;
int n = sendmsg(sock, &hdr, 0);
if(n == -1)
printf("sendmsg() failed: %s (socket fd = %d)\n", strerror(errno), sock);
return n;
}
int recvfd(int s)
{
int n;
int fd;
char buf[1];
struct iovec iov;
struct msghdr msg;
struct cmsghdr *cmsg;
char cms[CMSG_SPACE(sizeof(int))];
iov.iov_base = buf;
iov.iov_len = 1;
memset(&msg, 0, sizeof msg);
msg.msg_name = 0;
msg.msg_namelen = 0;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_control = (caddr_t)cms;
msg.msg_controllen = sizeof cms;
if((n=recvmsg(s, &msg, 0)) < 0)
return -1;
if(n == 0){
perror("unexpected EOF");
return -1;
}
cmsg = CMSG_FIRSTHDR(&msg);
memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
return fd;
}
On running client (recvfd) I get segmentation fault.
./recvfd
[1] 6104 segmentation fault (core dumped) ./recvfd
Here are the relevant lines from running gdb on the core dump:
Core was generated by `./recvfd'.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x0000000000400cf9 in recvfd (s=3) at recvfd.c:146
146 memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
Here is the core dump - Link.
I want to sniff the communication happening between the two processes when the file descriptor is being sent. I am not able to figure out why the client crashes when run with socat, but doesn't when run without it.
Update 1
While using socat to sniff the communication between two processes of a well-established open source project (ivshmem - used for sharing memory between running virtual machines, also part of Intel DPDK, Link), I observed the following:
None of the processes crash when socat is used.
When socat is used, the file descriptor is not properly sent and does not get added to the recipient process.
When socat is not used and the two processes are connected directly, the file descriptor is sent properly and gets added to the recipient process.
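A plausible explanation for the crash, consistent with the ivshmem observations above: socat relays only the ordinary byte stream, so the SCM_RIGHTS ancillary data is consumed by socat's own recvmsg() and never re-sent. The client's recvmsg() then returns the dummy byte with no control message, CMSG_FIRSTHDR() yields NULL, and CMSG_DATA(cmsg) dereferences a null pointer (the line the core dump points at). A defensive variant of recvfd(), assuming the same includes as the question's code:

// Variant of recvfd() that tolerates a missing SCM_RIGHTS message
// (e.g. when a relay like socat strips the ancillary data).
int recvfd_checked(int s)
{
    int fd;
    char buf[1];
    char cms[CMSG_SPACE(sizeof(int))];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    struct msghdr msg = { 0 };
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cms;
    msg.msg_controllen = sizeof(cms);
    if (recvmsg(s, &msg, 0) <= 0)
        return -1;
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    if (cmsg == NULL || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
        fprintf(stderr, "no file descriptor in ancillary data\n");
        return -1;
    }
    memmove(&fd, CMSG_DATA(cmsg), sizeof(int));
    return fd;
}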

How can I make sure that only a single instance of the process runs on Linux? [duplicate]

What would be your suggestion for creating a single-instance application, so that only one process is allowed to run at a time? A file lock, a mutex, or something else?
A good way is:
#include <sys/file.h>
#include <errno.h>
int pid_file = open("/var/run/whatever.pid", O_CREAT | O_RDWR, 0666);
int rc = flock(pid_file, LOCK_EX | LOCK_NB);
if(rc) {
if(EWOULDBLOCK == errno)
; // another instance is running
}
else {
// this is the first instance
}
Note that locking allows you to ignore stale pid files (i.e. you don't have to delete them). When the application terminates for any reason the OS releases the file lock for you.
Pid files are not terribly useful because they can be stale (the file exists but the process does not). Hence, the application executable itself can be locked instead of creating and locking a pid file.
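As a sketch of that last idea (Linux-specific, using /proc/self/exe so every instance locks the same binary; error handling kept minimal):

#include <fcntl.h>
#include <sys/file.h>

// Lock the running binary itself instead of a separate pid file.
// Returns 1 if we are the only instance, 0 if another holds the lock.
int single_instance(void) {
    int fd = open("/proc/self/exe", O_RDONLY);
    if (fd < 0)
        return -1;
    if (flock(fd, LOCK_EX | LOCK_NB) != 0)
        return 0;   // lock held elsewhere: another instance is running
    return 1;       // keep fd open; the lock lives as long as the process
}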
A more advanced method is to create and bind a unix domain socket using a predefined socket name. Bind succeeds for the first instance of your application. Again, the OS unbinds the socket when the application terminates for any reason. When bind() fails another instance of the application can connect() and use this socket to pass its command line arguments to the first instance.
Here is a solution in C++. It uses the socket recommendation of Maxim. I like this solution better than the file-based locking solution, because the file-based one fails if the process crashes and does not delete the lock file. Another user will not be able to delete the file and lock it. The sockets are automatically deleted when the process exits.
Usage:
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
... rest of the app
}
Code:
#include <netinet/in.h>
#include <unistd.h>
#include <cerrno>
#include <string>
#include <cstring>
#include <stdexcept>
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
For windows, a named kernel object (e.g. CreateEvent, CreateMutex). For unix, a pid-file - create a file and write your process ID to it.
You can create an "abstract namespace" AF_UNIX socket. This is completely Linux-specific, but has the advantage that no filesystem entry actually has to exist.
Read the man page for unix(7) for more info.
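A minimal sketch of that idea, assuming an arbitrary application-specific name (any unique string works):

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

// Bind an abstract AF_UNIX socket: sun_path starts with a NUL byte,
// so no filesystem entry is created and the kernel cleans up on exit.
// Returns 1 if we are the first instance, 0 if the name is taken.
int single_instance(void) {
    struct sockaddr_un addr;
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0)
        return -1;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path + 1, "my-app-single-instance", sizeof(addr.sun_path) - 2);
    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
        return 0;  // name already bound: another instance is running
    return 1;      // keep fd open for the lifetime of the process
}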
Avoid file-based locking
It is always good to avoid a file-based locking mechanism when implementing a singleton instance of an application. The user can always rename the lock file and run the application again, as follows:
mv lockfile.pid lockfile1.pid
Where lockfile.pid is the lock file whose existence is checked before running the application.
So it is always preferable to use a locking scheme on an object visible only to the kernel; anything that involves the filesystem is not reliable for this purpose.
So the best option would be to bind to an inet socket. Note that Unix domain sockets reside in the filesystem and are not reliable in this respect.
Alternatively, you can also do it using DBUS.
It seems not to have been mentioned yet: it is possible to create a mutex in shared memory, but it needs to be marked as process-shared via its attributes (not tested):
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
pthread_mutex_t *mutex = shmat(SHARED_MEMORY_ID, NULL, 0);
pthread_mutex_init(mutex, &attr);
There are also System V semaphores (but I failed to find out how to lock one):
int sem_id = semget(SHARED_MEMORY_KEY, 1, 0);
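For completeness: a System V semaphore is locked with semop(). A hedged sketch, assuming the semaphore set was created elsewhere and its value initialized to 1:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

// Acquire (P) semaphore 0 without blocking; SEM_UNDO releases it
// automatically if the process dies while holding it.
// Returns 0 on success, -1 (errno == EAGAIN) if already held.
int sysv_sem_trylock(int sem_id) {
    struct sembuf op = { .sem_num = 0, .sem_op = -1,
                         .sem_flg = IPC_NOWAIT | SEM_UNDO };
    return semop(sem_id, &op, 1);
}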
No one has mentioned it, but sem_open() creates a real named semaphore under modern POSIX-compliant OSes. If you give a semaphore an initial value of 1, it becomes a mutex (as long as it is strictly released only if a lock was successfully obtained).
With several sem_open()-based objects, you can create all of the common equivalent Windows named objects - named mutexes, named semaphores, and named events. Named events with "manual" set to true are a bit more difficult to emulate (each requires four semaphore objects to properly emulate CreateEvent(), SetEvent(), and ResetEvent()). Anyway, I digress.
Alternatively, there is named shared memory. You can initialize a pthread mutex with the "shared process" attribute in named shared memory and then all processes can safely access that mutex object after opening a handle to the shared memory with shm_open()/mmap(). sem_open() is easier if it is available for your platform (if it isn't, it should be for sanity's sake).
Regardless of the method you use, to test for a single instance of your application, use the trylock() variant of the wait function (e.g. sem_trywait()). If the process is the only one running, it will successfully lock the mutex. If it isn't, it will fail immediately.
Don't forget to unlock and close the mutex on application exit.
It will depend on which problem you want to avoid by forcing your application to have only one instance, and on the scope in which you consider instances.
For a daemon — the usual way is to have a /var/run/app.pid file.
For a user application, I've had more problems with applications that prevented me from running them twice than with accidentally running a second instance of an application that shouldn't have one. So the answer to "why and in which scope" is very important, and will probably bring answers specific to that why and that intended scope.
Here is a solution based on sem_open
/*
*compile with :
*gcc single.c -o single -pthread
*/
/*
* run multiple instance on 'single', and check the behavior
*/
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <semaphore.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#define SEM_NAME "/mysem_911"
int main()
{
sem_t *sem;
int rc;
sem = sem_open(SEM_NAME, O_CREAT, S_IRWXU, 1);
if(sem==SEM_FAILED){
printf("sem_open: failed errno:%d\n", errno);
exit(1);
}
rc=sem_trywait(sem);
if(rc == 0){
printf("Obtained lock !!!\n");
sleep(10);
//sem_post(sem);
sem_unlink(SEM_NAME);
}else{
printf("Lock not obtained\n");
}
}
One of the comments on a different answer says "I found sem_open() rather lacking". I am not sure about the specifics of what's lacking.
Based on the hints in Maxim's answer, here is my POSIX solution of a dual-role daemon (i.e. a single application that can act as the daemon and as a client communicating with that daemon). This scheme has the advantage of providing an elegant solution for the case where the instance started first should be the daemon and all subsequent invocations should just offload their work to that daemon. It is a complete example, but it lacks a lot of things a real daemon should do (e.g. using syslog for logging, fork to put itself into the background correctly, dropping privileges, etc.); it is already quite long but fully working as is. I have only tested this on Linux so far, but IIRC it should all be POSIX-compatible.
In the example, the clients can send integers (passed as the first command line argument and parsed by atoi) via the socket to the daemon, which prints them to stdout. With this kind of socket it is also possible to transfer arrays, structs, and even file descriptors (see man 7 unix).
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/un.h>
#define SOCKET_NAME "/tmp/exampled"
static int socket_fd = -1;
static bool isdaemon = false;
static bool run = true;
/* returns
* -1 on errors
* 0 on successful server bindings
* 1 on successful client connects
*/
int singleton_connect(const char *name) {
int len, tmpd;
struct sockaddr_un addr = {0};
if ((tmpd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) {
printf("Could not create socket: '%s'.\n", strerror(errno));
return -1;
}
/* fill in socket address structure */
addr.sun_family = AF_UNIX;
strcpy(addr.sun_path, name);
len = offsetof(struct sockaddr_un, sun_path) + strlen(name);
int ret;
unsigned int retries = 1;
do {
/* bind the name to the descriptor */
ret = bind(tmpd, (struct sockaddr *)&addr, len);
/* if this succeeds there was no daemon before */
if (ret == 0) {
socket_fd = tmpd;
isdaemon = true;
return 0;
} else {
if (errno == EADDRINUSE) {
ret = connect(tmpd, (struct sockaddr *) &addr, sizeof(struct sockaddr_un));
if (ret != 0) {
if (errno == ECONNREFUSED) {
printf("Could not connect to socket - assuming daemon died.\n");
unlink(name);
continue;
}
printf("Could not connect to socket: '%s'.\n", strerror(errno));
continue;
}
printf("Daemon is already running.\n");
socket_fd = tmpd;
return 1;
}
printf("Could not bind to socket: '%s'.\n", strerror(errno));
continue;
}
} while (retries-- > 0);
printf("Could neither connect to an existing daemon nor become one.\n");
close(tmpd);
return -1;
}
static void cleanup(void) {
if (socket_fd >= 0) {
if (isdaemon) {
if (unlink(SOCKET_NAME) < 0)
printf("Could not remove FIFO.\n");
} else
close(socket_fd);
}
}
static void handler(int sig) {
run = false;
}
int main(int argc, char **argv) {
switch (singleton_connect(SOCKET_NAME)) {
case 0: { /* Daemon */
struct sigaction sa;
sa.sa_handler = &handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGINT, &sa, NULL) != 0 || sigaction(SIGQUIT, &sa, NULL) != 0 || sigaction(SIGTERM, &sa, NULL) != 0) {
printf("Could not set up signal handlers!\n");
cleanup();
return EXIT_FAILURE;
}
struct msghdr msg = {0};
struct iovec iovec;
int client_arg;
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
while (run) {
int ret = recvmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret != sizeof(client_arg)) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
printf("Error while accessing socket: %s\n", strerror(errno));
exit(1);
}
printf("No further client_args in socket.\n");
} else {
printf("received client_arg=%d\n", client_arg);
}
/* do daemon stuff */
sleep(1);
}
printf("Dropped out of daemon loop. Shutting down.\n");
cleanup();
return EXIT_FAILURE;
}
case 1: { /* Client */
if (argc < 2) {
printf("Usage: %s <int>\n", argv[0]);
return EXIT_FAILURE;
}
struct iovec iovec;
struct msghdr msg = {0};
int client_arg = atoi(argv[1]);
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
int ret = sendmsg(socket_fd, &msg, 0);
if (ret != sizeof(client_arg)) {
if (ret < 0)
printf("Could not send device address to daemon: '%s'!\n", strerror(errno));
else
printf("Could not send device address to daemon completely!\n");
cleanup();
return EXIT_FAILURE;
}
printf("Sent client_arg (%d) to daemon.\n", client_arg);
break;
}
default:
cleanup();
return EXIT_FAILURE;
}
cleanup();
return EXIT_SUCCESS;
}
All credit goes to Mark Lakata. I merely did some very minor touch-up.
main.cpp
#include "singleton.hpp"
#include <iostream>
using namespace std;
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
// ... rest of the app
}
singleton.hpp
#include <netinet/in.h>
#include <unistd.h>
#include <cerrno>
#include <string>
#include <cstring>
#include <stdexcept>
using namespace std;
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
#include <windows.h>
int main(int argc, char *argv[])
{
// ensure only one running instance
HANDLE hMutexHandle = CreateMutex(NULL, TRUE, L"my.mutex.name");
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
return 0;
}
// rest of the program
ReleaseMutex(hMutexHandle);
CloseHandle(hMutexHandle);
return 0;
}
FROM: HERE
On Windows you could also create a shared data segment and use an interlocked function to test for the first occurrence, e.g.
#include <Windows.h>
#include <stdio.h>
#include <conio.h>
#pragma data_seg("Shared")
volatile LONG lock = 0;
#pragma data_seg()
#pragma comment(linker, "/SECTION:Shared,RWS")
int main()
{
if (InterlockedExchange(&lock, 1) == 0)
printf("first\n");
else
printf("other\n");
getch();
}
I have just written one, and tested it.
#include <fcntl.h>
#include <unistd.h>
#define PID_FILE "/tmp/pidfile"
static int create_pidfile(void) {
// O_EXCL makes creation atomic: open fails if the file already exists
int fd = open(PID_FILE, O_RDWR | O_CREAT | O_EXCL, 0644);
if (fd < 0)
return -1;
close(fd);
return 0;
}
int main(void) {
// make sure only one instance is running
if (create_pidfile() != 0)
return 0;
}
Just run this code on a separate thread:
void lock() {
while(1) {
ofstream closer("myapplock.locker", ios::trunc);
closer << "locked";
closer.close();
}
}
Run this as your main code:
int main() {
ifstream reader("myapplock.locker");
string s;
reader >> s;
if (s != "locked") {
//your code
}
return 0;
}

Message queue/shared memory method

I have a bit of a problem using the IPC (inter-process communication) program below.
Please let me explain:
I want to pass Linux commands such as "ls" or "wc file.txt" from a parent to a child, which executes them, using a message queue, and then have the child return the command output back to the parent process using shared memory.
But this is what I get: the parent process always gets the output one step behind, in the following fashion:
Step 1) ls file.txt
(Nothing shows up.)
Step 2) wc file.txt
(The output of the earlier command "ls file.txt" shows up here instead.)
Step 3) cat file.txt
(The output of the earlier command "wc file.txt" shows up instead.)
Any help is appreciated.
To compile: gcc -o program ./program.c
To run: ./program -v
Code:
#define BUFSZ 512
#define ERRBUFSZ 512
#define TIMEOUT_TIMEDIO 20
#define SHM_SIZE 5120
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
static sigjmp_buf jmpbuf;
int timed_io(char* buf, int len, FILE* rfp, int sec);
static void sigalrm_handler(int signo);
void do_cmd(char *buf, int len, int linenum, char *errbuf);
int parse_cmd(char *buf, char **vbuf, char *errbuf);
int process_cmd_ipc(char *argv, int linenum, char *errbuf);
struct my_msgbuf {
long mtype;
char mtext[256];
};
static void sigalrm_handler(int signo)
{
siglongjmp(jmpbuf, 1);
}
int timed_io(char* buf, int len, FILE* rfp, int sec)
{
struct sigaction nsigaction[1];
struct sigaction osigaction[1];
int prev_alrm;
int st = 0;
if(sigsetjmp(jmpbuf, 1) == 0)
{
nsigaction->sa_handler = sigalrm_handler;
sigemptyset(&nsigaction->sa_mask);
nsigaction->sa_flags = SA_RESTART;
prev_alrm = alarm(0);
sigaction(SIGALRM, nsigaction, osigaction);
alarm(sec);
if (fgets(buf, len, rfp) == NULL)
st = -1; // EOF
else
buf[strcspn(buf, "\n")] = 0; // strip the trailing newline
}
else { st = -2; } // Time-out
alarm(0); // Reset old alarm and handler
sigaction(SIGALRM, osigaction, 0);
return st;
}
int process_cmd_ipc(char *argv, int linenum, char* errbuf)
{
struct my_msgbuf buf;
int msqid, msqid_parent, st, shmid, str_len;
key_t key, key_shm;
char* shared_buf;
FILE *fd;
// create key for shared memory segment
if ((key_shm = ftok("shm_key.txt", 'R')) == -1) {
perror("ftok");
exit(1);
}
// Connect to shared memory segment
if ((shmid = shmget(key_shm, SHM_SIZE, 0644 | IPC_CREAT)) == -1)
{
perror("shmget");
exit(1);
}
// Attach to shared memory segment
shared_buf = shmat(shmid, (void *) 0, 0);
if (shared_buf == (char *) (-1)) {
perror("shmat");
exit(1);
}
// End of shared memory section` //
// Begin: message queue section
pid_t cpid=fork();
if (cpid<0) {
fprintf(stderr,"ERR: \"fork\" error! (Line=%d)\n", linenum);
exit (-1);
} else if (cpid==0) // child process
{ // Begin: message queue
if ((key = ftok("mysh.c", 'B')) == -1) {
perror("ftok");
exit(1);
}
if ((msqid = msgget(key, 0644)) == -1) {
perror("msgget from child");
exit(1);
}
memset(buf.mtext, 0, sizeof(buf.mtext)); // Clear buffer
if(msgrcv(msqid, &buf, sizeof(buf.mtext), 0, 0) == -1)
{
perror("msgrcv");
exit(1);
}
// End: message queue
// begin: shared memory segment
memset(shared_buf, 0, SHM_SIZE); // zeroize shared_buf
fd = popen(buf.mtext, "r");
str_len = 0;
while(fgets(shared_buf + str_len, SHM_SIZE - str_len, fd) != NULL)
{ str_len = strlen(shared_buf); }
pclose(fd);
// end: shared memory segment
}
else { // parent
// Begin - message queue
if ((key = ftok("mysh.c", 'B')) == -1) {
perror("ftok");
exit(1);
}
if ((msqid_parent = msgget(key, 0644 | IPC_CREAT)) == -1) {
perror("msgget from parent");
exit(1);
}
buf.mtype = 1;
strncpy(buf.mtext, argv, sizeof(buf.mtext) - 1);
buf.mtext[sizeof(buf.mtext) - 1] = '\0'; // ensure NUL termination
if(msgsnd(msqid_parent, (struct my_msgbuf*) &buf, strlen(buf.mtext), 0) == -1)
perror("msgsnd");
// End - message queue
// Begin - shared memory
// usleep(10000);
// NOTE: nothing here waits for the child, so shared_buf still holds
// the previous command's output when this printf runs.
printf("%s", shared_buf);
// End - shared memory
} // if-else fork
return 0;
}
int parse_cmd(char *buf, char **vbuf, char *errbuf)
{
int i=0;
char *delim=" ,\t\n";
char *tok;
tok=strtok(buf,delim);
while (tok) {
vbuf[i]=(char *)malloc(BUFSZ*sizeof(char));
strcpy(vbuf[i],tok);
tok=strtok(NULL,delim);
i++;
}
vbuf[i]=0;
return i;
}
void do_cmd(char *buf, int len, int linenum, char *errbuf) {
int i=0; int numargs;
char *vbuf[128];
char* copy = (char *) malloc(strlen(buf) + 1);
int maxargs=sizeof(vbuf)/sizeof(char *);
strcpy(copy, buf);
numargs = parse_cmd(copy,vbuf,errbuf);
process_cmd_ipc(buf,linenum, errbuf);
for (i=0;i<numargs; i++) { free(vbuf[i]); }
free(copy);
copy = NULL;
return;
}
int main(int argc, char **argv)
{
int i; int st; int linenum=0;
char *buf=(char *)malloc(BUFSZ*sizeof(char));
char *errbuf=(char *)calloc(ERRBUFSZ,sizeof(char));
char *mysh = "";
FILE *rfp=stdin;
if (isatty(fileno(rfp))) {
mysh = "mysh (Ctrl-C to exit)>";
fprintf(stderr,"%s",mysh);
}
while(1)
{
st = timed_io(buf, BUFSZ, stdin, TIMEOUT_TIMEDIO);
if (st != 0)
{
fprintf(stderr, "ERR: No input %s (Status=%d)\n", errbuf, st);
return -1;
}
else
{
linenum++;
if (*buf)
{ do_cmd(buf, BUFSZ, linenum,errbuf); }
if (*mysh)
fprintf(stderr,"%s",mysh);
}
}
}
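The one-step-behind output matches the missing synchronization flagged in the comment above: each printf shows whatever the previous child left in the segment. A minimal, self-contained sketch of the pattern that avoids it, assuming the parent simply waits for the child before reading (names are illustrative):

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void) {
    int shmid = shmget(IPC_PRIVATE, 4096, 0644 | IPC_CREAT);
    char *buf = shmat(shmid, NULL, 0);
    pid_t pid = fork();
    if (pid == 0) {              // child: fill the segment, then exit
        snprintf(buf, 4096, "output of the command\n");
        shmdt(buf);
        _exit(0);                // don't fall back into the parent's loop
    }
    waitpid(pid, NULL, 0);       // parent: wait until the child is done
    printf("%s", buf);           // guaranteed to see this command's output
    shmdt(buf);
    shmctl(shmid, IPC_RMID, NULL);
    return 0;
}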

Cannot get a proper response from select() using writefds

The parent receives SIGPIPE when sending chars to an aborted child process through a FIFO pipe.
I am trying to avoid this using the select() function. In the attached sample code,
select() returns OK even after the child at the other end of the pipe has been terminated.
Tested on:
RedHat EL5 (Linux 2.6.18-194.32.1.el5)
GNU C Library stable release version 2.5
Any help appreciated. Thank you.
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/stat.h>
#include <unistd.h>
static void sigpipe_fct(int);
int main()
{
struct stat st;
int i, fd_out, fd_in, child;
char buf[1024];
#define p_out "/tmp/pout"
signal(SIGPIPE, sigpipe_fct);
if (stat(p_out, &st) != 0) {
mknod(p_out, S_IFIFO, 0);
chmod(p_out, 0666);
}
/* start receiving process */
if ((child = fork()) == 0) {
if ((fd_in = open(p_out, O_RDONLY)) < 0) {
perror(p_out);
exit(1);
}
while(1) {
i = read(fd_in, buf, sizeof(buf));
fprintf(stderr, "child %d read %.*s\n", getpid(), i, buf);
lseek(fd_in, 0, 0);
}
}
else {
fprintf(stderr,
"reading from %s - exec \"kill -9 %d\" to test\n", p_out, child);
if ((fd_out = open(p_out, O_WRONLY | O_NDELAY)) < 0) { /* output */
perror(p_out);
exit(1);
}
while(1) {
if (SelectChkWrite(fd_out) == fd_out) {
fprintf(stderr, "SelectChkWrite() success write abc\n");
write(fd_out, "abc", 3);
}
else
fprintf(stderr, "SelectChkWrite() failed\n");
sleep(3);
}
}
}
static void sigpipe_fct(int sig)
{
fprintf(stderr, "SIGPIPE received\n");
exit(-1);
}
int SelectChkWrite(int ch)
{
#include <sys/select.h>
fd_set writefds;
int i;
FD_ZERO(&writefds);
FD_SET (ch, &writefds);
i = select(ch + 1, NULL, &writefds, NULL, NULL);
if (i == -1)
return(-1);
else if (FD_ISSET(ch, &writefds))
return(ch);
else
return(-1);
}
From the Linux select(3) man page:
A descriptor shall be considered ready for writing when a call to an
output function with O_NONBLOCK clear would not block, whether or not
the function would transfer data successfully.
When the pipe is closed, it won't block, so it is considered "ready" by select.
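Given that, select() alone won't reveal the vanished reader. A common pattern is to ignore SIGPIPE and detect EPIPE from write() instead; a minimal sketch:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

// Returns 0 on success, -1 if the read end is gone (EPIPE) or on error.
int safe_fifo_write(int fd, const void *buf, size_t len) {
    signal(SIGPIPE, SIG_IGN);    // typically done once at startup:
                                 // write() then fails with EPIPE
                                 // instead of raising a signal
    if (write(fd, buf, len) < 0) {
        if (errno == EPIPE)
            fprintf(stderr, "reader has gone away\n");
        return -1;
    }
    return 0;
}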
BTW, having #include <sys/select.h> inside your SelectChkWrite() function is extremely bad form.
Although select() and poll() are both in the POSIX standard, select() is much older and more limited than poll(). In general, I recommend people use poll() by default and only use select() if they have a good reason. (See here for one example.)
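For comparison, a sketch of the same writability check with poll(), which reports a closed pipe distinctly via POLLERR/POLLHUP in revents:

#include <poll.h>

// Returns fd if writable and healthy, -1 on error, hangup, or poll failure.
int PollChkWrite(int fd) {
    struct pollfd p = { .fd = fd, .events = POLLOUT };
    if (poll(&p, 1, -1) <= 0)
        return -1;
    if (p.revents & (POLLERR | POLLHUP))
        return -1;                 // peer closed: don't write
    return (p.revents & POLLOUT) ? fd : -1;
}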
