I have code that's worked without change since the 90s that is now getting Permission denied on Linux when trying to turn off the Nagle algorithm. Reading the man pages and a Google search don't indicate why. Any ideas?
int iFlags, iSize;
/* NOTE: Sol 2.8 header requires socklen_t but man page says int! */
int iSizeSize = sizeof( iSize );
#ifdef _WIN32
unsigned long ulMSDummy;
if ( ioctlsocket( iFD, FIONBIO, (u_long FAR*) &ulMSDummy ) != 0 ) {
printf( "%s: ioctlsocket( %s:%d, FIONBIO, 1 ): %s",
pszName, pszAddr, iPort, strerror(errno));
return -1;
}
#else
if ( ( iFlags = fcntl( iFD, F_GETFL, 0 ) ) < 0 ) {
AKWarn( "%s: fcntl( %s:%d, F_GETFL ): %s",
pszName, pszAddr, iPort, strerror(errno));
return -1;
}
// NOTE: O_NDELAY may need to be changed to FNDELAY on some
// platforms (which does the same thing) or O_NONBLOCK (which may
// cause AKread() to return different values when there's no data).
// Any of these three make the socket non-blocking, which is
// DIFFERENT from TCP_NODELAY (see below).
if ( fcntl( iFD, F_SETFL, iFlags | O_NDELAY ) < 0 ) {
printf( "%s: fcntl( %s:%d, F_SETFL, +NDELAY ): %s",
pszName, pszAddr, iPort, strerror(errno));
return -1;
}
#endif
// NOTE: TCP_NODELAY is unrelated to the various NDELAY/NONBLOCK
// options (above). Instead, it disables the "Nagle Algorithm",
// which caches tiny packets.
// NOTE: This option hardcodes a tradeoff for less latency and more
// packets. Actually this could be a configuration parameter.
iFlags = 1;
if ( setsockopt( iFD, SOL_SOCKET, TCP_NODELAY,
(char*) &iFlags, sizeof( int ) ) ) {
printf( "%s: setsockopt( %s:%d, TCP_NODELAY, %d ): %s",
pszName, pszAddr, iPort, iFlags, strerror(errno) );
#ifndef __linux__
return -1; // giving Permission denied on Linux???
#endif
}
if ( setsockopt( iFD, SOL_SOCKET, TCP_NODELAY, ...
This is simply wrong from the start: it should be IPPROTO_TCP, not SOL_SOCKET. These are different constants, so the call likely never worked properly before, i.e. it did something other than what you intended. On Linux, the numeric value of TCP_NODELAY happens to collide with SO_DEBUG at the SOL_SOCKET level, and setting SO_DEBUG requires elevated privileges, which would explain the Permission denied.
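A minimal sketch of the corrected call, reusing the variable names from the question (note that TCP_NODELAY is declared in <netinet/tcp.h>):
/* level must be IPPROTO_TCP for TCP-level options */
iFlags = 1;
if ( setsockopt( iFD, IPPROTO_TCP, TCP_NODELAY,
                 (char*) &iFlags, sizeof( int ) ) < 0 ) {
    printf( "%s: setsockopt( %s:%d, TCP_NODELAY, %d ): %s",
            pszName, pszAddr, iPort, iFlags, strerror(errno) );
    return -1;
}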
I try to run a TCP client without a server. The idea is simply to periodically try to connect.
For this, the client tries to connect to port 1500 on localhost.
Piece of code:
// Create socket
if ((create_socket=socket (AF_INET, SOCK_STREAM, PF_UNSPEC)) > 0)
printf ("Socket created\n");
address.sin_family = AF_INET;
address.sin_port = htons (1500);
inet_aton (argv[1], &address.sin_addr);
// Connect to server
connect ( create_socket,
(struct sockaddr *) &address,
sizeof (address));
FD_ZERO(&fdset);
FD_SET(create_socket, &fdset);
tv.tv_sec = 2; /* 2 seconds timeout */
tv.tv_usec = 0;
rv = select(create_socket + 1, NULL, &fdset, NULL, &tv);
if (rv == 1)
{
int so_error;
socklen_t len = sizeof so_error;
getsockopt(create_socket, SOL_SOCKET, SO_ERROR, &so_error, &len);
if (so_error == 0)
{
printf ("Connection with server (%s) established \n",
inet_ntoa (address.sin_addr));
}
else
{
printf("Error on connect: unsuccessfull\n");
close (create_socket);
continue;
}
}
else if (rv == 0)
{
printf("Timeout on connect\n");
close (create_socket);
continue;
}
else
{
printf("Error on connect\n");
close (create_socket);
continue;
}
I've set it up in Ubuntu 18.04 on WSL. There, the code waits in select for the defined timeout of 2 seconds and returns the appropriate values (0 on timeout, 1 on connect).
The return value of connect is -1 on both WSL and VMware.
In Ubuntu 18 (VMware) there is no pause on that line. Even without any server listening on that port, I immediately get a return value of 1.
Why is there this difference?
There is a similar behavior later on in that code:
tv.tv_sec = 2;
tv.tv_usec = 0;
if (setsockopt(create_socket, SOL_SOCKET, SO_RCVTIMEO, (const char*)&tv, sizeof tv) < 0)
{
printf("Error on setsockopt SO_RCVTIMEO");
exit(EXIT_FAILURE);
}
// INNER LOOP: Receive data
do
{
size = recv(create_socket, buffer, BUF-1, 0);
if( size > 0)
{
buffer[size] = '\0';
printf ("Message received: %s\n", buffer);
}
else if (size == -1)
{
// on VMware, errno is 107 if there is no server, but reaching this line was not intended
printf ("Timeout\n");
}
else //
{
printf("Server offline\n");
// GO BACK TO OUTER LOOP and reconnect
break;
}
Here, in WSL the recv takes up to 2 seconds while waiting for any incoming data (but only if the aforementioned block (connect, select) indicates a valid connection).
In VMware I get the feedback directly, even without a connection.
Does it simply work on WSL by chance?
The argument contains the server IP and is 127.0.0.1.
lsof shows no connection.
Update 2020-11-18
Here's the full code as requested by Bodo
#include <iostream>
#include <vector>
#include <string>
#include <sys/types.h>
#include <sys/socket.h>
#include <cstring>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <cstdio>
#include <cstdlib>
#include <cerrno>
#define BUF 1024
using namespace std;
int main (int argc, char **argv) {
int create_socket;
char *buffer = (char*)malloc(BUF);
struct sockaddr_in address;
int size;
int rv;
struct timeval tv;
fd_set fdset;
// HERE STARTS THE OUTER LOOP - Connect and restart connection
do
{
// Create socket
if ((create_socket=socket (AF_INET, SOCK_STREAM, PF_UNSPEC)) > 0)
printf ("Socket created\n");
address.sin_family = AF_INET;
address.sin_port = htons (15000);
inet_aton ("127.0.0.1", &address.sin_addr);
// Connect to server
int flags = fcntl(create_socket, F_GETFL, 0);
if (flags == -1) return false;
rv = connect ( create_socket,
(struct sockaddr *) &address,
sizeof (address));
printf ("Connect. rv = %i\n", rv);
if (rv == -1)
{
switch (errno)
{
case ECONNREFUSED: printf ("errno = %i (ECONNREFUSED)\n", errno); break;
default: printf ("errno = %i (ECONNREFUSED)\n", errno); break;
}
}
FD_ZERO(&fdset);
FD_SET(create_socket, &fdset);
tv.tv_sec = 2;
tv.tv_usec = 0;
rv = select(create_socket + 1, NULL, &fdset, NULL, &tv);
if (rv == 1)
{
int so_error;
socklen_t len = sizeof so_error;
getsockopt(create_socket, SOL_SOCKET, SO_ERROR, &so_error, &len);
if (so_error == 0)
{
printf ("Connection with server (%s) established \n",
inet_ntoa (address.sin_addr));
}
else
{
printf("Error on connect: unsuccessfull\n");
close (create_socket);
continue;
}
}
else if (rv == 0)
{
printf("Timeout on connect\n");
close (create_socket);
continue;
}
else
{
printf("Error on connect\n");
close (create_socket);
continue;
}
if (setsockopt(create_socket, SOL_SOCKET, SO_RCVTIMEO, (const char*)&tv, sizeof tv) < 0)
{
printf("Error on setsockopt SO_RCVTIMEO");
exit(EXIT_FAILURE);
}
// INNER LOOP: Receive data
do
{
size = recv(create_socket, buffer, BUF-1, 0);
if( size > 0)
{
buffer[size] = '\0';
printf ("Data received: %s\n", buffer);
}
else if (size == -1)
{
printf ("Timeout\n");
}
else //
{
printf("Server offline\n");
// GO BACK TO OUTER LOOP and reconnect
break;
}
} while (strcmp (buffer, "quit\n") != 0);
close (create_socket);
} while (strcmp (buffer, "quit\n") != 0);
return EXIT_SUCCESS;
}
In WSL the output is
Socket created
Connect. rv = -1
errno = 111 (ECONNREFUSED)
then nothing for 2 seconds
afterwards
Timeout on connect
Socket created
Connect. rv = -1
errno = 111 (ECONNREFUSED)
and again nothing for 2 seconds ...
Output in VMware
Socket created
Connect. rv = -1
errno = 111 (ECONNREFUSED)
Connection with server (127.0.0.1) established
Timeout
Timeout
Timeout
Timeout
Here no timeout ever elapses.
The idea of the timeout was to try to connect on a regular basis, but not as fast as possible.
Obviously something is wrong when errno = 111 (ECONNREFUSED) is followed by Connection with server (127.0.0.1) established.
When connect returns -1 and errno is NOT EINPROGRESS, you should not use select and getsockopt(...SO_ERROR...). According to https://man7.org/linux/man-pages/man2/connect.2.html, this pattern is only documented for EINPROGRESS.
Both on real Linux and on WSL you get errno = 111 (ECONNREFUSED) after a failed connect. I consider the timeout in WSL wrong, as the error (connection refused) was already reported, so it does not make sense to wait for a result. But as the behavior is not specified, it may be implementation dependent.
If you want a delay before the next connection attempt, you should not use select but, for example, sleep followed by repeating the loop.
I suggest something like this:
rv = connect ( create_socket,
(struct sockaddr *) &address,
sizeof (address));
printf ("Connect. rv = %i\n", rv);
if (rv == -1)
{
switch (errno)
{
case ECONNREFUSED: printf ("errno = %i (ECONNREFUSED) %s\n", errno, strerror(errno)); break;
default: printf ("errno = %i (other) %s\n", errno, strerror(errno)); break;
}
if(errno != EINPROGRESS)
{
sleep(10); // choose a suitable delay before the next connection attempt
continue;
}
}
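For completeness: the select()/SO_ERROR pattern is meant for a non-blocking connect, where connect() itself returns -1 with errno set to EINPROGRESS. A minimal sketch of that variant, reusing create_socket, address, fdset and tv from the code above:
// put the socket into non-blocking mode before connecting
int flags = fcntl(create_socket, F_GETFL, 0);
fcntl(create_socket, F_SETFL, flags | O_NONBLOCK);
rv = connect(create_socket, (struct sockaddr *)&address, sizeof(address));
if (rv == -1 && errno == EINPROGRESS)
{
    // attempt in flight: waiting with select and then checking SO_ERROR
    // is the documented way to get the result with a timeout
    FD_ZERO(&fdset);
    FD_SET(create_socket, &fdset);
    tv.tv_sec = 2;
    tv.tv_usec = 0;
    rv = select(create_socket + 1, NULL, &fdset, NULL, &tv);
    if (rv == 1)
    {
        int so_error;
        socklen_t len = sizeof so_error;
        getsockopt(create_socket, SOL_SOCKET, SO_ERROR, &so_error, &len);
        if (so_error == 0)
            printf("Connection established\n");
    }
}
else if (rv == -1)
{
    // immediate failure (e.g. ECONNREFUSED): close, sleep, retry
}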
This question follows on from the following:
Communicating between NodeJS and C using node-ipc and unix sockets
In regards to the accepted solution (https://stackoverflow.com/a/39848936/1834057), I was wondering if someone might be able to clarify exactly how to send data from C to Node.js. The solution demonstrates sending data from Node.js to C, but not in reverse. I have an application that requires two-way communications, so the missing component is critical for me.
My understanding of unix sockets is that one of write, send or sendmsg should be able to do the job; however, I am not having any luck. If this understanding is incorrect, please advise.
In order to get a trivial example running, let's say that when a message is read in the C code, we send back a message and try to trigger the ipc.of[socketId].on('message',...) event on the node server.
Which means I am trying to turn this:
while ( (rc=read(cl,buf,sizeof(buf))) > 0) {
printf("read %u bytes: %.*s\n", rc, rc, buf);
}
Into this:
while ( (rc=read(cl,buf,sizeof(buf)) ) > 0) {
printf("read %u bytes: %.*s\n", rc, rc, buf);
//Respond to the node server
int n;
char * msg = "{\"type\":\"message\",\"data\":\"hello response\"}\t";
if((n = write(fd,msg,sizeof(msg))) < 0){
printf("send failed\n");
}else{
printf("sent %d bytes\n", n);
}
}
This would mean that the complete server.c code now becomes:
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <stdlib.h>
#include <string.h> //Missing from original server.c
char *socket_path = "/tmp/icp-test";
int main(int argc, char *argv[]) {
struct sockaddr_un addr;
char buf[100];
int fd,cl,rc;
if (argc > 1) socket_path=argv[1];
if ( (fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
perror("socket error");
exit(-1);
}
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
if (*socket_path == '\0') {
*addr.sun_path = '\0';
strncpy(addr.sun_path+1, socket_path+1, sizeof(addr.sun_path)-2);
} else {
strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path)-1);
unlink(socket_path);
}
if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
perror("bind error");
exit(-1);
}
if (listen(fd, 5) == -1) {
perror("listen error");
exit(-1);
}
while (1) {
if ( (cl = accept(fd, NULL, NULL)) == -1) {
perror("accept error");
continue;
}
while ( (rc=read(cl,buf,sizeof(buf)) ) > 0) {
printf("read %u bytes: %.*s\n", rc, rc, buf);
//Respond to the node server
int n;
char * msg = "{\"type\":\"message\",\"data\":\"hello response\"}\t";
if((n = write(fd,msg,sizeof(msg))) < 0){
printf("send failed\n");
}else{
printf("sent %d bytes\n", n);
}
}
if (rc == -1) {
perror("read");
exit(-1);
}
else if (rc == 0) {
printf("EOF\n");
close(cl);
}
}
return 0;
}
Now unfortunately, the write call returns -1 for me, and the message is not received by the node.js server.
The client.js code remains unchanged, and is as provided in the original question.
Can someone please clarify what I am doing wrong?
You have to change
char * msg = "{\"type\":\"message\",\"data\":\"hello response\"}\t";
if((n = write(fd,msg,sizeof(msg))) < 0){
printf("send failed\n");
}else{
printf("sent %d bytes\n", n);
}
to
char * msg = "{\"type\":\"message\",\"data\":\"hello response\"}\f";
if((n = write(cl,msg,strlen(msg))) < 0){
printf("send failed\n");
}else{
printf("sent %d bytes\n", n);
}
The library is waiting for \f at the end of the message :-) Note the other two fixes: write to the connected socket cl instead of the listening socket fd (which is why your write returned -1), and use strlen(msg) instead of sizeof(msg), which is only the size of a char pointer.
hope the answer is not too late :-)
I am using a Linux system, not a Windows system. I've posted some code below. Please bear in mind that this code was never intended to be "production quality."
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <netdb.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define PORT 9909
void die ( const char *fmt, ... )
{
va_list vargs;
va_start( vargs, fmt );
vfprintf( stderr, fmt, vargs );
va_end( vargs );
exit( 1 );
}
int main ( int argc, char **argv )
{
/* *** */
int listener = socket( PF_INET, SOCK_STREAM, 0 );
if( listener < 0 ) die( "socket(listener)" );
int flag = 1;
if( setsockopt( listener, SOL_SOCKET, SO_REUSEADDR, (char*)&flag, sizeof(int) ) < 0 )
die( "setsockopt()" );
struct sockaddr_in svr_addr;
memset( &svr_addr, 0, sizeof(struct sockaddr) );
svr_addr.sin_family = PF_INET;
svr_addr.sin_port = htons( PORT );
svr_addr.sin_addr.s_addr = INADDR_ANY;
if( bind( listener, (struct sockaddr*)&svr_addr, (socklen_t)sizeof(struct sockaddr) ) < 0 )
die( "bind()" );
if( listen( listener, 10 ) < 0 )
die( "listen()" );
/* *** */
fd_set fd_master;
fd_set fd_select;
int fd_max = listener;
FD_ZERO( &fd_master );
FD_ZERO( &fd_select );
FD_SET( listener, &fd_master );
while( 1 )
{
fd_select = fd_master;
if( select( fd_max + 1, &fd_select, NULL, NULL, NULL ) < 0 )
die( "select()" );
for( int ifd = 0; ifd <= fd_max; ++ifd )
{
if( ! FD_ISSET( ifd, &fd_select ) ) continue;
struct sockaddr_in cli_addr; memset( &cli_addr, 0, sizeof(cli_addr) );
socklen_t cli_alen = sizeof(cli_addr);
if( ifd == listener )
{
int cli = accept( listener, (struct sockaddr*)&cli_addr, &cli_alen );
if( cli < 0 ) die( "accept()" );
FD_SET( cli, &fd_master );
if( cli > fd_max ) fd_max = cli;
printf( "new connection> %s:%u\n", inet_ntoa( cli_addr.sin_addr ), ntohs( cli_addr.sin_port ) );
fflush( stdout );
}
else
{
char buf[256];
cli_alen = sizeof(cli_addr);
ssize_t nbytes = recvfrom( ifd, buf, sizeof(buf), 0, (struct sockaddr*)&cli_addr, &cli_alen );
if( nbytes <= 0 )
{
close( ifd );
FD_CLR( ifd, &fd_master );
if( nbytes == 0 )
printf( "connection hung up> %u\n", ifd );
else
printf( "recvfrom() : %s\n", strerror( errno ) );
fflush( stdout );
}
else
{
// build a "from identifier" for each of the recipients
char msg[sizeof(buf) * 2];
sprintf( msg, "%s:%u> ", inet_ntoa( cli_addr.sin_addr ), ntohs( cli_addr.sin_port ) );
memcpy( msg + strlen( msg ), buf, nbytes );
nbytes += strlen( msg );
// send incoming data to all clients (excluding the originator)
for( int ofd = 0; ofd <= fd_max; ++ofd )
{
if( FD_ISSET( ofd, &fd_master ) )
if( ofd != listener && ofd != ifd )
if( send( ofd, msg, nbytes, 0 ) < 0 )
{ printf( "send() %s\n", strerror( errno ) ); fflush( stdout ); }
}
}
}
}
}
return 0;
}
When the code is run and you connect from two or more clients (via telnet), each message shows the sender as "0.0.0.0" with a port of 0.
The Windows documentation for recvfrom() states "[t]he from and fromlen parameters are ignored for connection-oriented sockets." The Linux and POSIX documentation makes no such claim and goes as far as to say that recvfrom() "...may be used to receive data on a socket whether or not it is connection-oriented." Nowhere does it say that src_addr and addrlen will be ignored ... so I would expect these to be filled in.
On connected sockets you have to call getpeername and then carry on with your inet_ntoa (consider using inet_ntop instead, as it supports multiple address families). As per the man pages:
int getpeername(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len);
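A minimal sketch of how the receive branch could build the "from identifier", reusing ifd and msg from the question's code (everything else assumed unchanged):
struct sockaddr_in peer;
socklen_t peer_len = sizeof(peer);
char ip[INET_ADDRSTRLEN];
// on a connected TCP socket, getpeername() fills in the remote address
if ( getpeername( ifd, (struct sockaddr*)&peer, &peer_len ) == 0 &&
     inet_ntop( AF_INET, &peer.sin_addr, ip, sizeof(ip) ) != NULL )
    sprintf( msg, "%s:%u> ", ip, ntohs( peer.sin_port ) );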
Nowhere does it say that src_addr and addrlen will be ignored.
That is simply untrue. It says
If src_addr is not NULL, and the underlying protocol provides the source address, this source address is filled in. [emphasis added]
You can argue about whether TCP can be said to provide the source address, but you can't claim 'nowhere does it say ...'.
I'm writing a program to communicate with an existing device via serial port, and I am noticing a weird pattern. This is being tested with both a real serial port, and a USB-to-serial adapter. I get the same results for both.
Immediately after booting up the computer or plugging in the adapter, serial port communication works fine. I can send binary data to the device, and get a response back. The program can continue to communicate with the device as long as it wants.
However, once the program ends (cleanly closing the port), running the program again results in failure to communicate. It can access the serial port just fine, but all it gets back is garbage.
(Oddly, the garbage appears to be binary data mixed with modem commands like ATE0Q0S0=0, which makes no sense. Again, I don't need to reset the device to communicate with it, just the port, so I don't know where this is coming from.)
Power-cycling or unplugging the device has no effect. It is only when I reboot the computer, or reset the USB device (via unplug, or driver reset), that I can run the program once more and get it to communicate successfully.
What would cause this? From the results, I can only assume that the serial port is not being left in a clean state after use, but I can find no documentation about properly cleaning the serial port state, other than re-applying ioctl attributes and closing the file descriptor after use, both of which I already do.
Maybe a serial port pin gets left on or something? I don't know how I would test for that, or why it would even happen.
My current "solution" is to just stick with the USB adapter, and have my program perform a USB driver reset before attempting to use the serial port, but I'm hoping there is a better solution.
Edit
As requested, here is the C program I'm using to test the serial port read/write.
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <time.h>
#include <unistd.h>
// Saved termios that we can re-apply when we exit
struct termios savedPortAttributes;
// Set the serial port attributes so we can use it
void setPortAttributes( int fd )
{
struct termios tty;
memset( &tty, 0, sizeof(tty) );
if ( tcgetattr( fd, &tty ) != 0 ) {
printf( "tcgetaddr error: $i\n", errno );
return;
}
cfsetispeed( &tty, B9600 );
cfsetospeed( &tty, B9600 );
cfmakeraw( &tty );
tty.c_lflag = 0;
tty.c_oflag = 0;
tty.c_iflag &= ~(IXON | IXOFF | IXANY);
tty.c_cflag |= (CLOCAL | CREAD);
tty.c_cflag &= ~(PARENB | PARODD);
tty.c_cflag &= ~CSTOPB;
tty.c_cflag &= ~CRTSCTS;
tty.c_cc[VMIN] = 0;
tty.c_cc[VTIME] = 5;
if ( tcsetattr( fd, TCSANOW, &tty ) != 0 ) {
printf( "tcsetaddr error: $i\n", errno );
return;
}
if ( tcflush( fd, TCIOFLUSH ) != 0 ) {
printf( "tcflush error: $i\n", errno );
return;
}
}
void test( int fd )
{
// Send a sample MODBUS command
printf( "Writing command\n" );
char sendBuffer[] = { 0x01, 0x03, 0x00, 0x0B, 0x00, 0x02, 0xB5, 0xC9 };
int bytesWritten = write( fd, sendBuffer, sizeof(sendBuffer) );
if ( bytesWritten < 0 ) {
printf( "Error writing command.\n" );
return;
}
// We don't want to wait more than 1000ms for a response
struct timespec spec;
clock_gettime( CLOCK_MONOTONIC, &spec );
int64_t startMs = spec.tv_sec * 1000 + round( spec.tv_nsec / 1.0e6 );
// Read data back from the port
printf( "Reading from port...\n" );
unsigned char buffer[1024];
int bufferOffset = 0;
int count = 0;
while ( 1 ) {
count = read( fd, &buffer[bufferOffset], sizeof(buffer) - bufferOffset );
if ( count < 0 ) {
printf( "Error reading command.\n" );
return;
}
if ( count > 0 ) {
printf( "Bytes read: " );
for ( int i = bufferOffset; i < bufferOffset + count; i++ ) {
printf( "%02x ", buffer[i] );
}
printf( "\n" );
}
bufferOffset += count;
// Test code. If we receive part of a valid MODBUS response, grab the
// field length byte so we know if we're done reading
if ( bufferOffset >= 3 && buffer[0] == 1 && buffer[1] == 3 ) {
int messageLength = buffer[2];
if ( bufferOffset >= messageLength + 5 ) {
break;
}
}
// If it's been 1000ms, stop reading
clock_gettime( CLOCK_MONOTONIC, &spec );
int64_t timeMs = spec.tv_sec * 1000 + round( spec.tv_nsec / 1.0e6 );
//printf( "%" PRId64 " , %" PRId64 "\n", startMs, timeMs );
if ( timeMs - startMs > 1000 ) {
break;
}
}
}
int main( void )
{
printf( "Opening port\n" );
int fd = open( "/dev/ttyUSB0", O_RDWR|O_NOCTTY );
if ( fd == -1 ) {
printf( "Unable to open port.\n" );
return 1;
}
tcgetattr( fd, &savedPortAttributes );
setPortAttributes( fd );
test( fd );
test( fd );
tcsetattr( fd, TCSANOW, &savedPortAttributes );
close( fd );
return 0;
}
I'm trying to use UNIX sockets for inter-thread communication. The program is only intended to run on Linux. To avoid creating the socket files, I wanted to use "abstract" sockets, as documented in unix(7).
However, I don't seem to be able to connect to these sockets. Everything works if I'm using "pathname" sockets, though.
Here is the code (I haven't quoted any error handling, but it's done):
thread#1:
int log_socket = socket(AF_LOCAL, SOCK_STREAM, 0);
struct sockaddr_un logaddr;
socklen_t sun_len = sizeof(struct sockaddr_un);
logaddr.sun_family = AF_UNIX;
logaddr.sun_path[0] = 0;
strcpy(logaddr.sun_path+1, "futurama");
bind(log_socket, &logaddr, sun_len);
listen(log_socket, 5);
accept(log_socket, &logaddr, &sun_len);
... // send - receive
thread#2:
struct sockaddr_un tolog;
int sock = socket(AF_LOCAL, SOCK_STREAM, 0);
tolog.sun_family = AF_UNIX;
tolog.sun_path[0] = 0;
strcpy(tolog.sun_path+1, "futurama");
connect(sock, (struct sockaddr*)&tolog, sizeof(struct sockaddr_un));
If all I do in the above code is change the sun_path to not have a leading \0, things work perfectly.
strace output:
t1: socket(PF_FILE, SOCK_STREAM, 0) = 0
t1: bind(0, {sa_family=AF_FILE, path=#"futurama"}, 110)
t1: listen(0, 5)
t2: socket(PF_FILE, SOCK_STREAM, 0) = 1
t2: connect(1, {sa_family=AF_FILE, path=#"futurama"}, 110 <unfinished ...>
t2: <... connect resumed> ) = -1 ECONNREFUSED (Connection refused)
t1: accept(0, <unfinished ...>
I know that the connect comes before accept, that's not an issue (I tried making sure that accept() is called before connect(), same result. Also, things are fine if the socket is "pathname" anyway).
While I was posting this question, and re-reading unix(7) man page, this wording caught my attention:
an abstract socket address is distinguished by the fact
that sun_path[0] is a null byte (’\0’). All of the remaining bytes
in sun_path define the "name" of the socket
So, if I bzero'ed the sun_path before filling in my name, things started to work. I figured that's not necessarily straightforward, so it is worth spelling out. Additionally, as rightfully pointed out by @davmac and @StoneThrow, the number of those "remaining bytes" can be reduced by passing only enough of the structure's length to cover the bytes you want to consider as your address. One way to do that is to use the SUN_LEN macro; however, the first byte of sun_path will then have to be non-zero, as SUN_LEN uses strlen.
Elaboration
If sun_path[0] is \0, the kernel uses the entirety of the remainder of sun_path as the name of the socket, whether it's \0-terminated or not, so all of that remainder counts. In my original code I would zero the first byte and then strcpy() the socket name into sun_path at position 1. Whatever gibberish was in the rest of sun_path when the structure was allocated (especially likely since it was allocated on the stack) was included in the length of the socket structure as passed to the syscalls, counted as part of the name, and was different in bind() and connect().
IMHO, strace should fix the way it displays abstract socket names and show all the sun_path bytes from 1 up to the supplied structure length whenever sun_path[0] is 0.
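A minimal sketch of the fix, assuming the same log_socket and "futurama" name as in the question: zero the whole structure, then pass a length that covers exactly the bytes making up the abstract name:
#include <stddef.h> /* offsetof */

struct sockaddr_un logaddr;
memset(&logaddr, 0, sizeof(logaddr)); /* no stack garbage left in the name */
logaddr.sun_family = AF_UNIX;
/* abstract name: sun_path[0] stays '\0', the name follows, not NUL-terminated */
memcpy(logaddr.sun_path + 1, "futurama", strlen("futurama"));
socklen_t sun_len = offsetof(struct sockaddr_un, sun_path)
                    + 1 + strlen("futurama");
bind(log_socket, (struct sockaddr *)&logaddr, sun_len);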
The key to making sockets in the abstract namespace work is providing the proper length to the bind and connect calls. To avoid setting '\0' at the end of the address in sockaddr_un, it should be copied with strncpy or the like.
It is already explained in Pawel's answer, so I'm just going to give an example.
Server:
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stddef.h> /* offsetof */
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(int argc, char** argv)
{
//silence warnings for unused parameters
(void)argc;
(void)argv;
int fdServer = 0;
int fdClient = 0;
int iErr = 0;
int n = 0;
socklen_t addr_len = 0;
char buff[1024];
char resp[1024];
const char* const pcSocketName = "/tmp/test";
struct sockaddr_un serv_addr;
//set the structure with 'x' instead of 0 so that we're able
//to see the full socket name by 'cat /proc/net/unix'
//you may try playing with addr_len and see the actual name
//reported in /proc/net/unix
memset(&serv_addr, 'x', sizeof(serv_addr));
serv_addr.sun_family = AF_UNIX;
serv_addr.sun_path[0] = '\0';
//sizeof(pcSocketName) returns the size of a 'char*', which is why I use strlen
strncpy(serv_addr.sun_path+1, pcSocketName, strlen(pcSocketName));
fdServer = socket(PF_UNIX, SOCK_STREAM, 0);
if(-1 == fdServer) {
printf("socket() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
iErr = bind(fdServer, (struct sockaddr*)&serv_addr, offsetof(struct sockaddr_un, sun_path) + 1/*\0*/ + strlen(pcSocketName));
if(0 != iErr) {
printf("bind() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
iErr = listen(fdServer, 1);
if(0 != iErr) {
printf("listen() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
addr_len = sizeof(serv_addr);
while(1) {
fdClient = accept(fdServer, (struct sockaddr*) &serv_addr, &addr_len);
if(0 >= fdClient) {
printf("accept() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
memset(resp, 0, sizeof(resp));
memset(buff, 0, sizeof(buff));
n = recv(fdClient, buff, sizeof(buff), 0);
if(0 > n) {
printf("recv() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
printf("[client]: %s\n", buff);
sprintf(resp, "echo >> %s", buff);
n = send(fdClient, resp, sizeof(resp), 0);
if(0 > n) {
printf("send() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
printf("[server]: %s\n", resp);
}
close(fdServer);
return(0);
}
Client:
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stddef.h> /* offsetof */
#include <sys/socket.h>
#include <sys/un.h>

int main(int argc, char** argv) {
//silence warnings for unused parameters
(void)argc;
(void)argv;
int fdClient = 0;
struct sockaddr_un serv_addr;
int iErr = 0;
const char* const pcSocketName = "/tmp/test";
char buff[1024];
memset(&serv_addr, 0, sizeof(serv_addr));
serv_addr.sun_family = AF_UNIX;
serv_addr.sun_path[0] = '\0';
strncpy(serv_addr.sun_path+1, pcSocketName, strlen(pcSocketName));
fdClient = socket(PF_UNIX, SOCK_STREAM, 0);
if(-1 == fdClient) {
printf("socket() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
iErr = connect(fdClient, (struct sockaddr*) &serv_addr, offsetof(struct sockaddr_un, sun_path) + 1/*\0*/ + strlen(pcSocketName));
if(0 != iErr) {
printf("connect() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
memset(buff, 0, sizeof(buff));
sprintf(buff, "Hello from client!");
printf("[client]: %s\n", buff);
iErr = send(fdClient, buff, sizeof(buff), 0);
if(0 > iErr){
printf("write() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
iErr = recv(fdClient, buff, sizeof(buff), 0);
if(0 > iErr){
printf("read() failed: [%d][%s]\n", errno, strerror(errno));
return(-1);
}
printf("[server]: %s\n", buff);
return(0);
}
In my case, replacing strncpy() with snprintf() and increasing the copy size to UNIX_PATH_MAX solved the problem.
Original
strncpy(server_addr.sun_path, SOCKET_PATH, sizeof(SOCKET_PATH));
Modified
snprintf(server_addr.sun_path, UNIX_PATH_MAX, SOCKET_PATH);
Hope it helps.
Not sure how SOCKET_PATH is defined, but if it's a char* pointer rather than a string literal or array, then sizeof(SOCKET_PATH) will be the size of a pointer, typically either 4 or 8 bytes.
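If the abstract namespace is not needed, a sketch of a copy bounded by the destination buffer instead (assuming server_addr as above) avoids depending on how SOCKET_PATH is defined:
/* bound the copy by the destination and guarantee NUL termination */
strncpy(server_addr.sun_path, SOCKET_PATH,
        sizeof(server_addr.sun_path) - 1);
server_addr.sun_path[sizeof(server_addr.sun_path) - 1] = '\0';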