I have a server that spawns a thread for each client that connects to it. The thread then deals with receiving/sending data to the client. Here is my server code:
//////////////
//  START   //
//////////////
while( 1 )
{
    if( listen(serverSocket, SOMAXCONN) == SOCKET_ERROR )
    {
        printf( "Listen failed with error: %ld\n", WSAGetLastError() );
        break;
    }
    /*
     * wait for clients...
     */
    clientSocket = accept(serverSocket,
                          (struct sockaddr *)&clientSockAddr,
                          &clientSockAddrLen);
    if( clientSocket == INVALID_SOCKET )
    {
        printf("%d:accept failed\n", WSAGetLastError());
        closesocket(serverSocket);
        WSACleanup();
        return 1;
    }
    printf("Client accepted: IP: %s PORT: %d\n",
           inet_ntoa(clientSockAddr.sin_addr),
           clientSockAddr.sin_port );
    THREADDATA threadData = { clientSocket };
    sprintf_s(threadData.ip, "%s", inet_ntoa(clientSockAddr.sin_addr));
    // spawn a thread for each client
    hthread = CreateThread(
        NULL,                      // default security attributes
        0,                         // default stack size
        processClient,
        (pTHREADDATA)&threadData,  // thread data
        0,                         // run right away
        &threadId );
}
And here is what my thread function looks like:
/*
 * called by thread for each client
 */
DWORD WINAPI processClient(LPVOID lpParam)
{
    int numBytesRecvd = 0,
        numBytesSent  = 0,
        index         = 0,
        nLeft         = SIZE,
        TOTAL         = SIZE;
    char buff[SIZE];
    int run = 1;
    char msg[MSG_SIZE];
    pTHREADDATA td = (pTHREADDATA)lpParam;
    /* keep processing client */
    while (run)
    {
        memset(buff, 0, SIZE);
        numBytesRecvd = recv(td->clientSocket, buff, nLeft, 0);
        /* CLIENT EXITED */
        if( !strcmp(buff, "exit") )
        {
            printf("Client exited!\n");
            run = 0;
        }
        if( numBytesRecvd > 0 )
        {
            printf("< %s\n", buff);
        }
        if (numBytesRecvd == 0)
        {
            printf("Client closed!\n");
            run = 0;
        }
        if( numBytesRecvd == SOCKET_ERROR )
        {
            printf("%d:Receive error!\n", WSAGetLastError());
            run = 0;
        }
    }
    closesocket(td->clientSocket);
    ExitThread(0);
    return 0;
}
Issue:
So I start the server and a client, say client1 (from the command prompt in Windows 7), and everything is fine. When I type something at the client1 terminal, it is printed at the server terminal.
Now I launch another client, client2. It connects to the server and whatever I type gets displayed at the server, but now when I type something at client1 it doesn't get displayed at the server terminal.
So basically every time I start a new client, only that client can talk to the server; the old clients cannot! But I thought the thread would keep handling each client? Or is it that the cmd prompt in Windows is not thread-safe?
I can see two probable issues here:
1) You are passing threadData on the stack to a different thread. You should allocate it on the heap and then pass it to the thread; otherwise the structure can be overwritten by the next loop iteration before (or while) the new thread reads it.
2) I think you are not assigning clientSocket correctly to threadData; shouldn't you be assigning it to a member of the structure threadData?
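To illustrate the first point, here is a minimal sketch of the heap-based version (assuming THREADDATA has the clientSocket and ip members the question implies):
// Allocate the per-client data on the heap so it survives this loop iteration
pTHREADDATA threadData = (pTHREADDATA)malloc(sizeof(THREADDATA));
threadData->clientSocket = clientSocket;
sprintf_s(threadData->ip, sizeof(threadData->ip), "%s", inet_ntoa(clientSockAddr.sin_addr));
hthread = CreateThread(NULL, 0, processClient, threadData, 0, &threadId);
The thread then owns the block: in processClient, call free(td) after closesocket(td->clientSocket) so the memory is released when the client goes away.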
Related
I have a server class that has a method called handle_client as follows:
void server::handle_client()
{
    do {
        // Accept a client socket
        EnterCriticalSection(&listenSocketCriticalSection);
        SOCKET clientSocket = accept(listenSocket, NULL, NULL);
        LeaveCriticalSection(&listenSocketCriticalSection);
        // ... rest of the client handling code that reads
        // from the client socket and sends appropriate response
        // ...
    } while(true);
}
I have a run method as follows:
void server::run()
{
    // create the threads
    for (int i = 0; i < THREAD_POOL_SIZE; i++) {
        DWORD dwThreadId;
        thread_pool_handle[i] = CreateThread(NULL, 0, thread_function, this, 0, &dwThreadId);
    }
    WaitForMultipleObjects(THREAD_POOL_SIZE, thread_pool_handle, true, INFINITE);
}
I have a thread_function as follows:
DWORD WINAPI thread_function(LPVOID lpParam)
{
    server* pServer = (server*)lpParam;
    pServer->handle_client();
    return 0; // never reached while handle_client loops forever
}
I am creating a pool of threads that are all waiting for a client socket connection to be accepted. Since I have wrapped the accept within a critical section, only one thread will succeed at a time. Once a client socket is accepted by the server thread, that thread continues to go on to handle the request. The idea is that the thread will loop back indefinitely to the accept call after completing a request.
Questions:
Is the Critical Section necessary? (I think so, because otherwise the accept call from the multiple threads on the same listenSocket would clobber things. Is this correct?)
If handle_client loops indefinitely, what is the best way to cleanly terminate all the threads and exit the server? Should I use a special message from the client to trigger the thread terminations? Any other suggestions?
How should I handle the server process termination gracefully (as it pertains to the thread pool)?
It is recommended to use the select model to manage the socket objects when handling multiple sockets. In the select model, you can use FD_CLR() to remove a socket from the set when it no longer has network events.
Here is code for a server using a select socket; you can try to run and modify it.
#include <iostream>
#include <WinSock2.h>
#include <windows.h>
#include <WS2tcpip.h>
#pragma comment(lib, "ws2_32.lib")
#pragma warning(disable:4996) // inet_addr, inet_ntoa
int main()
{
    //1. Obtain version info
    WSADATA wsaData = { 0 };
    SOCKET hServer = { 0 };
    WSAStartup(MAKEWORD(2, 2), &wsaData);
    if (LOBYTE(wsaData.wVersion) != 2 || HIBYTE(wsaData.wVersion) != 2)
    {
        printf("version failed %d\n", WSAGetLastError());
        return -1;
    }
    else
    {
        printf("version succeeded\n");
    }
    //2. Create socket
    hServer = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (hServer == INVALID_SOCKET)
    {
        printf("create socket tcp failed %d\n", WSAGetLastError());
        return -1;
    }
    else
    {
        printf("create socket tcp succeeded\n");
    }
    //3. Fill in the protocol address family
    sockaddr_in ServerAddr = { 0 };
    ServerAddr.sin_family = AF_INET; // must match the AF_INET socket created above
    ServerAddr.sin_port = htons(8888);
    ServerAddr.sin_addr.S_un.S_addr = inet_addr("192.168.2.50"); // modify to your address
    //4. Bind
    int nRet = bind(hServer, (sockaddr*)&ServerAddr, sizeof ServerAddr);
    if (nRet == SOCKET_ERROR)
    {
        printf("bind failed %d\n", WSAGetLastError());
        closesocket(hServer);
        WSACleanup();
        return -1;
    }
    else
    {
        printf("bind succeeded\n");
    }
    //5. Listen
    nRet = listen(hServer, 3);
    if (nRet == SOCKET_ERROR)
    {
        printf("listen failed %d\n", WSAGetLastError());
        closesocket(hServer);
        WSACleanup();
        return -1;
    }
    else
    {
        printf("listen succeeded\n");
    }
    sockaddr_in clientAddr = { 0 }; // receives the connecting client's address
    int len = sizeof(clientAddr);   // size of the client address structure
    // Create the select model's master set of socket objects
    FD_SET fd_read;
    FD_ZERO(&fd_read);
    FD_SET(hServer, &fd_read);
    //6. Accept client connections and service ready sockets
    while (1) {
        FD_SET fd_tmp = fd_read; // select() modifies the set, so pass a copy
        const timeval tv = { 1, 0 };
        int Ret = select(0, &fd_tmp, NULL, NULL, &tv); // first parameter is ignored on Windows
        if (Ret == 0) // timed out, no network events
        {
            continue;
        }
        for (int i = 0; i < fd_tmp.fd_count; i++)
        {
            // An event on the listening socket means a client is connecting
            if (fd_tmp.fd_array[i] == hServer)
            {
                SOCKET hclient;
                hclient = accept(hServer, (sockaddr*)&clientAddr, &len); // pass NULL if you don't need the client's address
                if (hclient == INVALID_SOCKET)
                {
                    printf("accept client failed %d\n", WSAGetLastError());
                    closesocket(hServer);
                    return -1;
                }
                printf("connecting: %s\n", inet_ntoa(clientAddr.sin_addr));
                FD_SET(hclient, &fd_read);
            }
            else // An event on a client socket means the client is sending data
            {
                char buff[32] = { 0 };
                int nRet = recv(fd_tmp.fd_array[i], buff, sizeof(buff) - 1, 0);
                if (nRet > 0)
                {
                    printf("message: %s\n", buff);
                }
                else // the client disconnected: remove its socket from fd_read
                {
                    printf("Disconnect %d\n", WSAGetLastError());
                    closesocket(fd_tmp.fd_array[i]);
                    FD_CLR(fd_tmp.fd_array[i], &fd_read);
                }
            }
        }
    }
    //7. Close the socket
    closesocket(hServer);
    WSACleanup();
    getchar();
    return 0;
}
I have a non-blocking connected socket on Linux, and before my first send(), I do a select to see if the socket is ready for writing, but the select times out.
If I do not do the select, the send works fine.
If I do the select first, I never get to send anything.
Why is the select timing out before the send when the socket is obviously writable?
This is some of the code I am using:
fd_set writefds;
struct timeval timeout;
FD_ZERO(&writefds);
FD_SET(s, &writefds); // s is my connected socket
timeout.tv_sec = 10;
timeout.tv_usec = 0;
ret = select(1, 0, &writefds, 0, &timeout);
switch(ret)
{
case 1: // socket is ready for writing
    bytesSent = send(s, (const char*)(buf+bytesSent), len-bytesSent, 0);
    if(SOCKET_ERROR == bytesSent)
    {
        *error = errno;
        if(EWOULDBLOCK == *error)
        {
            goto Exit;
        }
        sprintf(errmsg, ("send() failed %i\n"), *error);
    }
    else if(bytesSent < len)
    {
        sprintf(errmsg, ("send() incomplete\n"));
    }
    else
    {
        //sprintf(errmsg, ("sent %i bytes\n"), bytesSent);
    }
    break;
case 0: // timeout --!!!! always comes here
    bytesSent = -1; // treat as error
    break;
default:
    bytesSent = -1;
}
In your port from Windows select() to UNIX/Berkeley-derived select(), you preserved the dummy value you'd supplied as the first argument to select().
On UNIX, of course, this parameter (nfds) is meaningful and must be one greater than the highest fd in your sets. Thus, for you it should be s + 1.
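Applied to the snippet above, the call would become, for example:
ret = select(s + 1, NULL, &writefds, NULL, &timeout); /* nfds = highest fd + 1 */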
I have 2 threads:
Thread A:
It runs the select() loop and performs the read-side socket handling, such as accepting new connections and receiving data.
while (1) {
    FD_ZERO(&fdReadSet);
    numActiveSockets = 0;
    for (std::unordered_map<SOCKET, TcpSocket*>::iterator it = m_sock_table.begin(); it != m_sock_table.end(); it++)
    {
        numActiveSockets++;
        FD_SET(it->first, &fdReadSet);
    }
    int ret;
    bool hasListen = false;
    if (( ret = select(numActiveSockets, &fdReadSet, NULL, NULL, NULL)) == SOCKET_ERROR) {
        printf("Select Failed, Error code = %d\n", WSAGetLastError());
        return -1;
    }
    for (std::unordered_map<SOCKET, TcpSocket*>::iterator it = m_sock_table.begin(); it != m_sock_table.end(); it++)
    {
        if (FD_ISSET(it->first, &fdReadSet))
        {
            if (it->first == TcpSocket::m_listen_sock)
            {
                if (!hasListen)
                {
                    sockaddr_in sock_addr;
                    int sockLength = sizeof(sock_addr);
                    SOCKET sock = accept(it->first, (sockaddr *) &sock_addr, &sockLength);
                    TcpSocket * socket = new TcpSocket();
                    socket->m_sock = sock;
                    m_sock_table[sock] = socket;
                    it = m_sock_table.begin(); // insertion may rehash and invalidate iterators
                    hasListen = true;
                }
            }
            else
            {
                char * buffer = it->second->GetWriteBuffer();
                int numRead = recv(it->first, buffer, SOCKET_BUFFER_SIZE, 0);
                if (numRead == SOCKET_ERROR)
                {
                    int err = WSAGetLastError();
                    if (err == WSAECONNRESET)
                    {
                        printf("Connection [%i]: RESET Received. Closing Socket\n", it->first);
                        closesocket(it->first);
                        m_sock_table.erase(it); // iterator invalidated after erase
                        break;                  // restart on the next select() pass
                    }
                    else
                    {
                        printf("Recv Failed. Error code = %d\n", err);
                        return -1;
                    }
                }
                else if (numRead == 0) // connection closed
                {
                    printf("Connection [%i]: Graceful exit. Closing Socket\n", it->first);
                    closesocket(it->first);
                    m_sock_table.erase(it); // iterator invalidated after erase
                    break;                  // restart on the next select() pass
                }
                else {
                    /* Process received data */
                }
            }
        }
    }
}
Thread B:
It allows the application to perform connect() to establish new connections. If a connect() is successful, it will then add the returned socket to m_sock_table.
I have a socket table called m_sock_table which holds all the sockets. I use this m_sock_table to initialize the fdReadSet to be used in select().
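A minimal sketch of Thread B's registration step (connect_to_peer is a hypothetical helper wrapping connect(); the other names follow the code above):
// Thread B: establish an outgoing connection and register it with the select loop
SOCKET sock = connect_to_peer(host, port); // hypothetical wrapper around connect()
if (sock != INVALID_SOCKET)
{
    TcpSocket * tcpSock = new TcpSocket();
    tcpSock->m_sock = sock;
    m_sock_table[sock] = tcpSock; // Thread A only sees this on its next pass
}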
-----------Problem-----------------
If thread A is blocked by select(), and at the same time thread B establishes a new connection through connect(), the application wouldn't be able to receive data from the new connection, because fdReadSet has not been updated with the newly connected socket.
What would be a good way to solve this problem? Or is the design just wrong from the start?
You could use a signal that doesn't do anything other than interrupting the system call:
#include <signal.h>
void do_nothing(int sig) { }

struct sigaction sa;
sa.sa_handler = do_nothing;
sigemptyset(&sa.sa_mask);
#ifdef SA_INTERRUPT
sa.sa_flags = SA_INTERRUPT;
#else
sa.sa_flags = 0;
#endif
sigaction(SIGUSR1, &sa, 0);
Then, in thread B, after starting a new connection, send the signal, after making sure thread A will handle it:
/* need only be done once, but needed in every thread other than A */
sigset_t sigs;
sigemptyset(&sigs);
sigaddset(&sigs, SIGUSR1);
pthread_sigmask(SIG_BLOCK, &sigs, 0);
/* each time we create a new connection */
kill(getpid(), SIGUSR1);
With the above, select will return with an EINTR error -- so check for that and loop (adding the new connection to the set).
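The check in thread A's loop would look roughly like this (a sketch; rebuild_read_set is a hypothetical helper that re-fills the fd_set from the socket table):
for (;;) {
    fd_set readset;
    int maxfd = rebuild_read_set(&readset); /* hypothetical: re-add all sockets, return highest fd */
    if (select(maxfd + 1, &readset, NULL, NULL, NULL) < 0) {
        if (errno == EINTR)
            continue; /* interrupted by SIGUSR1: loop to pick up the new connection */
        break;        /* real error */
    }
    /* ... service the ready sockets ... */
}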
I am using the Windows named pipe example. When I run the sample programs to create a pipe, write something, and receive it in the client program, everything is just fine. When I move the client code into a DLL which runs in a Windows service, it just won't receive the sent bytes.
Server's code is as follows:
ThreadParams * params = reinterpret_cast<ThreadParams*>(args);
CString * connectionString = params->connectString;
HANDLE hPipe;
DWORD dwBytesRead;
TCHAR buf[1024];
int len;
hPipe = CreateNamedPipe(PIPE_NAME,                            // Name
             PIPE_ACCESS_OUTBOUND | WRITE_OWNER,              // OpenMode
             PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, // PipeMode
             2,                                               // MaxInstances
             1024,                                            // OutBufferSize
             1024,                                            // InBufferSize
             2000,                                            // TimeOut
             NULL);                                           // Security
if (hPipe == INVALID_HANDLE_VALUE)
{
    Globals::WriteLog("Could not create the pipe",1);
    exit(1);
}
Globals::WriteLog("connect...",1);
ConnectNamedPipe(hPipe, NULL);
Globals::WriteLog("...connected",1);
swprintf(buf, connectionString->GetBuffer());
len = wcslen(buf);
if (!WriteFile(hPipe, buf, len*sizeof(TCHAR), &dwBytesRead, NULL))
    Globals::WriteLog("WriteFile failed",1);
else
    wprintf(L"written %d bytes\n",dwBytesRead);
DisconnectNamedPipe(hPipe);
CloseHandle(hPipe);
And client:
CString finalResult = _T("");
HANDLE hOut = INVALID_HANDLE_VALUE;
TCHAR buf[1024];
DWORD len;
DWORD dwWritten;
Global::WriteLog("pwrite: waiting for the pipe...",1);
if (WaitNamedPipe(PIPE_NAME, NMPWAIT_WAIT_FOREVER) == 0)
{
    Global::WriteLog("WaitNamedPipe failed. error=%d",1,GetLastError());
    goto cleanup;
}
Global::WriteLog("the pipe is ready",1);
hOut = CreateFile(PIPE_NAME,
                  GENERIC_READ,
                  0,
                  NULL, OPEN_EXISTING,
                  FILE_ATTRIBUTE_NORMAL,
                  NULL);
if (hOut == INVALID_HANDLE_VALUE)
{
    Global::WriteLog("CreateFile failed with error %d",1,GetLastError());
    goto cleanup;
}
Global::WriteLog("Opened the pipe",1);
for (;;)
{
    if (!ReadFile(hOut, buf, sizeof(buf), &dwWritten, NULL))
    {
        Global::WriteLog("ReadFile failed -- probably EOF. Read %d bytes.",1,dwWritten);
        goto cleanup;
    }
    else
        break;
}
finalResult = CString(buf);
Global::WriteLog("String from pipe:%S",1,buf);
cleanup:
if(hOut != INVALID_HANDLE_VALUE)
    CloseHandle(hOut);
The server's code runs in a thread, if that changes anything (I've tested this threaded version in a sample program and there was no problem with it).
Why does it not work?
Thanks in advance
Okay, it seems I figured it out. It seems I didn't understand the documentation properly.
On the server's side, the WriteFile function doesn't block until the string is read. My program simply wrote the data and then closed the pipe handle. The client didn't catch the message and threw an error stating that there is no process on the other end of the pipe.
I also removed the for(;;) loop from the client.
To make the server wait until the client's read operation completes, I added
FlushFileBuffers(hPipe);
after the successful write operation.
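The end of the server code thus becomes (a minimal sketch based on the code above):
if (!WriteFile(hPipe, buf, len*sizeof(TCHAR), &dwBytesRead, NULL))
    Globals::WriteLog("WriteFile failed",1);
else
    wprintf(L"written %d bytes\n",dwBytesRead);
FlushFileBuffers(hPipe);   // block until the client has read the data
DisconnectNamedPipe(hPipe);
CloseHandle(hPipe);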
Hope that helps somebody.
My problem is the following:
I'm programming an interface in Linux to control a GPIB controller via Ethernet. To do so I open a TCP socket and just send the commands to the controller. This works fine so far. The problem occurred while writing a unit test for my interface:
To check the interface I am using a TCP acceptor from the Boost library in a separate thread and just connect to it instead of the actual controller. This works too, but only as long as the connect() call from the interface is blocking. However, since I need a specified timeout for the connect() call, I had to connect with the select() function:
// Open TCP Socket
m_Socket = socket(PF_INET,SOCK_STREAM,0);
if( m_Socket < 0 )
{
    m_connectionStatus = STATUS_CLOSED;
    return ERR_NET_SOCKET;
}
struct sockaddr_in addr;
inet_aton(m_Host.c_str(), &addr.sin_addr);
addr.sin_port = htons(m_Port);
addr.sin_family = PF_INET;
// Set timeout values for socket
struct timeval timeouts;
timeouts.tv_sec = SOCKET_TIMEOUT_SEC;   // const -> 5
timeouts.tv_usec = SOCKET_TIMEOUT_USEC; // const -> 0
uint8_t optlen = sizeof(timeouts);
if( setsockopt( m_Socket, SOL_SOCKET, SO_RCVTIMEO, &timeouts, (socklen_t)optlen) < 0 )
{
    m_connectionStatus = STATUS_CLOSED;
    return ERR_NET_SOCKET;
}
// Set the socket to TCP_NODELAY ( send immediately after a send / write command )
int flag_TCP_nodelay = 1;
if ( (setsockopt( m_Socket, IPPROTO_TCP, TCP_NODELAY,
        (char *)&flag_TCP_nodelay, sizeof(flag_TCP_nodelay))) < 0)
{
    m_connectionStatus = STATUS_CLOSED;
    return ERR_NET_SOCKET;
}
// Save socket flags
int opts_blocking = fcntl(m_Socket, F_GETFL);
if ( opts_blocking < 0 )
{
    return ERR_NET_SOCKET;
}
int opts_noblocking = (opts_blocking | O_NONBLOCK);
// Set socket to non-blocking
if (fcntl(m_Socket, F_SETFL, opts_noblocking)<0)
{
    return ERR_NET_SOCKET;
}
// Connect
if ( connect(m_Socket, (struct sockaddr *)&addr, sizeof(addr)) < 0)
{
    // EINPROGRESS always appears on a non-blocking connect
    if ( errno != EINPROGRESS )
    {
        m_connectionStatus = STATUS_CLOSED;
        return ERR_NET_SOCKET;
    }
    // Create a set of sockets for select
    fd_set socks;
    FD_ZERO(&socks);
    FD_SET(m_Socket,&socks);
    // Wait for connection or timeout
    int fdcnt = select(m_Socket+1,NULL,&socks,NULL,&timeouts);
    if ( fdcnt < 0 )
    {
        return ERR_NET_SOCKET;
    }
    else if ( fdcnt == 0 )
    {
        return ERR_TIMEOUT;
    }
}
// Set socket to blocking again
if(fcntl(m_Socket,F_SETFL,opts_blocking)<0)
{
    return ERR_NET_SOCKET;
}
m_connectionStatus = STATUS_OPEN;
return x2e::OK;
If I use this function I can still connect to the real controller and communicate with it. But with my test server I just can't connect; select() simply returns 0.
So now someone may say that my test server just doesn't work... but if I use a blocking connect() call I can send to my test server without any problems.
Maybe someone has an idea what I could do?
With a non-blocking socket, the connect() call may return before the connection is actually ready.
The connect() section may be written like this (my connect wrapper code segment, learned from the Python implementation):
if (FAIL_CHECK(connect(sock, (struct sockaddr *) &channel, sizeof(channel)) &&
               errno != EINPROGRESS))
{
    gko_log(WARNING, "connect error");
    ret = HOST_DOWN_FAIL;
    goto CONNECT_END;
}
/** Wait for write bit to be set **/
#if HAVE_POLL
{
    struct pollfd pollfd;
    pollfd.fd = sock;
    pollfd.events = POLLOUT;
    /* send_sec is in seconds, timeout in ms */
    select_ret = poll(&pollfd, 1, (int)(send_sec * 1000 + 1));
}
#else
{
    FD_ZERO(&wset);
    FD_SET(sock, &wset);
    select_ret = select(sock + 1, 0, &wset, 0, &send_timeout);
}
#endif /* HAVE_POLL */
if (select_ret < 0)
{
    gko_log(FATAL, "select/poll error on connect");
    ret = HOST_DOWN_FAIL;
    goto CONNECT_END;
}
if (!select_ret)
{
    gko_log(FATAL, "connect timeout on connect");
    ret = HOST_DOWN_FAIL;
    goto CONNECT_END;
}
Python version code segment:
res = connect(s->sock_fd, addr, addrlen);
if (s->sock_timeout > 0.0) {
    if (res < 0 && errno == EINPROGRESS && IS_SELECTABLE(s)) {
        timeout = internal_select(s, 1);
        if (timeout == 0) {
            /* Bug #1019808: in case of an EINPROGRESS,
               use getsockopt(SO_ERROR) to get the real
               error. */
            socklen_t res_size = sizeof res;
            (void)getsockopt(s->sock_fd, SOL_SOCKET,
                             SO_ERROR, &res, &res_size);
            if (res == EISCONN)
                res = 0;
            errno = res;
        }
        else if (timeout == -1) {
            res = errno; /* had error */
        }
        else
            res = EWOULDBLOCK; /* timed out */
    }
}
if (res < 0)
    res = errno;
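Translated back to the C wrapper above, the key point is to query SO_ERROR after select()/poll() reports the socket writable (a sketch reusing the names from the wrapper segment):
/* after select/poll says the socket is writable, fetch the real result */
int soerr = 0;
socklen_t soerr_len = sizeof(soerr);
if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &soerr, &soerr_len) < 0 || soerr != 0)
{
    gko_log(WARNING, "connect failed after select");
    ret = HOST_DOWN_FAIL;
    goto CONNECT_END;
}
/* soerr == 0: the connection is established */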