I've got a strange issue reading from a serial port on Linux. About 50% of the time, I won't get any data back from the serial port when I read from it, yet if I use GTKTerm I get data back 100% of the time. I've double-checked my settings, and the terminal settings I'm using are exactly the same as what GTKTerm uses, so I'm mystified as to why this happens. The only thing I could think of is that the port is set in canonical mode rather than raw mode, but I set it properly - the device that I'm reading from does not send back a newline with every command. Notably, everything works properly when the device does send back commands with a newline character.
Here's the code I'm using:
int fd = open(serial_port, O_RDWR );
if( fd < 0 ){
perror("open");
}
//Set the serial port settings
struct termios newio;
if( tcgetattr( fd, &newio ) < 0 )
perror("tcgetattr");
if( cfsetospeed( &newio, B9600 ) < 0 )
perror("cfsetospeed");
newio.c_cflag &= ~CSTOPB;   /* 1 stop bit */
newio.c_cflag &= ~CSIZE;
newio.c_cflag |= CS8;       /* 8 data bits */
newio.c_cflag |= CREAD;     /* enable the receiver */
newio.c_iflag |= IGNPAR;    /* ignore bytes with parity/framing errors */
newio.c_iflag |= IGNBRK;    /* ignore break conditions */
newio.c_iflag &= ~BRKINT;
newio.c_iflag &= ~ICRNL;    /* don't translate CR to NL on input */
newio.c_iflag &= ~IXON;     /* no software flow control */
newio.c_cflag |= CLOCAL;    /* ignore modem control lines */
newio.c_oflag = 0;          /* raw output */
newio.c_lflag = 0;          /* raw input: no canonical mode, no echo, no signals */
newio.c_cc[VTIME] = 0;      /* no inter-character timeout */
newio.c_cc[VMIN] = 1;       /* read() blocks until at least one byte is available */
if( tcsetattr( fd, TCSANOW, &newio ) < 0 )
perror("tcsetattr");
tcflush( fd, TCOFLUSH );
tcflush( fd, TCIFLUSH );
Reading code (in a separate thread):
void* thr(void* ign){
    char buffer[10];
    while( 1 ){
        int got = read(fd, buffer, sizeof(buffer) - 1); /* leave room for the terminating NUL */
        if( got <= 0 )
            continue; /* read error or EOF; real code should handle this properly */
        buffer[got] = 0;
        printf("got %s\n", buffer);
    }
}
Well, I found out the problem.
For some bizarre reason, the device that I'm sending to doesn't seem to have much of an input buffer; it turns out that when I send a command as a string all at once, I won't always get a response back. Sleeping between characters fixes the problem (though I admit it's not a very good solution; a sketch of the workaround is below). What was probably happening is that my process would occasionally get interrupted mid-send, which gave the device enough time to process some of its input. GTKTerm sends out each character as it is typed, and it's impossible to type fast enough for this error to occur.
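For reference, here is a minimal sketch of that workaround; the helper name, the command-string handling, and the 10 ms delay are just illustrative, not taken from the original program:

#include <string.h>
#include <unistd.h>

/* Send a command one byte at a time, pausing between bytes so a device
 * with a tiny input buffer has time to process each character. */
static void send_slowly(int fd, const char *cmd)
{
    size_t len = strlen(cmd);
    for (size_t i = 0; i < len; i++) {
        if (write(fd, &cmd[i], 1) != 1)
            break;          /* write error: bail out */
        usleep(10000);      /* 10 ms between characters; tune for the device */
    }
}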
I am testing a serial communication protocol based on printable characters only.
The setup is a PC connected to an Arduino board by USB. The PC's USB serial port is operated in canonical mode, with no echo, no flow control, 9600 baud.
Since a read timeout is required, pselect is called before the serial read. The Arduino board simply echoes back every received character without any processing. The PC OS is Linux Neon with kernel 5.13.0-40-generic.
When lines of a specific length are transmitted from the PC and echoed back by the Arduino, they are received correctly except that the final newline is missing.
A further read returns an empty line (the previously missing NL).
Lines of other lengths are transmitted and received correctly, including the trailing NL.
This behavior is fully repeatable and stable. The following code reproduces the problem for a line transmitted with a length of 65 characters (including the NL) and received with a length of 64 (NL missing). Other line lengths work fine.
Thanks for any hints.
/* remote serial loop test 20220626 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/select.h>
#include <string.h>
#include <termios.h>
#include <time.h>
#include <unistd.h>
#define TX_MINLEN 63
#define TX_MAXLEN 66
#define DATA_MAXLEN 128
#define LINK_DEVICE "/dev/ttyUSB0"
#define LINK_SPEED B9600
#define RECEIVE_TIMEOUT 2000
int main()
{
int wlen;
int retval;
int msglen;
uint8_t tx_data[257] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
for (int i=16; i < 256; i++) tx_data[i] = tx_data[i & 0xf];
uint8_t rx_data[257];
/* serial interface */
char * device;
int speed;
int fd;
fd_set fdset;
struct timespec receive_timeout;
struct timespec *p_receive_timeout = &receive_timeout;
struct termios tty;
/* open serial device in blocking mode */
fd = open(LINK_DEVICE, O_RDWR | O_NOCTTY);
if (fd < 0) {
printf("Error opening %s: %s\n",LINK_DEVICE,strerror(errno));
return -1;
}
/* prepare serial read by select to have read timeout */
FD_ZERO(&(fdset));
FD_SET(fd,&(fdset));
if (RECEIVE_TIMEOUT >= 0) {
p_receive_timeout->tv_sec = RECEIVE_TIMEOUT / 1000;
p_receive_timeout->tv_nsec = RECEIVE_TIMEOUT % 1000 * 1000000;
}
else
p_receive_timeout = NULL;
/* get termios structure */
if (tcgetattr(fd, &tty) < 0) {
printf("Error from tcgetattr: %s\n", strerror(errno));
return -1;
}
/* set tx and rx baudrate */
cfsetospeed(&tty, (speed_t)LINK_SPEED);
cfsetispeed(&tty, (speed_t)LINK_SPEED);
/* set no modem ctrl, 8 bit, no parity, 1 stop */
tty.c_cflag |= (CLOCAL | CREAD); /* ignore modem controls */
tty.c_cflag &= ~CSIZE;
tty.c_cflag |= CS8; /* 8-bit characters */
tty.c_cflag &= ~PARENB; /* no parity bit */
tty.c_cflag &= ~CSTOPB; /* only need 1 stop bit */
tty.c_cflag &= ~CRTSCTS; /* no hardware flowcontrol */
/* canonical mode: one line at a time (\n is line terminator) */
tty.c_lflag |= ICANON | ISIG;
tty.c_lflag &= ~(ECHO | ECHOE | ECHONL | IEXTEN);
/* input control */
tty.c_iflag &= ~IGNCR; /* preserve carriage return */
tty.c_iflag &= ~INPCK; /* no parity checking */
tty.c_iflag &= ~INLCR; /* no NL to CR translation */
tty.c_iflag &= ~ICRNL; /* no CR to NL translation */
tty.c_iflag &= ~IUCLC; /* no upper to lower case mapping */
tty.c_iflag &= ~IMAXBEL;/* no ring bell at rx buffer full */
tty.c_iflag &= ~(IXON | IXOFF | IXANY);/* no SW flowcontrol */
/* no output remapping, no char dependent delays */
tty.c_oflag = 0;
/* no additional EOL chars, confirm EOF to be 0x04 */
tty.c_cc[VEOL] = 0x00;
tty.c_cc[VEOL2] = 0x00;
tty.c_cc[VEOF] = 0x04;
/* set changed attributes really */
if (tcsetattr(fd, TCSANOW, &tty) != 0) {
printf("Error from tcsetattr: %s\n", strerror(errno));
return -1;
}
/* wait for serial link hardware to settle, required by arduino reset
* triggered by serial control lines */
sleep(2);
/* empty serial buffers, both tx and rx */
tcflush(fd,TCIOFLUSH);
/* repeat transmit and receive, each time reducing data length by 1 char */
for (int l=TX_MAXLEN; l > TX_MINLEN - 1; l--) {
/* prepare data: set EOL and null terminator for current length */
tx_data[l] = '\n';
tx_data[l+1] = 0;
/* send data */
int sent = write(fd,tx_data,l+1);
/* receive data */
/* wait for received data or for timeout */
retval = pselect(fd+1,&(fdset),NULL,NULL,p_receive_timeout,NULL);
/* check for error or timeout */
if (retval < 0)
printf("pselect error: %d - %s\n",retval,strerror(errno));
else if (retval == 0)
printf("serial read timeout\n");
/* there is enough data for a non block read: do read */
msglen = read(fd,&rx_data,DATA_MAXLEN);
/* check rx data length */
if (msglen != l+1)
printf("******** RX ERROR: sent %d, received %d\n",l+1,msglen);
else
continue;
/* check received data, including new line if present */
for (int i=0; i < msglen; i++) {
if (tx_data[i] == rx_data[i])
continue;
else {
printf("different rx data:|%s|\n",rx_data);
break;
}
}
/* clear RX buffer */
for (int i=0; i < msglen + 1; i++) rx_data[i] = 0;
}
}
When performing stream-based communication, be it via pipes, serial ports, or TCP sockets, you can never rely on reads to always return a full "unit of transmission" (in this case a line, but could also be a fixed-size block). The reason is that stream-based communication can always be split by any part of the transmission stack (even potentially the sender) into multiple blocks, and there is never a guarantee that a single read will always read a full block.
For example, you could always run into the race condition that your microcontroller is still sending part of the message when you call read(), so not all of the characters have arrived yet. In your case, that's not what you're seeing, because that would be a stochastic phenomenon (it would get worse with increased interrupt load on the computer) and not so easily reproducible. Instead, because you're talking about the number 64 here, you're running into the static buffer size used in the kernel's tty driver, which will only ever return at most 64 bytes at once, regardless of what the specified read size actually is. However, in other cases it could still happen that the kernel returns only the first couple of characters of a line in the first read() and the rest in the second, depending on precise timing details -- you've probably not seen that yet, but it's bound to happen at some point.
The only reliable way to properly implement communication protocols in streaming situations (serial port, pipes, TCP sockets, etc.) is to consider the following:
For fixed-size data (e.g. communication units that are always N bytes in size), loop around the read() call until you've read exactly the right number of bytes (reads that follow an incomplete read obviously ask for fewer bytes than the original read, just to make up the difference).
For variable-size data (for example communication units that are separated by a line-end character) you have two options: either you read only one character at a time until you reach the end-of-line character (inefficient, lots of syscalls), or you keep track of the communication state via a large enough buffer that you keep filling with read() operations until the buffer contains a line-end character, at which point you remove that line from the buffer (but keep the rest) and process it; a sketch of this buffered approach follows below.
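To make the buffered approach concrete, here is a minimal sketch; the buffer size, function name, and return convention are just illustrative, and it assumes <string.h> and <unistd.h> are included:

/* Accumulate serial data in a persistent buffer and hand out one line at a
 * time, regardless of how read() happens to split the stream. */
static char linebuf[4096];
static size_t linelen = 0;

/* Returns the line length copied into 'out' (without the '\n'),
 * 0 if no complete line is available yet, -1 on read error. */
int read_line(int fd, char *out, size_t outsize)
{
    for (;;) {
        /* Is a complete line already buffered? */
        char *nl = memchr(linebuf, '\n', linelen);
        if (nl != NULL) {
            size_t n = (size_t)(nl - linebuf);
            if (n >= outsize)
                n = outsize - 1;        /* truncate overlong lines */
            memcpy(out, linebuf, n);
            out[n] = '\0';
            /* Drop the consumed line and its '\n', keep any remainder. */
            linelen -= (size_t)(nl - linebuf) + 1;
            memmove(linebuf, nl + 1, linelen);
            return (int)n;
        }
        /* No complete line yet: read more and try again. A real implementation
         * should also handle a full buffer that contains no newline. */
        ssize_t got = read(fd, linebuf + linelen, sizeof(linebuf) - linelen);
        if (got < 0)
            return -1;                  /* real code should also handle EINTR */
        if (got == 0)
            return 0;                   /* nothing available right now */
        linelen += (size_t)got;
    }
}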
As a complete aside, if you're doing anything with serial communication, I can very much recommend the excellent libserialport library (LGPLv3 license) that makes working with serial ports a lot easier -- and has the benefit of being cross-platform. (Doesn't help with your issue, just thought that I'd mention it.)
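For what it's worth, here is a rough sketch of what opening and reading a port looks like with libserialport; this is written from memory, so double-check the names and error handling against the library's documentation:

#include <libserialport.h>
#include <stdio.h>

int main(void)
{
    struct sp_port *port;
    char buf[256];

    if (sp_get_port_by_name("/dev/ttyUSB0", &port) != SP_OK)
        return 1;
    if (sp_open(port, SP_MODE_READ_WRITE) != SP_OK)
        return 1;
    sp_set_baudrate(port, 9600);
    sp_set_bits(port, 8);
    sp_set_parity(port, SP_PARITY_NONE);
    sp_set_stopbits(port, 1);
    sp_set_flowcontrol(port, SP_FLOWCONTROL_NONE);

    /* Blocking read with a 2000 ms timeout; returns bytes read or an error code. */
    int n = sp_blocking_read(port, buf, sizeof(buf), 2000);
    if (n > 0)
        printf("read %d bytes\n", n);

    sp_close(port);
    sp_free_port(port);
    return 0;
}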
Upgrading the Linux kernel from version 5.13.0-40-generic to 5.13.0-51-generic solved the problem.
I'm writing an embedded Linux application that (1) opens a serial connection to another device, (2) sends a known command, (3) checks port for incoming characters (response) until expected response phrase or character is detected, (4) repeats step 2 and 3 until a series of commands are sent and responses received, (5) then closes the port.
My app would go through some cycles of the above sequence, and every now and then, while it was waiting for a response (reading), the communication would suddenly stop and my software would fault out because of my built-in timeout logic.
Is there anything in my port configuration that could cause the port to become blocked because of a specific byte being sent (possibly due to electrical noise)?
Here is how I'm opening my ports (showing configurations via termios.h):
struct termios options;
fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY | O_NONBLOCK);
if (fd == -1) {
debug() << "Port open failed!"
return FAIL;
}
debug() << "Port Opened Successful"
fcntl(fd, F_SETFL, 0); // This setting interacts with VMIN and VTIME below
// Get options
tcgetattr(fd, &options);
// Adjust Com port options
options.c_cflag |= (CLOCAL | CREAD); // Program will not "own" port, enable reading on port
options.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG); // Sets RAW input mode (does not treat input as a line of text with CR/LF ending)
options.c_oflag &= ~ OPOST; // Sets RAW output mode (avoids newline mapping to CR+LF characters)
options.c_iflag &= ~(IXON | IXOFF | IXANY); // Turns off SW flow c
options.c_cc[VMIN] = 0;  // With VMIN=0 and VTIME>0, read() returns as soon as any data arrives,
options.c_cc[VTIME] = 10; // or after a 1.0 second timeout (VTIME is in tenths of a second)
// Set options
tcsetattr(fd, TCSANOW, &options);
//return fd;
return SUCCEED;
I can't figure out why the communication all of a sudden just freezes up and then goes away when I cycle power to my device. Thanks all!
More info - here are my read and write functions:
int Comm::Receive(unsigned char* rBuf)
{
int bytes;
ioctl(fd, FIONREAD, &bytes);
if (bytes >= 1)
{
bytes = read(fd, rBuf, 1);
if (bytes < 0)
return READ_ERR;
return SUCCEED;
}
else
return NO_DATA_AVAILABLE;
}
int Comm::Send(int xCt, unsigned char* xBuf)
{
int bytes;
if (fd == -1)
return FAIL;
bytes = write(fd, xBuf, xCt);
if (bytes != xCt)
return FAIL;
else
return SUCCEED;
}
Welcome to the joys of serial ports...
Thought 1: wrap your read calls with a select ()
Thought 2: Unset the ICANON flag in tcsetattr, and set a VTIME attribute for a deliberate timeout (and then, obviously, handle it); a sketch combining thoughts 1 and 2 follows below.
Thought 3: Nothing about serial comms ever works perfectly.
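A minimal sketch of that combination (raw mode is assumed to be set up already, as in the question; the function name, timeout value, and buffer handling are arbitrary):

#include <sys/select.h>
#include <unistd.h>

/* Wait up to timeout_ms for data, then read at most 'size' bytes.
 * Returns the number of bytes read, 0 on timeout, -1 on error. */
int read_with_timeout(int fd, unsigned char *buf, size_t size, int timeout_ms)
{
    fd_set readfds;
    struct timeval tv;

    FD_ZERO(&readfds);
    FD_SET(fd, &readfds);
    tv.tv_sec  = timeout_ms / 1000;
    tv.tv_usec = (timeout_ms % 1000) * 1000;

    int ret = select(fd + 1, &readfds, NULL, NULL, &tv);
    if (ret < 0)
        return -1;      /* select() failed; check errno, retry on EINTR */
    if (ret == 0)
        return 0;       /* timed out with no data */

    return (int)read(fd, buf, size);  /* data is pending, so this read won't block */
}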
I also had a similar problem sending commands to a device and reading its responses. Please refer to the SO post linked below; the answer there worked for my problem.
In these cases we have to pay attention to the protocol used for device communication (send and receive). If commands are sent successfully but no response (or only noise) comes back from the device, it implies there is something wrong in the data packet that was sent. First check the protocol specification, then build the byte array for a simple command (like making a beep sound) and send it.
Send data to a barcode scanner over RS232 serial port
If you can post your complete source code along with its output, I can look into it further.
Enjoy the code. Thanks.
I'm a bit of a newbie, but I have a legacy app that reads 64 bytes of AES-encrypted data from a device using ttyACM0. I now need to read 128 bytes. It sounded simple: increase the sizes of the buffers, etc. But no matter what I try, I can still only read 64 bytes. After that it just hangs. I verified the communications in Windows with a terminal and the cdc-acm driver. The device does not use flow control. I can't upload the code because it's proprietary, but below are some snippets:
The initialization:
int CACS_RefID::Initialise()
{
int iRet = 1;
struct termios dev_settings;
if(( m_fdRefdev = open("/dev/ttyACM0", O_RDWR))<0)
{
g_dbg->debug("CACS_RefID::Failed to open device\n");
return 0;
}
g_dbg->debug("CACS_RefID::Initialse completed\n");
// Configure the port
tcgetattr(m_fdRefdev, &dev_settings);
cfmakeraw(&dev_settings);
//*tcflush
//tcflush(m_fdRefdev, TCIOFLUSH);
tcsetattr(m_fdRefdev, TCSANOW, &dev_settings);
return iRet;
}
The implementation:
int CACS_RefID::Readport_Refid(int ilen, char* buf)
{
int ierr=0, iret = 0, ictr=0;
fd_set fdrefid;
struct timeval porttime_refrd;
FD_ZERO(&fdrefid);
FD_SET(m_fdRefdev,&fdrefid);
porttime_refrd.tv_sec = 1;
porttime_refrd.tv_usec = 0; //1 second wait time for read port
do
{
iret = select(m_fdRefdev + 1, &fdrefid, NULL, NULL, &porttime_refrd);
switch(iret)
{
case READ_TIMEOUT:
g_dbg->debug("Refid portread: Select timeout:readlen=%d \n",ilen);
ierr = -1;
break;
case READ_ERROR:
g_dbg->debug("Refid portread: Select error:readlen=%d \n",ilen);
ierr = -1;
break;
default:
iret = read(m_fdRefdev, buf, ilen);
g_dbg->debug("Refid portread: Read len(%d):%d\n",ilen,iret);
break;
}
}while((ierr == 0) && (iret<ilen) );
//Flush terminal content at Input and Output after every read completion
// tcflush(m_fdRefdev, TCIOFLUSH);
return ierr;
}
If I run the initialization every time before running the implementation, I get 128 bytes, but the data is corrupt after 64 bytes. Even before working on it, I get a lot of READ_ERRORs. It looks like the original author expected the device to block with select(), but it doesn't.
Is there some type of limitation on the ttyACM0 buffer size in the system? Does baud rate matter with the ttyACM driver? Does read() stop reading after all bytes are read (thinking the first 64 are available, then empty, then more data)?
I've been poring through man pages but I'm stymied. ANY help would be greatly appreciated.
Here's my latest:
int CACS_RefID::Get_GasTest_Result(int ilen)
{
int ierr=0, iret = 0, ictr=0, iread=0;
fd_set fdrefid;
struct timeval porttime_refrd;
porttime_refrd.tv_sec = 5;
porttime_refrd.tv_usec = 0; //5 second wait time for read port
if (Get_GasTest_FirstPass == 0)
{
g_dbg->debug("GasTest_Result_firstPass\n");
memset(strresult, 0, sizeof(strresult)); //SLY clear out result buffer
iread=0;
Get_GasTest_FirstPass = 1;
}
do
{
iread = strlen(strresult);
FD_ZERO(&fdrefid);
FD_SET(m_fdRefdev,&fdrefid);
iret = select(m_fdRefdev + 1, &fdrefid, NULL, NULL, &porttime_refrd);
switch(iret)
{
case READ_TIMEOUT: //0
g_dbg->debug("Get_GasTest_Result: Select timeout\n");
ierr = -1;
break;
case READ_ERROR: //-1
g_dbg->debug("Get_GasTest_Result: Select error=%d %s \n", errno,strerror(errno)) ;
ierr = -1;
break;
}
iret = read(m_fdRefdev, (&strresult[0] + iread), (ilen-iread));
g_dbg->debug("Get_GasTest_Result: ilen=%d,iret=%d,iread=%d \n",ilen,iret,iread);
}while((ierr == 0) && (iread<ilen) );
return ierr;
Note: I am now reading data regardless of select errors and STILL only getting 64 bytes. I've contacted my device manufacturer. Must be something odd going on.
Here is one possible problem with your code; it may not be the one causing you to get only 64 bytes, but it could explain what you are seeing. Assume that you invoke the function Readport_Refid() with a buffer of 128 bytes. In other words, your invocation was something like:
char buffer[128];
Readport_Refid(128, buffer);
Assume for whatever reason that the first call to select() returns 1 (since one bit is set). Your code only sets one bit, so you go off and read():
iret = read(m_fdRefdev, buf, ilen);
g_dbg->debug("Refid portread: Read len(%d):%d\n",ilen,iret);
break;
read() returns 64 (which means 64 bytes were read) and your program prints a nice message; since ierr is still 0 and iret (64) is less than ilen (128), you go round again and call select().
Assume that you get more data and select() returns 1 again. Then you will read again into the same buffer with the same ilen and overwrite the first 64 bytes that were read.
At the very least, you should do the following. I have only shown below the changed lines. First add an iread variable and make sure you use it to preserve data that you've already read. Then use iread to determine whether you've read enough or not.
int CACS_RefID::Readport_Refid(int ilen, char* buf)
{
int ierr=0, iret = 0, ictr=0, iread = 0;
[...]
default:
iret = read(m_fdRefdev, buf + iread, ilen - iread);
if (iret > 0)
iread += iret;
g_dbg->debug("Refid portread: Read len(%d):%d\n",ilen,iret);
break;
}
}while((ierr == 0) && (iread<ilen) );
[...]
**** EDITED 2013-08-19 ****
I want to reiterate a comment made by #wildplasser
You should really also be setting FD_SET on each trip around the loop. Great catch.
With respect to your new code, does it work or do you still have a problem?
**** EDITED again 2013-08-19 ****
Getting EINTR is nothing to be worried about. You should just plan on resetting FD_SET and trying again, for example:
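(Sketch only, reusing the member names from the code above; the 1-second timeout is arbitrary, and it needs <errno.h> and <sys/select.h>.)

/* Retry select() when it is interrupted by a signal (EINTR), re-arming the
 * fd_set and the timeout on every attempt. */
int iret;
do {
    FD_ZERO(&fdrefid);
    FD_SET(m_fdRefdev, &fdrefid);
    porttime_refrd.tv_sec  = 1;   /* re-set each time: Linux select() may modify the timeout */
    porttime_refrd.tv_usec = 0;
    iret = select(m_fdRefdev + 1, &fdrefid, NULL, NULL, &porttime_refrd);
} while (iret < 0 && errno == EINTR);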
I can't say I know why, but the fix was to call the initialization code at the beginning of the implementation even though it had already been called previously. If I call it again, I can read 128 bytes. If I don't, I can only read up to 64 bytes.
I am developing for an ARM device running Linux 2.6.37. I am trying to toggle an IO pin as fast as possible. I made a little kernel module and a user space application. I tried two things:
Manipulate the GPIO control registers directly from kernel space using ioremap.
mmap() the GPIO control registers without caching and use them from user space.
Both methods work, but the second is about 3 times slower than the first (observed on oscilloscope). I think I disabled all caching mechanisms.
Of course I'd like to get the best of the two worlds : flexibility and ease of development from user space with the speed of kernel space.
Does anybody know why the mmap() could be slower than the ioremap() ?
Here's my code:
Kernel module code
static int ti81xx_usmap_mmap(struct file* pFile, struct vm_area_struct* pVma)
{
pVma->vm_flags |= VM_RESERVED;
pVma->vm_page_prot = pgprot_noncached(pVma->vm_page_prot);
if (io_remap_pfn_range(pVma, pVma->vm_start, pVma->vm_pgoff,
pVma->vm_end - pVma->vm_start, pVma->vm_page_prot))
return -EAGAIN;
pVma->vm_ops = &ti81xx_usmap_vm_ops;
return 0;
}
static void ti81xx_usmap_test_gpio(void)
{
u32* pGpIoRegisters = ioremap_nocache(TI81XX_GPIO0_BASE, 0x400);
const u32 pin = 1 << 24;
int i;
/* I should use IO read/write functions instead of pointer dereferencing,
* but portability isn't the issue here */
pGpIoRegisters[OMAP4_GPIO_OE >> 2] &= ~pin; /* Set pin as output*/
for (i = 0; i < 200000000; ++i)
{
pGpIoRegisters[OMAP4_GPIO_SETDATAOUT >> 2] = pin;
pGpIoRegisters[OMAP4_GPIO_CLEARDATAOUT >> 2] = pin;
}
pGpIoRegisters[OMAP4_GPIO_OE >> 2] |= pin; /* Set pin as input*/
iounmap(pGpIoRegisters);
}
User space application code
int main(int argc, char** argv)
{
int file, i;
ulong* pGpIoRegisters = NULL;
ulong pin = 1 << 24;
file = open("/dev/ti81xx-usmap", O_RDWR | O_SYNC);
if (file < 0)
{
printf("open failed (%d)\n", errno);
return 1;
}
printf("Toggle from kernel space...");
fflush(stdout);
ioctl(file, TI81XX_USMAP_IOCTL_TEST_GPIO);
printf(" done\n");
pGpIoRegisters = mmap(NULL, 0x400, PROT_READ | PROT_WRITE, MAP_SHARED, file, TI81XX_GPIO0_BASE);
printf("Toggle from user space...");
fflush(stdout);
pGpIoRegisters[OMAP4_GPIO_OE >> 2] &= ~pin;
for (i = 0; i < 30000000; ++i)
{
pGpIoRegisters[OMAP4_GPIO_SETDATAOUT >> 2] = pin;
pGpIoRegisters[OMAP4_GPIO_CLEARDATAOUT >> 2] = pin;
}
pGpIoRegisters[OMAP4_GPIO_OE >> 2] |= pin;
printf(" done\n");
fflush(stdout);
munmap(pGpIoRegisters, 0x400);
close(file);
return 0;
}
This is because ioremap_nocache() still enables the CPU write buffer in your VM mapping, whereas pgprot_noncached() disables both bufferability and cacheability.
An apples-to-apples comparison would be to use ioremap_strongly_ordered() instead.
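Going the other direction (making the user-space mapping bufferable, like the ioremap_nocache() one) would mean changing the mmap handler. A rough sketch, assuming pgprot_writecombine() is available on this ARM kernel; note that with a bufferable mapping, writes may be merged or delayed, so whether that is acceptable for these GPIO registers needs checking:

static int ti81xx_usmap_mmap(struct file* pFile, struct vm_area_struct* pVma)
{
    pVma->vm_flags |= VM_RESERVED;
    /* Bufferable, non-cacheable mapping instead of strongly ordered */
    pVma->vm_page_prot = pgprot_writecombine(pVma->vm_page_prot);
    if (io_remap_pfn_range(pVma, pVma->vm_start, pVma->vm_pgoff,
                           pVma->vm_end - pVma->vm_start, pVma->vm_page_prot))
        return -EAGAIN;
    pVma->vm_ops = &ti81xx_usmap_vm_ops;
    return 0;
}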
My guess would be that since mmap has to check to make sure you're writing to memory you're allowed to write to, it's going to be slower than the kernel version (which I believe/assume doesn't do that kind of checking--with a kernel module you're responsible for testing until you're very sure you're not breaking things).
Try using do_mmap (I believe that's the one) to use mmap from kernel space, and see how that compares. If it's comparably faster, then I'm right. If it's not, it's something else.
I have a system where I am seeing strange behavior with the serial ports that I don't expect. I've previously seen this on occasion with usb-to-serial adapters, but now I'm seeing it on native serial ports as well, with much greater frequency.
The system is set up to run automated tests and will first perform some tasks that cause a large amount of data to be outputted from the serial device while I do not have the ports open. The device will also reset itself. Only the tx/rx lines are connected. There is no flow control.
After these tasks complete, the testware opens the serial ports and immediately fails because it gets unexpected responses. When I reproduce this, I found that if I open the serial port in a terminal program, I see several kilobytes of old data (that appears to have been sent when the port was closed) immediately flushed out. Once I close this program, I can then run the tests as expected.
What could cause this to happen? How does Linux handle buffering the serial port when the device is closed? If I opened a device, made it send output, and then closed it without reading from it, would this cause the same problem?
The Linux terminal driver buffers input even when the device is not open. This can be a useful feature, especially if the speed/parity/etc. are set appropriately.
To replicate the behavior of lesser operating systems, read all pending input from the port as soon as it is opened (a shorter tcflush() alternative is sketched after the code):
...
int fd = open ("/dev/ttyS0", O_RDWR | O_NOCTTY | O_SYNC);
if (fd < 0)
exit (1);
set_blocking (fd, 0); // disable reads blocked when no input ready
char buf [10000];
int n;
do {
n = read (fd, buf, sizeof buf);
} while (n > 0);
set_blocking (fd, 1); // enable read blocking (if desired)
... // now there is no pending input
void set_blocking (int fd, int should_block)
{
struct termios tty;
memset (&tty, 0, sizeof tty);
if (tcgetattr (fd, &tty) != 0)
{
error ("error %d getting term settings set_blocking", errno);
return;
}
tty.c_cc[VMIN] = should_block ? 1 : 0;
tty.c_cc[VTIME] = should_block ? 5 : 0; // 0.5 seconds read timeout
if (tcsetattr (fd, TCSANOW, &tty) != 0)
error ("error setting term %sblocking", should_block ? "" : "no");
}
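If the goal is simply to throw the stale bytes away rather than look at them, a shorter option is to flush the input queue, as some of the code earlier on this page also does. A sketch, assuming fd is the already-opened, already-configured port descriptor and <termios.h> and <stdio.h> are included:

/* Discard anything already sitting in the driver's input buffer. */
if (tcflush(fd, TCIFLUSH) != 0)
    perror("tcflush");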