segfault on write() with ~8MB buffer (OSX, Linux)

I was curious what kind of buffer sizes write() and read() could handle on Linux/OSX/FreeBSD, so I started playing around with dumb programs like the following:
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main( void ) {
    size_t s = 8*1024*1024 - 16*1024;
    while( 1 ) {
        s += 1024;
        int f = open( "test.txt", O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR | S_IXUSR );
        char mem[s];
        size_t written = write( f, &mem[0], s );
        close( f );
        printf( "(%ld) %lu\n", sizeof(size_t), written );
    }
    return 0;
}
This allowed me to test how close to a seeming "8MB barrier" I could get before segfaulting. Somewhere around the 8MB mark my program dies; here's an example of the output:
(8) 8373248
(8) 8374272
(8) 8375296
(8) 8376320
(8) 8377344
(8) 8378368
(8) 8379392
(8) 8380416
(8) 8381440
(8) 8382464
Segmentation fault: 11
This is the same on OSX and Linux; however, my FreeBSD VM is not only much faster at running this test, it can also go much further! I've successfully tested it up to 511MB, which is just a ridiculous amount of data to write in one call.
What is it that makes the write() call segfault, and how can I figure out the maximum amount that I can possibly write() in a single call, without doing something ridiculous like I'm doing right now?
(Note, all three operating systems are 64-bit, OSX 10.7.3, Ubuntu 11.10, FreeBSD 9.0)

The fault isn't within write(); it's a stack overflow. char mem[s] is a variable-length array, which lives on the stack, and the default stack size limit on both Linux and OSX is about 8 MB (check it with ulimit -s). Once s approaches that limit, touching the array blows the stack. Allocate the buffer on the heap instead. Try this:
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main( void )
{
    void *mem;
    size_t s = 512*1024*1024 - 16*1024;
    while( 1 )
    {
        s += 1024;
        int f = open( "test.txt", O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR | S_IXUSR );
        mem = malloc(s);
        size_t written = write( f, mem, s );
        free(mem);
        close( f );
        printf( "(%ld) %lu\n", sizeof(size_t), written );
    }
    return 0;
}
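As for the second part of the question — how much a single write() can take — the Linux write(2) man page notes that one call transfers at most 0x7ffff000 bytes (a bit under 2 GiB), and POSIX only defines the behavior for counts up to SSIZE_MAX. The stack limit that the VLA version was running into can be queried at run time; a minimal sketch using the standard getrlimit() call (not part of the original programs):

#include <stdio.h>
#include <sys/resource.h>

int main( void )
{
    struct rlimit rl;
    if ( getrlimit( RLIMIT_STACK, &rl ) == 0 )
        /* rlim_cur is the soft limit the process actually runs under;
           RLIM_INFINITY means "no limit" and prints as a huge number */
        printf( "stack soft limit: %llu bytes\n", (unsigned long long) rl.rlim_cur );
    return 0;
}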

Related

Will fallocate on mmap file reduce the memory consumption?

I am trying to mmap a 64M file into memory, then read & write into it. But sometimes a certain range within this file will no longer be used, so I called fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 16 << 20, 16 << 20); to release a 16M range inside it.
However, I find that the memory consumption does not seem to change (according to both free -m and cat /proc/meminfo).
I understand that fallocate will punch holes inside the file and return that range back to the filesystem. But I'm not sure whether it will reduce memory consumption for mmapped files.
If it does not reduce the memory consumption, where does that range go? Can another process get the underlying memory previously allocated to it?
(big.file is a normal 64M file, not a sparse file.)
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/falloc.h>
#include <stdint.h>

#define MMAP_SIZE (64UL << 20)   /* the file is 64M */

int main(int argc, char *argv[])
{
    uint8_t *addr;
    int offset = 0;              /* offset to touch; value not given in the original post */
    int fd = open("home/big.file", O_RDWR | O_CREAT, 0777);
    if (fd < 0)
        return -1;
    addr = (uint8_t *)mmap(NULL, MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (addr == MAP_FAILED)
        return -1;
    printf("data[0x%x] = %d\n", offset, addr[offset]);
    getchar();
    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 16 << 20, 16 << 20);
    getchar();
    close(fd);
    return 0;
}

Linux read() system call takes longer than my expectation ( serial port programming )

I am trying to read data sent from /dev/ttyUSB0 and print it out in byte format.
Question:
I expect the data to be printed once the number of bytes read reaches 40. However, it takes much longer than that: the read() system call hangs even though I believe more than 40 bytes should already be available. The data is eventually printed, but it should not take so long. Did I do anything wrong in this program?
thanks
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <termios.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#define BAUDRATE B9600
#define MODEMDEVICE "/dev/ttyUSB0"
#define FALSE 0
#define TRUE 1

int main(void)
{
    int fd, c, res;
    struct termios oldtio, newtio;
    unsigned char buf[40];

    fd = open(MODEMDEVICE, O_RDWR | O_NOCTTY);
    if (fd < 0) { perror(MODEMDEVICE); exit(-1); }

    tcgetattr(fd, &oldtio);
    bzero(&newtio, sizeof(newtio));
    newtio.c_cflag = BAUDRATE | CS8 | CLOCAL | CREAD;
    newtio.c_iflag = IGNPAR | ICRNL;
    newtio.c_oflag = 1;
    newtio.c_lflag = ICANON;
    tcflush(fd, TCIOFLUSH);
    tcsetattr(fd, TCSANOW, &newtio);

    int i;
    while (1) {
        res = read(fd, buf, 40);
        if (res == 40) {
            printf("res reaches 40 \n");
        }
        printf("res: %d\n", res);
        for (i = 0; i < res; ++i) {
            printf("%02x ", buf[i]);
        }
        return 0;
    }
}
--------------------raw mode code------------------------
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <termios.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#define BAUDRATE B9600
#define MODEMDEVICE "/dev/ttyUSB0"
#define _POSIX_SOURCE 1 /* POSIX compliant source */
#define FALSE 0
#define TRUE 1

volatile int STOP = FALSE;

int main(void)
{
    int fd, c, res;
    struct termios oldtio, newtio;
    unsigned char buf[255];

    fd = open(MODEMDEVICE, O_RDWR | O_NOCTTY);
    if (fd < 0) { perror(MODEMDEVICE); exit(-1); }

    tcgetattr(fd, &oldtio);              /* save current port settings */
    bzero(&newtio, sizeof(newtio));
    newtio.c_cflag = BAUDRATE | CRTSCTS | CS8 | CLOCAL | CREAD;
    newtio.c_iflag = IGNPAR;
    newtio.c_oflag = 0;

    /* set input mode (non-canonical, no echo,...) */
    newtio.c_lflag = 0;
    newtio.c_cc[VTIME] = 0;
    newtio.c_cc[VMIN] = 40;

    tcflush(fd, TCIFLUSH);
    tcsetattr(fd, TCSANOW, &newtio);

    int i;
    while (STOP == FALSE) {
        res = read(fd, buf, 255);
        for (i = 0; i < res; ++i) {
            printf("%02x \n", buf[i]);
        }
    }
    tcsetattr(fd, TCSANOW, &oldtio);
    return 0;
}
It can now print out the data once the buffer is full (which is 40 bytes).
One question:
When I modify the printf to
printf("%02x ", buf[i]); (removing the "\n")
it no longer prints when the buffer is full, only after more bytes are received. Why does this happen?
Thanks
You need to switch the terminal to raw mode to disable line buffering.
Citing this answer:
The terms raw and cooked only apply to terminal drivers. "Cooked" is
called canonical and "raw" is called non-canonical mode.
The terminal driver is, by default a line-based system: characters are
buffered internally until a carriage return (Enter or Return) before
it is passed to the program - this is called "cooked". This allows
certain characters to be processed (see stty(1)), such as Ctrl-D,
Ctrl-S, Ctrl-U, Backspace; essentially rudimentary line-editing. The
terminal driver "cooks" the characters before serving them up.
The terminal can be placed into "raw" mode where the characters are
not processed by the terminal driver, but are sent straight through
(it can be set that INTR and QUIT characters are still processed).
This allows programs like emacs and vi to use the entire screen more
easily.
You can read more about this in the "Canonical mode" section of the
termios(3) manpage.
See e.g. this or this for how to achieve that programmatically (I did not check the code, but it should be easy to find); a minimal sketch is also shown below.
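For illustration, a sketch of switching an already-opened serial descriptor to non-canonical ("raw") mode with a 40-byte read threshold; cfmakeraw() is standard termios, and the 40-byte threshold just mirrors the question:

#include <termios.h>

/* put the already-open descriptor fd into raw mode, blocking reads
   until at least 40 bytes have arrived (VMIN=40, VTIME=0) */
static int make_raw(int fd)
{
    struct termios tio;
    if (tcgetattr(fd, &tio) < 0)
        return -1;
    cfmakeraw(&tio);          /* clears ICANON, ECHO, signal handling, ... */
    tio.c_cc[VMIN]  = 40;     /* read() returns once 40 bytes are buffered */
    tio.c_cc[VTIME] = 0;      /* no inter-byte timeout */
    return tcsetattr(fd, TCSANOW, &tio);
}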
Alternatively you could use e.g. strace or ltrace to check what stty -F /dev/ttyUSB0 raw does (or read the manual page where it is described).
EDIT:
Regarding printf without a newline -- calling fflush(stdout); immediately after it should help (another layer of line buffering is taking place, this time in stdio).
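In the loop from the question, that would look something like this (only the fflush() line is new):

for (i = 0; i < res; ++i) {
    printf("%02x ", buf[i]);
}
fflush(stdout);   /* push the partially-filled line out now instead of waiting for '\n' */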
You might consider reading this and maybe this.

mknod() not creating named pipe

I'm trying to create a FIFO (named pipe) using the mknod() function:
int main() {
    char* file = "pipe.txt";
    int state;
    state = mknod(file, S_IFIFO & 0777, 0);
    printf("%d", state);
    return 0;
}
But the file is not created in my current directory; I tried listing it with ls -l. state comes back as -1.
I found similar questions here and on other sites and I've tried the solution that most suggested:
int main() {
    char* file = "pipe.txt";
    int state;
    unlink(file);
    state = mknod(file, S_IFIFO & 0777, 0);
    printf("%d", state);
    return 0;
}
This made no difference though and the error remains. Am I doing something wrong here or is there some sort of system intervention which is causing this problem?
Help.. Thanks in advance
You are using & to set the file type instead of |. From the docs:
The file type for path is OR'ed into the mode argument, and the
application shall select one of the following symbolic
constants...
Try this:
state = mknod(file, S_IFIFO | 0777, 0);
Because this works:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

int main() {
    char* file = "pipe.txt";
    int state;
    unlink(file);
    state = mknod(file, S_IFIFO | 0777, 0);
    printf("state %d\n", state);
    return 0;
}
Compile it:
gcc -o fifo fifo.c
Run it:
$ strace -e trace=mknod ./fifo
mknod("pipe.txt", S_IFIFO|0777) = 0
state 0
+++ exited with 0 +++
See the result:
$ ls -l pipe.txt
prwxrwxr-x. 1 lars lars 0 Jul 16 12:54 pipe.txt
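As a side note (not part of the original answer), the dedicated mkfifo() call from <sys/stat.h> does the same thing without any file-type flag juggling:

#include <stdio.h>
#include <sys/stat.h>

int main(void) {
    /* mkfifo() takes only the permission bits; the FIFO type is implied */
    if (mkfifo("pipe.txt", 0777) == -1) {
        perror("mkfifo");
        return 1;
    }
    return 0;
}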

How do I read data from bar 0, from userspace, on a pci-e card in linux?

On Windows there is a program called pcitree that allows you to set and read memory without writing a device driver. Is there a Linux alternative to pcitree that will allow me to read memory in BAR 0 of my PCIe card?
A simple use case would be that I use driver code to write a 32bit integer on the first memory address in block zero of my pci-e card. I then use pcitree alternative to read the value at the first memory address of block zero and see my integer.
Thank you
I found some code online that does what I want, here: github.com/billfarrow/pcimem.
As I understand it, this code maps kernel memory to user memory via the mmap system call.
This was mostly stolen from the readme of the program and the man page of mmap.
mmap takes
a start address
a size
memory protection flags
a file descriptor that is linked to BAR0 of your PCI card,
and an offset
mmap returns a userspace pointer to the memory defined by the start address and size parameters.
This code shows an example of mmap's usage.
//The file handle can be found by typing "lspci -v"
// and looking for your device.
fd = open("/sys/devices/pci0001\:00/0001\:00\:07.0/resource0", O_RDWR | O_SYNC);
//mmap returns a userspace address
//0, 4096 tells it to pull one page
ptr = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
printf("PCI BAR0 0x0000 = 0x%4x\n", *((unsigned short *) ptr));
I used the approach described above to read the PCI BAR0 register but got a segmentation fault. Debugging my code (below) with gdb shows that the return value of mmap() is (void *) 0xffffffffffffffff.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include <ctype.h>
#include <termios.h>
#include <sys/types.h>
#include <sys/mman.h>

#define PRINT_ERROR \
    do { \
        fprintf(stderr, "Error at line %d, file %s (%d) [%s]\n", \
                __LINE__, __FILE__, errno, strerror(errno)); exit(1); \
    } while(0)

#define MAP_SIZE 4096UL
#define MAP_MASK (MAP_SIZE - 1)

int main(int argc, char **argv) {
    int fd;
    void *ptr;

    //The file handle can be found by typing lspci -v
    //and looking for your device.
    fd = open("/sys/bus/pci/devices/0000\:00\:05.0/resource0", O_RDWR | O_SYNC);

    //mmap returns a userspace address
    //0, 4096 tells it to pull one page
    ptr = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    printf("PCI BAR0 0x0000 = 0x%4x\n", *((unsigned short *) ptr));

    if (munmap(ptr, 4096) == -1) PRINT_ERROR;
    close(fd);
    return 0;
}
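As an aside, 0xffffffffffffffff is simply (void *) -1, i.e. MAP_FAILED, so dereferencing ptr is what segfaults. Checking the open() and mmap() results and printing errno (a small addition, not in the snippet above) usually pinpoints the cause, often a permissions or path problem:

fd = open("/sys/bus/pci/devices/0000\:00\:05.0/resource0", O_RDWR | O_SYNC);
if (fd < 0) {
    perror("open");   /* e.g. EACCES when not run as root */
    return 1;
}
ptr = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED) {
    perror("mmap");   /* errno explains why the BAR could not be mapped */
    return 1;
}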
On a system with a functioning /dev/mem in the kernel, it is possible to read a BAR for a device using:
sudo dd if=/dev/mem skip=13701120 count=1 bs=256 | hexdump
Look at the dd man page. In the above example, 13701120 * 256 is the starting physical address from which 256 bytes will be read.
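To make the arithmetic concrete, suppose lspci -v reports BAR0 at physical address 0xd1100000 (a made-up value): 0xd1100000 is 3507486720 in decimal, and 3507486720 / 256 = 13701120, which is the skip value in the command above; bs=256 count=1 then dumps the first 256 bytes of that BAR.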

tail -f on a named pipe behaves strangely

I am trying to use named pipes for interprocess communication but I'm seeing strange things I cannot understand.
I have this "writer" code:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#define FIFO_NAME "american_maid"
int main(void)
{
    char s[300];
    int num, fd;

    mknod(FIFO_NAME, S_IFIFO | 0666, 0);

    printf("waiting for readers...\n");
    fd = open(FIFO_NAME, O_WRONLY);
    printf("FD = %d", fd);
    printf("got a reader--type some stuff\n");

    while (gets(s), !feof(stdin)) {
        if ((num = write(fd, s, strlen(s))) == -1)
            perror("write");
        else
            printf("speak: wrote %d bytes\n", num);
    }
    return 0;
}
Now when I do a "tail -f american_maid" the tail blocks waiting for a writer.
Then I run the writer code and it finds that tail -f is waiting on the other end of the pipe. So far so good.
When I type anything in the writer application, nothing shows up in the tail window. HOWEVER, as soon as I close (Ctrl-C) the writer app, ALL that I had typed appears in the tail window.
It's like the writer has to close before tail will show anything. Any ideas what is happening, why it's happening, and how I can fix this?
Thanks
There is nothing wrong here. tail wants to extract the last lines of its input; to determine which lines those are, it looks for end-of-file on the fifo, and it only sees end-of-file after the writer has closed the writing end of the fifo. A reader that doesn't need to find the end shows data immediately, as in the sketch below.
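For example, plain cat american_maid (or a minimal reader like this one, which just echoes whatever comes out of the fifo) prints each write as it arrives:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define FIFO_NAME "american_maid"

int main(void)
{
    char buf[300];
    ssize_t n;

    int fd = open(FIFO_NAME, O_RDONLY);      /* blocks until a writer opens the fifo */
    if (fd == -1) {
        perror("open");
        return 1;
    }
    while ((n = read(fd, buf, sizeof(buf))) > 0) {
        fwrite(buf, 1, (size_t)n, stdout);   /* print each chunk as it arrives */
        fputc('\n', stdout);
        fflush(stdout);
    }
    close(fd);
    return 0;
}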
