#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>   /* needed for getrusage() and struct rusage */

int main() {
    int i = 0;
    struct rusage r_usage;

    while (++i <= 10) {
        void *m = malloc(20*1024*1024);
        memset(m, 0, 20*1024*1024);
        getrusage(RUSAGE_SELF, &r_usage);
        printf("Memory usage = %ld\n", r_usage.ru_maxrss);
        sleep(3);
    }

    printf("\nAllocated memory, sleeping ten seconds after which we will check again...\n\n");
    sleep(10);
    getrusage(RUSAGE_SELF, &r_usage);
    printf("Memory usage = %ld\n", r_usage.ru_maxrss);
    return 0;
}
The above code uses the ru_maxrss field of the rusage structure, which gives the value of the maximum resident set size. What does that mean? Each time the program is executed it prints a different value, so how should the output of this code be explained?
These are screenshots of two executions of the same code that give different outputs. How can these numbers be explained, and what can be interpreted from the two outputs?
Resident set size (RSS) means, roughly, the total amount of physical memory assigned to a process at a given point in time. It does not count pages that have been swapped out, or that are mapped from a file but not currently loaded into physical memory.
"Maximum RSS" means the maximum of the RSS since the process's birth, i.e. the largest it has ever been. So this number tells you the largest amount of physical memory your process has ever been using at any one instant.
It can vary from one run to the next if, for instance, the OS decided to swap out different amounts of your program's memory at different times. This decision would depend in part on what the rest of the system is doing, and where else physical memory is needed.
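To make the "peak" nature concrete, here is a minimal sketch: after free(), the current RSS may drop, but ru_maxrss stays at its high-water mark. (The 50 MiB figure is arbitrary.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

static long peak_rss_kb(void)
{
    struct rusage r;
    getrusage(RUSAGE_SELF, &r);
    return r.ru_maxrss;   /* kilobytes on Linux */
}

int main(void)
{
    printf("before: %ld kB\n", peak_rss_kb());

    char *p = malloc(50 * 1024 * 1024);
    memset(p, 0, 50 * 1024 * 1024);   /* touch the pages so they count toward RSS */
    printf("after touching 50 MiB: %ld kB\n", peak_rss_kb());

    free(p);
    printf("after free: %ld kB\n", peak_rss_kb());   /* stays at the high-water mark */
    return 0;
}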
I have recently discovered that Linux does not guarantee that memory allocated with mmap can be freed with munmap if this leads to a situation where the number of VMA (Virtual Memory Area) structures exceeds vm.max_map_count. The manpage states this (almost) clearly:
ENOMEM The process's maximum number of mappings would have been exceeded.
This error can also occur for munmap(), when unmapping a region
in the middle of an existing mapping, since this results in two
smaller mappings on either side of the region being unmapped.
The problem is that the Linux kernel always tries to merge VMA structures if possible, making munmap fail even for separately created mappings. I was able to write a small program to confirm this behavior:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>

// value of vm.max_map_count
#define VM_MAX_MAP_COUNT (65530)

// number of vma for the empty process linked against libc - /proc/<id>/maps
#define VMA_PREMAPPED (15)

#define VMA_SIZE (4096)
#define VMA_COUNT ((VM_MAX_MAP_COUNT - VMA_PREMAPPED) * 2)

int main(void)
{
    static void *vma[VMA_COUNT];

    for (int i = 0; i < VMA_COUNT; i++) {
        vma[i] = mmap(0, VMA_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

        if (vma[i] == MAP_FAILED) {
            printf("mmap() failed at %d\n", i);
            return 1;
        }
    }

    for (int i = 0; i < VMA_COUNT; i += 2) {
        if (munmap(vma[i], VMA_SIZE) != 0) {
            printf("munmap() failed at %d (%p): %m\n", i, vma[i]);
        }
    }
}
It allocates a large number of pages (twice the default allowed maximum) using mmap, then munmaps every second page to create a separate VMA structure for each remaining page. On my machine the last munmap call always fails with ENOMEM.
Initially I thought that munmap never fails if used with the same address and size values that were used to create the mapping. Apparently this is not the case on Linux, and I was not able to find information about similar behavior on other systems.
At the same time, in my opinion, partial unmapping applied to the middle of a mapped region is expected to fail on any OS for every sane implementation, but I haven't found any documentation that says such failure is possible.
I would usually consider this a bug in the kernel, but knowing how Linux deals with memory overcommit and OOM, I am almost sure this is a "feature" that exists to improve performance and decrease memory consumption.
Other information I was able to find:
Similar APIs on Windows do not have this "feature" due to their design (see MapViewOfFile, UnmapViewOfFile, VirtualAlloc, VirtualFree) - they simply do not support partial unmapping.
glibc malloc implementation does not create more than 65535 mappings, backing off to sbrk when this limit is reached: https://code.woboq.org/userspace/glibc/malloc/malloc.c.html. This looks like a workaround for this issue, but it is still possible to make free silently leak memory.
jemalloc had trouble with this and tried to avoid using mmap/munmap because of this issue (I don't know how it ended for them).
Do other OSes really guarantee the deallocation of memory mappings? I know Windows does; what about other Unix-like operating systems, such as FreeBSD or QNX?
EDIT: I am adding an example that shows how glibc's free can leak memory when its internal munmap call fails with ENOMEM. Use strace to see that munmap fails:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>

// value of vm.max_map_count
#define VM_MAX_MAP_COUNT (65530)

#define VMA_MMAP_SIZE (4096)
#define VMA_MMAP_COUNT (VM_MAX_MAP_COUNT)

// glibc's malloc default mmap_threshold is 128 KiB
#define VMA_MALLOC_SIZE (128 * 1024)
#define VMA_MALLOC_COUNT (VM_MAX_MAP_COUNT)

int main(void)
{
    static void *mmap_vma[VMA_MMAP_COUNT];

    for (int i = 0; i < VMA_MMAP_COUNT; i++) {
        mmap_vma[i] = mmap(0, VMA_MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

        if (mmap_vma[i] == MAP_FAILED) {
            printf("mmap() failed at %d\n", i);
            return 1;
        }
    }

    for (int i = 0; i < VMA_MMAP_COUNT; i += 2) {
        if (munmap(mmap_vma[i], VMA_MMAP_SIZE) != 0) {
            printf("munmap() failed at %d (%p): %m\n", i, mmap_vma[i]);
            return 1;
        }
    }

    static void *malloc_vma[VMA_MALLOC_COUNT];

    for (int i = 0; i < VMA_MALLOC_COUNT; i++) {
        malloc_vma[i] = malloc(VMA_MALLOC_SIZE);

        if (malloc_vma[i] == NULL) {
            printf("malloc() failed at %d\n", i);
            return 1;
        }
    }

    for (int i = 0; i < VMA_MALLOC_COUNT; i += 2) {
        free(malloc_vma[i]);
    }
}
One way to work around this problem on Linux is to mmap more than one page at once (e.g. 1 MB at a time), and also map a separator page after it. So you actually call mmap on 257 pages of memory, then remap the last page with PROT_NONE so that it cannot be accessed. This defeats the VMA-merging optimization in the kernel. Since you are allocating many pages at once, you should not run into the max mapping limit. The downside is that you have to manually manage how you want to slice up the large mmap, as sketched below.
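A rough sketch of that workaround; the 4 KiB page size and 257-page chunk layout are illustrative, and mprotect is used here to create the separator rather than re-mmapping the page (either should work):

#include <sys/mman.h>

#define PAGE_SIZE   4096
#define CHUNK_PAGES 257   /* 256 usable pages + 1 separator page */

/* Map a 257-page chunk, then turn its last page into a PROT_NONE
 * separator so the kernel cannot merge this chunk's VMA with a
 * neighbouring PROT_READ|PROT_WRITE mapping. */
static void *map_chunk(void)
{
    char *p = mmap(0, CHUNK_PAGES * PAGE_SIZE, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 0;

    /* the separator page keeps adjacent chunks in distinct VMAs */
    if (mprotect(p + (CHUNK_PAGES - 1) * PAGE_SIZE, PAGE_SIZE, PROT_NONE) != 0) {
        munmap(p, CHUNK_PAGES * PAGE_SIZE);
        return 0;
    }
    return p;   /* the caller slices up the first 256 pages itself */
}

Each chunk still costs two VMAs (the usable range plus the separator), but that is 256 usable pages per two VMAs instead of at most one page per VMA near the limit.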
As to your questions:
System calls can fail on any system for a variety of reasons. Documentation is not always complete.
You are allowed to munmap part of an mmap'd region as long as the address passed in lies on a page boundary and the length argument is rounded up to the next multiple of the page size.
I would like to calculate the memory usage of a single process. After a little bit of research I came across smaps and statm.
First of all, what are smaps and statm? What is the difference?
statm has an RSS field, and in smaps I summed up all the Rss values. But those values are different for the same process. I know that statm measures in pages; for comparison purposes I converted that value to kB, as in smaps. But the values are still not equal.
Why do these two values differ, even though they represent the RSS value of the same process?
statm:
232214 80703 7168 27 0 161967 0 (measured in pages, page size is 4096)
smaps (Rss summed over all mappings):
Rss: 1956
My aim is to calculate the memory usage of a single process. I am interested in two values: USS and PSS.
Can I obtain those two values just by using smaps? Are those values correct?
Also, I would like to report the value as a percentage.
I think statm is an approximated simplification of smaps, which is more expensive to produce. I came to this conclusion after looking at the source:
smaps
The information you see in smaps is defined in /fs/proc/task_mmu.c:
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
    (...)
    struct mm_walk smaps_walk = {
        .pmd_entry = smaps_pte_range,
        .mm = vma->vm_mm,
        .private = &mss,
    };

    memset(&mss, 0, sizeof mss);
    walk_page_vma(vma, &smaps_walk);
    show_map_vma(m, vma, is_pid);
    seq_printf(m,
        (...)
        "Rss: %8lu kB\n"
        (...)
        mss.resident >> 10,
The information in mss is used by walk_page_vma, defined in /mm/pagewalk.c. However, the mss member resident is not filled in walk_page_vma - instead, walk_page_vma calls the callback specified in smaps_walk:
.pmd_entry = smaps_pte_range,
.private = &mss,
like this:
if (walk->pmd_entry)
    err = walk->pmd_entry(pmd, addr, next, walk);
So what does our callback, smaps_pte_range in /fs/proc/task_mmu.c, do?
It calls smaps_pte_entry and smaps_pmd_entry in some circumstances, both of which call smaps_account(), which in turn... updates the resident size! All of these functions are defined in the already linked task_mmu.c, so I didn't post the relevant code snippets as they can easily be seen in the linked sources.
PTE stands for Page Table Entry and PMD is Page Middle Directory. So basically we iterate through the page table entries associated with the given process and update the RAM usage depending on the circumstances.
statm
The information you see in statm is defined in /fs/proc/array.c:
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
}
seq_put_decimal_ull(m, 0, size);
seq_put_decimal_ull(m, ' ', resident);
seq_put_decimal_ull(m, ' ', shared);
seq_put_decimal_ull(m, ' ', text);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', data);
seq_put_decimal_ull(m, ' ', 0);
seq_putc(m, '\n');
return 0;
}
This time, resident is filled by task_statm. That function has two implementations, one in /fs/proc/task_mmu.c and a second in /fs/proc/task_nommu.c. Since they're almost surely mutually exclusive, I'll focus on the implementation in task_mmu.c (the same file that contains the smaps code above). In this implementation we see that
unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
    *shared = get_mm_counter(mm, MM_FILEPAGES);
    (...)
    *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
    return mm->total_vm;
}
it queries some counters, namely MM_FILEPAGES and MM_ANONPAGES. These counters are modified during various operations on memory, such as do_wp_page defined in /mm/memory.c. All of the modifications seem to be done by files located in /mm/, and there seem to be quite a lot of them, so I didn't include them here.
Conclusion
smaps does complicated iteration through all referenced memory regions and updates resident size using the collected information. statm uses data that was already calculated by someone else.
The most important part is that while smaps collects the data each time in an independent manner, statm uses counters that get incremented or decremented during the process's life cycle. There are a lot of places that need to do this bookkeeping, and perhaps some of them don't update the counters the way they should. That's why IMO statm is inferior to smaps, even if it takes fewer CPU cycles to complete.
Please note that this is a conclusion I drew based on common sense, and I might be wrong - perhaps there are no internal inconsistencies in the counter bookkeeping; instead, the counters might simply classify some pages differently than smaps does. At this point I believe it would be wise to take the question to some experienced kernel maintainers.
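In the meantime, if you want to observe the discrepancy yourself, and also pull out the USS and PSS values the question asks about, here is a minimal sketch; it assumes the usual kB units of the smaps fields and takes USS as the sum of Private_Clean and Private_Dirty (field names per man 5 proc):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* statm: the second field is the resident size, in pages */
    long pages = 0;
    FILE *f = fopen("/proc/self/statm", "r");
    if (f) {
        fscanf(f, "%*ld %ld", &pages);
        fclose(f);
    }
    printf("statm RSS: %ld kB\n", pages * sysconf(_SC_PAGESIZE) / 1024);

    /* smaps: sum the per-mapping Rss, Pss and Private_* fields (all in kB) */
    long rss = 0, pss = 0, uss = 0, v;
    char line[256];
    f = fopen("/proc/self/smaps", "r");
    if (f) {
        while (fgets(line, sizeof line, f)) {
            if (sscanf(line, "Rss: %ld", &v) == 1) rss += v;
            else if (sscanf(line, "Pss: %ld", &v) == 1) pss += v;
            else if (sscanf(line, "Private_Clean: %ld", &v) == 1) uss += v;
            else if (sscanf(line, "Private_Dirty: %ld", &v) == 1) uss += v;
        }
        fclose(f);
    }
    printf("smaps RSS: %ld kB, PSS: %ld kB, USS: %ld kB\n", rss, pss, uss);
    return 0;
}

To express any of these as a percentage, divide by MemTotal from /proc/meminfo.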
Sorry if the question is basic: are there options in the shell to show how much time and how much memory (maximum memory occupied) the execution of a command took?
Example: I want to call a binary as follows: ./binary --option1 option1 --option2 option2
After the execution of this command I would like to know how much time and memory it used.
Thanks
The time(1) command, either as a separate executable or as a shell built-in, can be used to measure the time used by a program, both in terms of wall-clock time and CPU time.
But measuring the memory usage of a program, or even agreeing how to define it, is a bit different. Do you want the sum of its allocations? The maximum amount of memory allocated at a single moment? Are you interested in what the code does, or in the program behavior as a whole, where the memory allocator makes a difference? And how do you consider the memory used by shared objects? Or memory-mapped areas?
Valgrind may help with some memory-related questions, but it is more of a development tool than a day-to-day system administration tool. More specifically, the Massif heap profiler can be used to profile the memory usage of an application, but it does have a measurable performance impact, especially with stack profiling enabled.
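If GNU time is installed as a standalone binary (usually /usr/bin/time, distinct from the shell built-in), its verbose mode may already be enough here, since it reports the wall-clock and CPU times together with the peak RSS:

/usr/bin/time -v ./binary --option1 option1 --option2 option2

Look for "Elapsed (wall clock) time" and "Maximum resident set size (kbytes)" in the output. On BSD-flavoured systems the equivalent flag is -l.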
There are several files in /proc that might be simpler than using a profiler, assuming you know the PID of the process in which you're interested.
Of primary interest is /proc/$PID/status, which lists (among other things) the peak and current virtual memory sizes (VmPeak and VmSize respectively), and the resident set "high water mark" and current resident set size (VmHWM and VmRSS respectively).
I set up a simple C program to grab memory and then free it, watched the file in /proc corresponding to that program's PID, and it seemed to confirm the manpage.
See man proc for a complete list of files that may interest you.
Here's the command line and program I used for the test:
Monitored with
PID="<program's pid>"
watch "cat /proc/$PID/status | grep ^Vm"
( compile with gcc -o grabmem grabmem.c -std=c99 )
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

#define INTNUM ( 1024 * 1024 )
#define PASSES 16

int main(){
    int* mems[PASSES];   /* was a hard-coded 16; use PASSES consistently */
    int cur = 0;

    while( (mems[cur] = (int*)calloc( sizeof( int ), INTNUM )) != NULL ){
        for( int i = 0; i < INTNUM; i++)
            mems[cur][i] = rand();

        printf("PID %i Pass %i: Grabbed another %zu bytes of mem.\n",
            (int)getpid(),
            cur,
            sizeof(int) * INTNUM
        );
        sleep( 1 );

        cur++;
        if( cur >= PASSES ){
            printf("Freeing memory...");
            for( cur = (PASSES - 1); cur >= 0; cur-- ){
                free( mems[cur] );
                mems[cur] = NULL;
            }
            cur = 0;
            printf("OK\n");
        }
    }

    fprintf(stderr, "Couldn't calloc() memory.\n");
    exit( -1 );
}
When I run this, it seems to have no problem allocating memory, with cnt going into the thousands. I don't understand why; aren't I supposed to get a NULL at some point? Thanks!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

int main(void)
{
    long C = pow(10, 9);
    int cnt = 0;
    int conversion = 8 * 1024 * 1024;
    int *p;

    while (1)
    {
        p = (int *)malloc(C * sizeof(int));
        if (p != NULL)
            cnt++;
        else
            break;

        if (cnt % 10 == 0)
            printf("number of successful malloc is %d with %ld Mb\n", cnt, cnt * C / conversion);
    }
    return 0;
}
Are you running this on Linux? Linux has a highly surprising feature known as overcommit. It doesn't actually allocate memory when you call malloc(), but rather when you actually use that memory. malloc() will happily let you allocate as much memory as your heart desires, never returning a NULL pointer.
It's only when you actually access the memory that Linux takes you seriously and goes out searching for free memory to give you. Of course there may not actually be enough memory to keep the promise it gave your program. You say, "Give me 8 GB," and malloc() says, "Sure." Then you try to write to your pointer and Linux says, "Oops! I lied. How about I just kill off processes (probably yours) until I free up enough memory?"
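A minimal sketch of the distinction; it assumes a 64-bit system, and whether the malloc succeeds at all depends on the /proc/sys/vm/overcommit_memory setting:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* With overcommit enabled (a common default), asking for 64 GiB
     * typically succeeds even on a machine with far less RAM+swap... */
    size_t huge = 64UL * 1024 * 1024 * 1024;
    char *p = malloc(huge);
    printf("malloc(64 GiB): %s\n", p ? "succeeded" : "failed");

    /* ...only touching the pages forces the kernel to back them with real
     * memory, which is when the OOM killer may step in. */
    /* memset(p, 1, huge); */   /* uncomment at your own risk */

    free(p);
    return 0;
}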
You're allocating virtual memory. On a 64-bit OS, virtual memory is available in almost unlimited supply.
I wrote a simple program to find the maximum number of threads that a process can have in Linux (CentOS 5). Here is the code:
#include <iostream>
#include <pthread.h>
#include <unistd.h>
using namespace std;

void *thread(void *i);  // forward declaration, since main() uses it first

int main()
{
    pthread_t thrd[400];
    for(int i = 0; i < 400; i++)
    {
        int err = pthread_create(&thrd[i], NULL, thread, (void*)i);
        if(err != 0)
            cout << "thread creation failed: " << i << " error code: " << err << endl;
    }
    return 0;
}

void *thread(void *i)
{
    sleep(100); // keep the thread alive
    return 0;
}
I found that the maximum number of threads was only 300!? What if I need more than that?
I should mention that pthread_create returns 12 (ENOMEM) as the error code.
Thanks in advance.
There is a thread limit in Linux, and it can be modified at runtime by writing the desired limit to /proc/sys/kernel/threads-max. The default value is computed from the available system memory. In addition to that limit, there's also another one: /proc/sys/vm/max_map_count, which limits the maximum number of mmapped segments, and at least recent kernels will mmap memory per thread. It should be safe to increase that limit a lot if you hit it.
However, the limit you're hitting is lack of virtual memory in a 32-bit operating system. Install a 64-bit Linux if your hardware supports it and you'll be fine. I can easily start 30000 threads with a stack size of 8 MB. The system has a single Core 2 Duo + 8 GB of system memory (I'm using 5 GB for other stuff at the same time) and it's running 64-bit Ubuntu with kernel 2.6.32. Note that memory overcommit (/proc/sys/vm/overcommit_memory) must be allowed, because otherwise the system would need at least 240 GB of committable memory (the sum of real memory and swap space).
If you need lots of threads and cannot use a 64-bit system, your only choice is to minimize memory usage per thread to conserve virtual memory. Start by requesting as little stack as you can live with, as sketched below.
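For instance, a minimal sketch of requesting a small per-thread stack; the 64 KiB figure is illustrative, and pthread_attr_setstacksize rejects anything below PTHREAD_STACK_MIN:

#include <limits.h>    /* PTHREAD_STACK_MIN */
#include <pthread.h>
#include <stdio.h>
#include <string.h>    /* strerror */

static void *worker(void *arg)
{
    (void)arg;   /* keep the stack frame shallow */
    return 0;
}

int main(void)
{
    pthread_attr_t attr;
    pthread_t t;
    size_t stack = 64 * 1024;          /* pick what your threads really need */

    if (stack < PTHREAD_STACK_MIN)
        stack = PTHREAD_STACK_MIN;     /* setstacksize fails with EINVAL below this */

    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, stack);

    int err = pthread_create(&t, &attr, worker, NULL);
    if (err != 0)
        fprintf(stderr, "pthread_create: %s\n", strerror(err));
    else
        pthread_join(t, NULL);

    pthread_attr_destroy(&attr);
    return 0;
}

Compile with -pthread; every thread created with attr then gets the small stack instead of the 8-10 MB default.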
Your system limits may not be allowing you to map the stacks of all the threads you require. Look at /proc/sys/vm/max_map_count, and see this answer. I'm not 100% sure this is your problem, because most people run into problems at much larger thread counts.
I also encountered this problem when my number of threads crossed a certain threshold.
It was because of the user-level limit (the number of processes a user can run at a time), set to 1024 in /etc/security/limits.conf.
So check your /etc/security/limits.conf and look for an entry like:
username <-|soft|hard> nproc 1024
Change it to a larger value, e.g. 100k (this requires sudo/root privileges), and it should work for you.
To learn more about the security policy, see http://linux.die.net/man/5/limits.conf.
Check the stack size per thread with ulimit; in my case (Red Hat Linux, kernel 2.6):
ulimit -a
...
stack size (kbytes, -s) 10240
Each of your threads will get this amount of memory (10 MB) assigned for its stack. With a 32-bit program and a maximum address space of 4 GB, that is a maximum of only 4096 MB / 10 MB = 409 threads!!! Minus program code, minus heap space, that probably leads to your observed maximum of 300 threads.
You should be able to raise this by compiling a 64-bit application or by setting ulimit -s 8192 or even ulimit -s 4096. Whether this is advisable is another discussion...
You will run out of memory too unless you shrink the default thread stack size. It's 10 MB on our version of Linux.
EDIT:
Error code 12 = out of memory, so I think the 1 MB stack is still too big for you. Compiled for 32-bit, I can get a 100 KB stack to give me 30k threads. Beyond 30k threads I get error code 11, which means no more threads allowed. A 1 MB stack gives me about 4k threads before error code 12. 10 MB gives me 427 threads. 100 MB gives me 42 threads. 1 GB gives me 4... We have a 64-bit OS with 64 GB of RAM. Is your OS 32-bit? When I compile for 64-bit, I can use any stack size I want and instead hit the limit on thread count.
Also, I noticed that if I turn the profiling stuff (Tools | Profiling) on in NetBeans and run from the IDE, I can only get 400 threads. Weird. NetBeans also dies if you use up all the threads.
Here is a test app you can run:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // strerror
#include <pthread.h>
#include <signal.h>
#include <sched.h>    // sched_yield

// this prevents the compiler from reordering code over this COMPILER_BARRIER;
// it emits no instructions, it only constrains the optimizer
#define COMPILER_BARRIER() __asm__ __volatile__ ("" ::: "memory")

sigset_t _fSigSet;
volatile int _cActive = 0;
pthread_t thrd[1000000];

void *thread(void *i)
{
    int nSig, cActive;

    cActive = __sync_fetch_and_add(&_cActive, 1);
    COMPILER_BARRIER(); // make sure the active count is incremented before sigwait

    // sigwait is a handy way to sleep a thread and wake it on command
    sigwait(&_fSigSet, &nSig); // make the thread stay alive

    COMPILER_BARRIER(); // make sure the active count is decremented after sigwait
    cActive = __sync_fetch_and_add(&_cActive, -1);
    //printf("%d(%d) ", i, cActive);
    return 0;
}

int main(int argc, char** argv)
{
    pthread_attr_t attr;
    int cThreadRequest, cThreads, i, err, cActive, cbStack;

    cbStack = (argc > 1) ? atoi(argv[1]) : 0x100000;
    cThreadRequest = (argc > 2) ? atoi(argv[2]) : 30000;

    sigemptyset(&_fSigSet);
    sigaddset(&_fSigSet, SIGUSR1);
    sigaddset(&_fSigSet, SIGSEGV);
    // block these signals so sigwait() in the threads receives them reliably;
    // the child threads inherit this signal mask
    pthread_sigmask(SIG_BLOCK, &_fSigSet, NULL);

    printf("Start\n");

    pthread_attr_init(&attr);

    if ((err = pthread_attr_setstacksize(&attr, cbStack)) != 0)
        printf("pthread_attr_setstacksize failed: err: %d %s\n", err, strerror(err));

    for (i = 0; i < cThreadRequest; i++)
    {
        if ((err = pthread_create(&thrd[i], &attr, thread, (void*)(long)i)) != 0)
        {
            printf("pthread_create failed on thread %d, error code: %d %s\n",
                   i, err, strerror(err));
            break;
        }
    }
    cThreads = i;
    printf("\n");

    // wait for threads to all be created, although we might not wait for
    // all threads to make it through sigwait
    while (1)
    {
        cActive = _cActive;
        if (cActive == cThreads)
            break;
        printf("Waiting A %d/%d,", cActive, cThreads);
        sched_yield();
    }

    // wake em all up so they exit
    for (i = 0; i < cThreads; i++)
        pthread_kill(thrd[i], SIGUSR1);

    // wait for them all to exit, although we might be able to exit before
    // the last thread returns
    while (1)
    {
        cActive = _cActive;
        if (!cActive)
            break;
        printf("Waiting B %d/%d,", cActive, cThreads);
        sched_yield();
    }

    printf("\nDone. Threads requested: %d. Threads created: %d. StackSize=%lfmb\n",
           cThreadRequest, cThreads, (double)cbStack/0x100000);
    return 0;
}