    if (!n) n++;
    while (align<n && align<ALIGN)
        align += align;

    LOCK(lock);

    cur += -cur & align-1;
    if (n > end-cur) {
        size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;

        if (!cur) {
            brk = __syscall(SYS_brk, 0);
            brk += -brk & PAGE_SIZE-1;
            cur = end = brk;
        }

        if (brk == end && req < SIZE_MAX-brk
            && !traverses_stack_p(brk, brk+req)
            && __syscall(SYS_brk, brk+req)==brk+req) {
            brk = end += req;
        } else {
            int new_area = 0;
            req = n + PAGE_SIZE-1 & -PAGE_SIZE;
            /* Only make a new area rather than individual mmap
             * if wasted space would be over 1/8 of the map. */
            if (req-n > req/8) {
                /* Geometric area size growth up to 64 pages,
                 * bounding waste by 1/8 of the area. */
                size_t min = PAGE_SIZE<<(mmap_step/2);
                if (min-n > end-cur) {
                    if (req < min) {
                        req = min;
                        if (mmap_step < 12) mmap_step++;
                    }
                    new_area = 1;
                }
            }
            void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
            if (mem == MAP_FAILED || !new_area) {
                UNLOCK(lock);
                return mem==MAP_FAILED ? 0 : mem;
            }
            cur = (uintptr_t)mem;
            end = cur + req;
        }
    }

    p = (void *)cur;
    cur += n;
    UNLOCK(lock);
    return p;
}

weak_alias(__simple_malloc, __libc_malloc_impl);
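Two bit tricks do the heavy lifting here: cur += -cur & align-1 rounds the bump pointer up to the requested power-of-two alignment, and n + PAGE_SIZE-1 & -PAGE_SIZE rounds a request up to whole pages. Note also that once mmap_step saturates at 12, min becomes PAGE_SIZE<<6, i.e. the 64-page cap the comment mentions. The standalone sketch below demonstrates the two rounding idioms; the helper name and sample values are illustrative, not part of musl.

    /* Minimal sketch of the two rounding idioms used above. The helper
     * name and sample values are illustrative; only the bit tricks come
     * from the allocator itself. Both require align to be a power of two. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of align by adding (-x mod align). */
    static uintptr_t round_up_pow2(uintptr_t x, uintptr_t align)
    {
        return x + (-x & (align - 1));
    }

    int main(void)
    {
        /* cur += -cur & align-1: bump the bump-pointer to the alignment. */
        assert(round_up_pow2(0x1001, 16) == 0x1010);
        assert(round_up_pow2(0x1010, 16) == 0x1010);   /* already aligned */

        /* req = n + PAGE_SIZE-1 & -PAGE_SIZE: round a request to whole pages.
         * -PAGE_SIZE is the mask with the low log2(PAGE_SIZE) bits clear. */
        uintptr_t page = 4096, n = 5000;
        uintptr_t req = (n + page - 1) & -page;
        printf("request %lu -> %lu bytes (%lu pages)\n",
               (unsigned long)n, (unsigned long)req, (unsigned long)(req / page));
        assert(req == 8192);
        return 0;
    }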
The maximum size of the process’s data segment (initialized data, uninitialized data, and heap). This limit affects calls to brk(2) and sbrk(2), which fail with the error ENOMEM upon encountering the soft limit of this resource.
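To see the limit in action from userspace, the sketch below lowers RLIMIT_DATA with setrlimit() and then asks for more heap than the new limit allows. The 1 MiB figure is an arbitrary example, and the exact point of failure depends on how much data and heap the process already uses.

    /* Sketch: shrink RLIMIT_DATA and watch sbrk() fail with ENOMEM. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
        if (setrlimit(RLIMIT_DATA, &rl) != 0) {
            perror("setrlimit");
            return 1;
        }

        /* Try to grow the data segment well past the new soft limit. */
        if (sbrk(4 << 20) == (void *)-1)
            printf("sbrk failed: %s\n", strerror(errno));   /* expect ENOMEM */
        else
            printf("sbrk unexpectedly succeeded\n");
        return 0;
    }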
#ifdef CONFIG_COMPAT_BRK
    /*
     * CONFIG_COMPAT_BRK can still be overridden by setting
     * randomize_va_space to 2, which will still cause mm->start_brk
     * to be arbitrarily shifted
     */
    if (current->brk_randomized)
        min_brk = mm->start_brk;
    else
        min_brk = mm->end_data;
#else
    min_brk = mm->start_brk;
#endif
    if (brk < min_brk)
        goto out;
    /*
     * Check against rlimit here. If this check is done later after the test
     * of oldbrk with newbrk then it can escape the test and let the data
     * segment grow beyond its set limit in the case where the limit is
     * not page aligned -Ram Gupta
     */
    if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
                          mm->end_data, mm->start_data))
        goto out;
    /*
     * Always allow shrinking brk.
     * __do_munmap() may downgrade mmap_lock to read.
     */
    if (brk <= mm->brk) {
        int ret;
        /*
         * mm->brk must be protected by write mmap_lock so update it
         * before downgrading mmap_lock. When __do_munmap() fails,
         * mm->brk will be restored from origbrk.
         */
        mm->brk = brk;
        ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
        if (ret < 0) {
            mm->brk = origbrk;
            goto out;
        } else if (ret == 1) {
            downgraded = true;
        }
        goto success;
    }
    /* Check against existing mmap mappings. */
    next = find_vma(mm, oldbrk);
    if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
        goto out;
    /* Ok, looks good - let it rip. */
    if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
        goto out;
    mm->brk = brk;
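For reference, the rlimit test earlier delegates to check_data_rlimit(): the new brk may not push the combined heap and initialized-data size past RLIMIT_DATA. The sketch below is a simplified userspace rendering of that accounting, with unsigned long standing in for the kernel types and the example addresses invented for illustration.

    /* Simplified sketch of the bound check_data_rlimit() enforces: the new
     * brk may not push (heap size + initialized-data size) past RLIMIT_DATA.
     * Types and the RLIM_INFINITY sentinel are simplified for illustration. */
    #include <stdio.h>

    #define RLIM_INFINITY (~0UL)

    static int check_data_rlimit(unsigned long rlim, unsigned long new_brk,
                                 unsigned long start_brk, unsigned long end_data,
                                 unsigned long start_data)
    {
        if (rlim < RLIM_INFINITY &&
            (new_brk - start_brk) + (end_data - start_data) > rlim)
            return -1;      /* would exceed RLIMIT_DATA */
        return 0;
    }

    int main(void)
    {
        /* Example layout: 64 KiB of initialized data, heap starting at 0x800000. */
        unsigned long start_data = 0x600000, end_data = 0x610000;
        unsigned long start_brk = 0x800000;

        /* Growing the heap to 1 MiB is fine under a 2 MiB limit... */
        printf("%d\n", check_data_rlimit(2UL << 20, start_brk + (1UL << 20),
                                         start_brk, end_data, start_data));
        /* ...but not under a 512 KiB limit. */
        printf("%d\n", check_data_rlimit(512UL << 10, start_brk + (1UL << 20),
                                         start_brk, end_data, start_data));
        return 0;
    }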