diff --git a/deps/jemalloc/doc/jemalloc.html b/deps/jemalloc/doc/jemalloc.html
index b1ba461b..415e298d 100644
--- a/deps/jemalloc/doc/jemalloc.html
+++ b/deps/jemalloc/doc/jemalloc.html
@@ -1,8 +1,8 @@
-JEMALLOC

Name

jemalloc — general purpose memory allocation functions

LIBRARY

This manual describes jemalloc 2.2.1-0-g5ef7abf6d846720fb3fb8c737861c99b5ad1d862. More information
+JEMALLOC

Name

jemalloc — general purpose memory allocation functions

LIBRARY

This manual describes jemalloc 3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046. More information can be found at the jemalloc website.

SYNOPSIS

#include <stdlib.h>
-#include <jemalloc/jemalloc.h>

Standard API

void *malloc(size_t size);
 
void *calloc(size_t number,
 size_t size);
 
int posix_memalign(void **ptr,
 size_t alignment,
 size_t size);
 
void *realloc(void *ptr,
 size_t size);
 
void free(void *ptr);
 

Non-standard API

size_t malloc_usable_size(const void *ptr);
 
void malloc_stats_print(void (*write_cb)
+#include <jemalloc/jemalloc.h>

Standard API

void *malloc(size_t size);
 
void *calloc(size_t number,
 size_t size);
 
int posix_memalign(void **ptr,
 size_t alignment,
 size_t size);
 
void *aligned_alloc(size_t alignment,
 size_t size);
 
void *realloc(void *ptr,
 size_t size);
 
void free(void *ptr);
 

Non-standard API

size_t malloc_usable_size(const void *ptr);
 
void malloc_stats_print(void (*write_cb) (void *, const char *) - ,
 void *cbopaque,
 const char *opts);
 
int mallctl(const char *name,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
int mallctlnametomib(const char *name,
 size_t *mibp,
 size_t *miblenp);
 
int mallctlbymib(const size_t *mib,
 size_t miblen,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
void (*malloc_message)(void *cbopaque,
 const char *s);
 

const char *malloc_conf;

Experimental API

int allocm(void **ptr,
 size_t *rsize,
 size_t size,
 int flags);
 
int rallocm(void **ptr,
 size_t *rsize,
 size_t size,
 size_t extra,
 int flags);
 
int sallocm(const void *ptr,
 size_t *rsize,
 int flags);
 
int dallocm(void *ptr,
 int flags);
 

DESCRIPTION

Standard API

The malloc() function allocates + ,

 void *cbopaque,
 const char *opts);
 
int mallctl(const char *name,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
int mallctlnametomib(const char *name,
 size_t *mibp,
 size_t *miblenp);
 
int mallctlbymib(const size_t *mib,
 size_t miblen,
 void *oldp,
 size_t *oldlenp,
 void *newp,
 size_t newlen);
 
void (*malloc_message)(void *cbopaque,
 const char *s);
 

const char *malloc_conf;

Experimental API

int allocm(void **ptr,
 size_t *rsize,
 size_t size,
 int flags);
 
int rallocm(void **ptr,
 size_t *rsize,
 size_t size,
 size_t extra,
 int flags);
 
int sallocm(const void *ptr,
 size_t *rsize,
 int flags);
 
int dallocm(void *ptr,
 int flags);
 
int nallocm(size_t *rsize,
 size_t size,
 int flags);
 

DESCRIPTION

Standard API

The malloc() function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object.

The calloc() function allocates @@ -17,7 +17,13 @@ alignment, and returns the allocation in the value pointed to by ptr. The requested alignment must be a power of 2 at least as large - as sizeof(void *).

The realloc() function changes the + as sizeof(void *).

The aligned_alloc() function + allocates size bytes of memory such that the + allocation's base address is an even multiple of + alignment. The requested + alignment must be a power of 2. Behavior is + undefined if size is not an integral multiple of + alignment.
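For illustration, a minimal sketch that exercises both aligned allocation entry points follows; the 64-byte alignment and 1024-byte size are arbitrary example values, not requirements of the interface.

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        void *p;

        /* posix_memalign(): alignment must be a power of 2 no smaller than
         * sizeof(void *); the allocation is returned through the first
         * argument. */
        if (posix_memalign(&p, 64, 1024) != 0)
            return 1;
        free(p);

        /* aligned_alloc(): alignment must be a power of 2, and size should
         * be an integral multiple of alignment. */
        p = aligned_alloc(64, 1024);
        if (p == NULL)
            return 1;
        free(p);
        return 0;
    }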

The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old @@ -32,7 +38,7 @@ malloc() for the specified size.

The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is - NULL, no action occurs.

Non-standard API

The malloc_usable_size() function + NULL, no action occurs.

Non-standard API

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The @@ -112,11 +118,14 @@ for (i = 0; i < nbins; i++) { len = sizeof(bin_size); mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); /* Do something with bin_size... */ -}

Experimental API

The experimental API is subject to change or removal without regard - for backward compatibility.

The allocm(),
+}
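For reference, a self-contained version of the mib loop excerpted above might look as follows; the surrounding declarations and the "arenas.bin.0.size" lookup are reconstructed here and should be read as a sketch rather than a verbatim quotation of the manual's example.

    #include <jemalloc/jemalloc.h>

    void
    walk_bin_sizes(void) {
        unsigned nbins, i;
        size_t mib[4];
        size_t len, miblen;

        /* Read the number of small bin size classes ("arenas.nbins"). */
        len = sizeof(nbins);
        mallctl("arenas.nbins", &nbins, &len, NULL, 0);

        /* Translate the name once, then vary the bin index in the mib. */
        miblen = sizeof(mib) / sizeof(mib[0]);
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            size_t bin_size;

            mib[2] = i;
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
            /* Do something with bin_size... */
        }
    }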

Experimental API

The experimental API is subject to change or removal without regard + for backward compatibility. If --disable-experimental + is specified during configuration, the experimental API is + omitted.

The allocm(), rallocm(), - sallocm(), and - dallocm() functions all have a + sallocm(), + dallocm(), and + nallocm() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to @@ -142,7 +151,9 @@ for (i = 0; i < nbins; i++) { least size bytes of memory, sets *ptr to the base address of the allocation, and sets *rsize to the real size of the allocation if - rsize is not NULL.

The rallocm() function resizes the + rsize is not NULL. Behavior + is undefined if size is + 0.

The rallocm() function resizes the allocation at *ptr to be at least size bytes, sets *ptr to the base address of the allocation if it moved, and sets @@ -152,12 +163,20 @@ for (i = 0; i < nbins; i++) { the allocation to be at least size + extra) bytes, though inability to allocate the extra byte(s) will not by itself result in failure. Behavior is - undefined if (size + + undefined if size is 0, or if + (size + extra > SIZE_T_MAX).

The sallocm() function sets *rsize to the real size of the allocation.

The dallocm() function causes the memory referenced by ptr to be made available for - future allocations.

TUNING

Once, when the first call is made to one of the memory allocation + future allocations.

The nallocm() function allocates no + memory, but it performs the same size computation as the + allocm() function, and if + rsize is not NULL it sets + *rsize to the real size of the allocation that + would result from the equivalent allocm() + function call. Behavior is undefined if + size is 0.
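As an illustrative sketch of these functions working together (the ALLOCM_ALIGN(), ALLOCM_ZERO, and ALLOCM_NO_MOVE flag macros come from the FLAGS description, which is not reproduced in this excerpt):

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        void *p;
        size_t rsize;

        /* Ask what a 4 KiB, 64-byte-aligned request would really consume,
         * without allocating anything. */
        nallocm(&rsize, 4096, ALLOCM_ALIGN(64));

        /* Allocate at least 4096 bytes, 64-byte aligned and zero-filled;
         * *rsize receives the real (usable) size. */
        if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
            ALLOCM_SUCCESS)
            return 1;

        /* Grow to at least 8192 bytes; moving the allocation is acceptable
         * here, so ALLOCM_NO_MOVE is not passed. */
        if (rallocm(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS) {
            dallocm(p, 0);
            return 1;
        }

        dallocm(p, 0);
        return 0;
    }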

TUNING

Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time.

The string pointed to by the global variable malloc_conf, the “name” of the file @@ -180,8 +199,8 @@ for (i = 0; i < nbins; i++) { suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory. If --enable-dss is specified during configuration, this - allocator uses both sbrk(2) and - mmap(2), in that order of preference; + allocator uses both mmap(2) and + sbrk(2), in that order of preference; otherwise only mmap(2) is used.

This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi-processor systems. This works well with regard to threading scalability, but incurs some costs. There is @@ -212,26 +231,14 @@ for (i = 0; i < nbins; i++) { large object). The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time.

Small objects are managed in groups by page runs. Each run maintains - a frontier and free list to track which regions are in use. Unless - --disable-tiny is specified during configuration, - allocation requests that are no more than half the quantum (8 or 16, - depending on architecture) are rounded up to the nearest power of two that - is at least sizeof(void *). - Allocation requests that are more than half the quantum, but no more than - the minimum cacheline-multiple size class (see the - "opt.lg_qspace_max" - - option) are rounded up to the nearest multiple of the quantum. Allocation - requests that are more than the minimum cacheline-multiple size class, but - no more than the minimum subpage-multiple size class (see the - "opt.lg_cspace_max" - - option) are rounded up to the nearest multiple of the cacheline size (64). - Allocation requests that are more than the minimum subpage-multiple size - class, but no more than the maximum subpage-multiple size class are rounded - up to the nearest multiple of the subpage size (256). Allocation requests - that are more than the maximum subpage-multiple size class, but small - enough to fit in an arena-managed chunk (see the + a frontier and free list to track which regions are in use. Allocation + requests that are no more than half the quantum (8 or 16, depending on + architecture) are rounded up to the nearest power of two that is at least + sizeof(double). All other small + object size classes are multiples of the quantum, spaced such that internal + fragmentation is limited to approximately 25% for all but the smallest size + classes. Allocation requests that are larger than the maximum small size + class, but small enough to fit in an arena-managed chunk (see the "opt.lg_chunk" option), are rounded up to the nearest run size. Allocation requests that are too large @@ -241,7 +248,7 @@ for (i = 0; i < nbins; i++) { suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating.
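As a small observation aid (not part of the manual's own examples), malloc_usable_size() can be used to see this rounding in practice; the exact value printed depends on the size classes in Table 1 and on how jemalloc was configured.

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /* With the classes shown below, a 100-byte request is expected to
         * be served from the 128-byte size class. */
        void *p = malloc(100);

        if (p == NULL)
            return 1;
        printf("requested 100 bytes, usable size %zu\n",
            malloc_usable_size(p));
        free(p);
        return 0;
    }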

Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit - system, the size classes in each category are as shown in Table 1.

Table 1. Size classes

Category   Subcategory        Size
Small      Tiny               [8]
           Quantum-spaced     [16, 32, 48, ..., 128]
           Cacheline-spaced   [192, 256, 320, ..., 512]
           Subpage-spaced     [768, 1024, 1280, ..., 3840]
Large                         [4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]
Huge                          [4 MiB, 8 MiB, 12 MiB, ...]

MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the + system, the size classes in each category are as shown in Table 1.

Table 1. Size classes

Category   Spacing   Size
Small      lg        [8]
           16        [16, 32, 48, ..., 128]
           32        [160, 192, 224, 256]
           64        [320, 384, 448, 512]
           128       [640, 768, 896, 1024]
           256       [1280, 1536, 1792, 2048]
           512       [2560, 3072, 3584]
Large      4 KiB     [4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]
Huge       4 MiB     [4 MiB, 8 MiB, 12 MiB, ...]

MALLCTL NAMESPACE

The following names are defined in the namespace accessible via the mallctl*() functions. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r-, -w, or @@ -290,13 +297,6 @@ for (i = 0; i < nbins; i++) {

--enable-dss was specified during build configuration.

- "config.dynamic_page_shift" - - (bool) - r- -

--enable-dynamic-page-shift was - specified during build configuration.

- "config.fill" (bool) @@ -311,6 +311,20 @@ for (i = 0; i < nbins; i++) {

--enable-lazy-lock was specified during build configuration.

+ "config.mremap" + + (bool) + r- +

--enable-mremap was specified during + build configuration.

+ + "config.munmap" + + (bool) + r- +

--enable-munmap was specified during + build configuration.

+ "config.prof" (bool) @@ -339,39 +353,32 @@ for (i = 0; i < nbins; i++) {

--enable-stats was specified during build configuration.

- "config.swap" + "config.tcache" (bool) r- -

--enable-swap was specified during - build configuration.

+

--disable-tcache was not specified + during build configuration.

- "config.sysv" + "config.tls" (bool) r- -

--enable-sysv was specified during +

--disable-tls was not specified during build configuration.

- "config.tcache" - - (bool) - r- -

--disable-tcache was not specified - during build configuration.

- - "config.tiny" + "config.utrace" (bool) r- -

--disable-tiny was not specified - during build configuration.

+

--enable-utrace was specified during + build configuration.

- "config.tls" + "config.valgrind" (bool) r- -

--disable-tls was not specified during +

--enable-valgrind was specified during build configuration.

"config.xmalloc" @@ -390,25 +397,7 @@ for (i = 0; i < nbins; i++) { abort(3) in these cases. This option is disabled by default unless --enable-debug is specified during configuration, in which case it is enabled by default. -

- - "opt.lg_qspace_max" - - (size_t) - r- -

Size (log base 2) of the maximum size class that is a - multiple of the quantum (8 or 16 bytes, depending on architecture). - Above this size, cacheline spacing is used for size classes. The - default value is 128 bytes (2^7).

- - "opt.lg_cspace_max" - - (size_t) - r- -

Size (log base 2) of the maximum size class that is a - multiple of the cacheline size (64). Above this size, subpage spacing - (256 bytes) is used for size classes. The default value is 512 bytes - (2^9).

+

"opt.lg_chunk" @@ -465,7 +454,42 @@ for (i = 0; i < nbins; i++) { 0x5a. This is intended for debugging and will impact performance negatively. This option is disabled by default unless --enable-debug is specified during - configuration, in which case it is enabled by default.

+ configuration, in which case it is enabled by default.

+ + "opt.quarantine" + + (size_t) + r- + [--enable-fill] +

Per thread quarantine size in bytes. If non-zero, each + thread maintains a FIFO object quarantine that stores up to the + specified number of bytes of memory. The quarantined memory is not + freed until it is released from quarantine, though it is immediately + junk-filled if the + "opt.junk" + option is + enabled. This feature is of particular use in combination with Valgrind, which can detect attempts + to access quarantined objects. This is intended for debugging and will + impact performance negatively. The default quarantine size is + 0.

+ + "opt.redzone" + + (bool) + r- + [--enable-fill] +

Redzones enabled/disabled. If enabled, small + allocations have redzones before and after them. Furthermore, if the + + "opt.junk" + option is + enabled, the redzones are checked for corruption during deallocation. + However, the primary intended purpose of this feature is to be used in + combination with Valgrind, + which needs redzones in order to do effective buffer overflow/underflow + detection. This option is intended for debugging and will impact + performance negatively. This option is disabled by + default.
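Following the manual's own malloc_conf convention, a purely illustrative combination of the fill-related debugging options (the 2 MiB quarantine size is an arbitrary example value):

    malloc_conf = "junk:true,redzone:true,quarantine:2097152";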

"opt.zero" @@ -479,21 +503,38 @@ for (i = 0; i < nbins; i++) { rallocm() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. -

+

- "opt.sysv" + "opt.utrace" (bool) r- - [--enable-sysv] -

If enabled, attempting to allocate zero bytes will - return a NULL pointer instead of a valid pointer. - (The default behavior is to make a minimal allocation and return a - pointer to it.) This option is provided for System V compatibility. - This option is incompatible with the - "opt.xmalloc" - option. - This option is disabled by default.

+ [--enable-utrace] +

Allocation tracing based on + utrace(2) enabled/disabled. This option + is disabled by default.

+ + "opt.valgrind" + + (bool) + r- + [--enable-valgrind] +

Valgrind + support enabled/disabled. If enabled, several other options are + automatically modified during options processing to work well with + Valgrind: + "opt.junk" + + and + "opt.zero" + are set + to false, + "opt.quarantine" + is + set to 16 MiB, and + "opt.redzone" + is set to + true. This option is disabled by default.

"opt.xmalloc" @@ -521,27 +562,11 @@ malloc_conf = "xmalloc:true";

objects up to a certain size. Thread-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use. See the - - "opt.lg_tcache_gc_sweep" - - and + "opt.lg_tcache_max" - options for related tuning information. This option is enabled by - default.

- - "opt.lg_tcache_gc_sweep" - - (ssize_t) - r- - [--enable-tcache] -

Approximate interval (log base 2) between full - thread-specific cache garbage collection sweeps, counted in terms of - thread-specific cache allocation/deallocation events. Garbage - collection is actually performed incrementally, one size class at a - time, in order to avoid large collection pauses. The default sweep - interval is 8192 (2^13); setting this option to -1 will disable garbage - collection.

+ option for related tuning information. This option is enabled by + default.

"opt.lg_tcache_max" @@ -559,17 +584,7 @@ malloc_conf = "xmalloc:true";

r- [--enable-prof]

Memory profiling enabled/disabled. If enabled, profile - memory allocation activity, and use an - atexit(3) function to dump final memory - usage to a file named according to the pattern - <prefix>.<pid>.<seq>.f.heap, - where <prefix> is controlled by the - "opt.prof_prefix" - - option. See the - "opt.lg_prof_bt_max" - - option for backtrace depth control. See the + memory allocation activity. See the "opt.prof_active" option for on-the-fly activation/deactivation. See the @@ -578,19 +593,19 @@ malloc_conf = "xmalloc:true";

option for probabilistic sampling control. See the "opt.prof_accum" - option for control of cumulative sample reporting. See the - "opt.lg_prof_tcmax" - - option for control of per thread backtrace caching. See the + option for control of cumulative sample reporting. See the "opt.lg_prof_interval" - option for information on interval-triggered profile dumping, and the - + option for information on interval-triggered profile dumping, the "opt.prof_gdump" - option for information on high-water-triggered profile dumping. - Profile output is compatible with the included pprof - Perl script, which originates from the google-perftools + option for information on high-water-triggered profile dumping, and the + + "opt.prof_final" + + option for final profile dumping. Profile output is compatible with + the included pprof Perl script, which originates + from the gperftools package.
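Again using the malloc_conf convention, an illustrative (not prescriptive) way to enable profiling with interval-triggered dumps roughly every 2^30 bytes of allocation activity, with output written under /tmp:

    malloc_conf = "prof:true,lg_prof_interval:30,prof_prefix:/tmp/jeprof";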

"opt.prof_prefix" @@ -602,15 +617,7 @@ malloc_conf = "xmalloc:true";

set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled). The default prefix is - jeprof.

- - "opt.lg_prof_bt_max" - - (size_t) - r- - [--enable-prof] -

Maximum backtrace depth (log base 2) when profiling - memory allocation activity. The default is 128 (2^7).

+ jeprof.

"opt.prof_active" @@ -636,8 +643,8 @@ malloc_conf = "xmalloc:true";

Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity. Increasing the sampling interval decreases profile fidelity, but also decreases the - computational overhead. The default sample interval is 1 (2^0) (i.e. - all allocations are sampled).

+ computational overhead. The default sample interval is 512 KiB (2^19 + B).

"opt.prof_accum" @@ -648,28 +655,8 @@ malloc_conf = "xmalloc:true";

dumps enabled/disabled. If this option is enabled, every unique backtrace must be stored for the duration of execution. Depending on the application, this can impose a large memory overhead, and the - cumulative counts are not always of interest. See the - - "opt.lg_prof_tcmax" - - option for control of per thread backtrace caching, which has important - interactions. This option is enabled by default.

- - "opt.lg_prof_tcmax" - - (ssize_t) - r- - [--enable-prof] -

Maximum per thread backtrace cache (log base 2) used - for heap profiling. A backtrace can only be discarded if the - - "opt.prof_accum" - - option is disabled, and no thread caches currently refer to the - backtrace. Therefore, a backtrace cache limit should be imposed if the - intention is to limit how much memory is used by backtraces. By - default, no limit is imposed (encoded as -1). -

+ cumulative counts are not always of interest. This option is disabled + by default.

"opt.lg_prof_interval" @@ -702,7 +689,21 @@ malloc_conf = "xmalloc:true";

where <prefix> is controlled by the "opt.prof_prefix" - option. This option is disabled by default.

+ option. This option is disabled by default.

+ + "opt.prof_final" + + (bool) + r- + [--enable-prof] +

Use an + atexit(3) function to dump final memory + usage to a file named according to the pattern + <prefix>.<pid>.<seq>.f.heap, + where <prefix> is controlled by the + "opt.prof_prefix" + + option. This option is enabled by default.

"opt.prof_leak" @@ -712,45 +713,11 @@ malloc_conf = "xmalloc:true";

Leak reporting enabled/disabled. If enabled, use an atexit(3) function to report memory leaks detected by allocation sampling. See the - - "opt.lg_prof_bt_max" - - option for backtrace depth control. See the "opt.prof" option for information on analyzing heap profile output. This option is disabled - by default.

- - "opt.overcommit" - - (bool) - r- - [--enable-swap] -

Over-commit enabled/disabled. If enabled, over-commit - memory as a side effect of using anonymous - mmap(2) or - sbrk(2) for virtual memory allocation. - In order for overcommit to be disabled, the - "swap.fds" - mallctl must have - been successfully written to. This option is enabled by - default.

- - "tcache.flush" - - (void) - -- - [--enable-tcache] -

Flush calling thread's tcache. This interface releases - all cached objects and internal data structures associated with the - calling thread's thread-specific cache. Ordinarily, this interface - need not be called, since automatic periodic incremental garbage - collection occurs, and the thread cache is automatically discarded when - a thread exits. However, garbage collection is triggered by allocation - activity, so it is possible for a thread that stops - allocating/deallocating to retain its cache indefinitely, in which case - the developer may find manual flushing useful.

+ by default.

"thread.arena" @@ -810,7 +777,34 @@ malloc_conf = "xmalloc:true";

"thread.deallocated" mallctl. This is useful for avoiding the overhead of repeated - mallctl*() calls.

+ mallctl*() calls.

+ + "thread.tcache.enabled" + + (bool) + rw + [--enable-tcache] +

Enable/disable calling thread's tcache. The tcache is + implicitly flushed as a side effect of becoming + disabled (see + "thread.tcache.flush" + ). +

+ + "thread.tcache.flush" + + (void) + -- + [--enable-tcache] +

Flush calling thread's tcache. This interface releases + all cached objects and internal data structures associated with the + calling thread's thread-specific cache. Ordinarily, this interface + need not be called, since automatic periodic incremental garbage + collection occurs, and the thread cache is automatically discarded when + a thread exits. However, garbage collection is triggered by allocation + activity, so it is possible for a thread that stops + allocating/deallocating to retain its cache indefinitely, in which case + the developer may find manual flushing useful.
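A minimal sketch of driving these two mallctls from application code (error handling omitted for brevity):

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    void
    tcache_example(void) {
        bool enabled = false;
        size_t len = sizeof(enabled);

        /* Disable the calling thread's tcache; this implicitly flushes it. */
        mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
            sizeof(enabled));

        /* Alternatively, leave the tcache enabled and flush it explicitly. */
        mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

        /* Read back the current setting. */
        mallctl("thread.tcache.enabled", &enabled, &len, NULL, 0);
    }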

"arenas.narenas" @@ -834,80 +828,12 @@ malloc_conf = "xmalloc:true";

r-

Quantum size.

- "arenas.cacheline" - - (size_t) - r- -

Assumed cacheline size.

- - "arenas.subpage" - - (size_t) - r- -

Subpage size class interval.

- - "arenas.pagesize" + "arenas.page" (size_t) r-

Page size.

- "arenas.chunksize" - - (size_t) - r- -

Chunk size.

- - "arenas.tspace_min" - - (size_t) - r- -

Minimum tiny size class. Tiny size classes are powers - of two.

- - "arenas.tspace_max" - - (size_t) - r- -

Maximum tiny size class. Tiny size classes are powers - of two.

- - "arenas.qspace_min" - - (size_t) - r- -

Minimum quantum-spaced size class.

- - "arenas.qspace_max" - - (size_t) - r- -

Maximum quantum-spaced size class.

- - "arenas.cspace_min" - - (size_t) - r- -

Minimum cacheline-spaced size class.

- - "arenas.cspace_max" - - (size_t) - r- -

Maximum cacheline-spaced size class.

- - "arenas.sspace_min" - - (size_t) - r- -

Minimum subpage-spaced size class.

- - "arenas.sspace_max" - - (size_t) - r- -

Maximum subpage-spaced size class.

- "arenas.tcache_max" (size_t) @@ -915,38 +841,11 @@ malloc_conf = "xmalloc:true";

[--enable-tcache]

Maximum thread-cached size class.

- "arenas.ntbins" - - (unsigned) - r- -

Number of tiny bin size classes.

- - "arenas.nqbins" - - (unsigned) - r- -

Number of quantum-spaced bin size - classes.

- - "arenas.ncbins" - - (unsigned) - r- -

Number of cacheline-spaced bin size - classes.

- - "arenas.nsbins" - - (unsigned) - r- -

Number of subpage-spaced bin size - classes.

- "arenas.nbins" (unsigned) r- -

Total number of bin size classes.

+

Number of bin size classes.

"arenas.nhbins" @@ -1079,8 +978,7 @@ malloc_conf = "xmalloc:true";

large as "stats.active". This - does not include inactive chunks backed by swap files. This does not - include inactive chunks embedded in the DSS.

+ does not include inactive chunks.

"stats.chunks.current" @@ -1088,8 +986,7 @@ malloc_conf = "xmalloc:true";

r- [--enable-stats]

Total number of chunks actively mapped on behalf of the - application. This does not include inactive chunks backed by swap - files. This does not include inactive chunks embedded in the DSS. + application. This does not include inactive chunks.

"stats.chunks.total" @@ -1309,14 +1206,6 @@ malloc_conf = "xmalloc:true";

Cumulative number of times the current run from which to allocate changed.

- "stats.arenas.<i>.bins.<j>.highruns" - - (size_t) - r- - [--enable-stats] -

Maximum number of runs at any time thus far. -

- "stats.arenas.<i>.bins.<j>.curruns" (size_t) @@ -1348,69 +1237,13 @@ malloc_conf = "xmalloc:true";

Cumulative number of allocation requests for this size class.

- "stats.arenas.<i>.lruns.<j>.highruns" - - (size_t) - r- - [--enable-stats] -

Maximum number of runs at any time thus far for this - size class.

- "stats.arenas.<i>.lruns.<j>.curruns" (size_t) r- [--enable-stats]

Current number of runs for this size class. -

- - "swap.avail" - - (size_t) - r- - [--enable-stats --enable-swap] -

Number of swap file bytes that are currently not - associated with any chunk (i.e. mapped, but otherwise completely - unmanaged).

- - "swap.prezeroed" - - (bool) - rw - [--enable-swap] -

If true, the allocator assumes that the swap file(s) - contain nothing but nil bytes. If this assumption is violated, - allocator behavior is undefined. This value becomes read-only after - - "swap.fds" - is - successfully written to.

- - "swap.nfds" - - (size_t) - r- - [--enable-swap] -

Number of file descriptors in use for swap. -

- - "swap.fds" - - (int *) - r- - [--enable-swap] -

When written to, the files associated with the - specified file descriptors are contiguously mapped via - mmap(2). The resulting virtual memory - region is preferred over anonymous - mmap(2) and - sbrk(2) memory. Note that if a file's - size is not a multiple of the page size, it is automatically truncated - to the nearest page size multiple. See the - - "swap.prezeroed" - - mallctl for specifying that the files are pre-zeroed.

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with +

DEBUGGING MALLOC PROBLEMS

When debugging, it is a good idea to configure/build jemalloc with the --enable-debug and --enable-fill options, and recompile the program with suitable options and symbols for debugger support. When so configured, jemalloc incorporates a wide variety @@ -1428,10 +1261,13 @@ malloc_conf = "xmalloc:true";

the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs.

This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information - would be prohibitive. There are a number of allocator implementations - available on the Internet which focus on detecting and pinpointing problems - by trading performance for extra sanity checks and detailed - diagnostics.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an + would be prohibitive. However, jemalloc does integrate with the most + excellent Valgrind tool if the + --enable-valgrind configuration option is enabled and the + + "opt.valgrind" + option + is enabled.

DIAGNOSTIC MESSAGES

If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor STDERR_FILENO. Errors will result in the process dumping core. If the @@ -1447,7 +1283,7 @@ malloc_conf = "xmalloc:true";

malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock.
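As an illustrative sketch (all names other than malloc_message itself are invented for this example), a replacement writer that avoids allocation by calling write(2) directly:

    #include <string.h>
    #include <unistd.h>
    #include <jemalloc/jemalloc.h>

    /* Replacement message sink; it must not allocate memory. */
    static void
    my_malloc_message(void *cbopaque, const char *s) {
        (void)cbopaque; /* NULL except during malloc_stats_print(). */
        if (write(STDERR_FILENO, s, strlen(s)) < 0) {
            /* Nothing sensible to do on write failure here. */
        }
    }

    void
    install_malloc_message(void) {
        malloc_message = my_malloc_message;
    }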

All messages are prefixed by - “<jemalloc>: ”.

RETURN VALUES

Standard API

The malloc() and + “<jemalloc>: ”.

RETURN VALUES

Standard API

The malloc() and calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to @@ -1459,6 +1295,14 @@ malloc_conf = "xmalloc:true";

not a power of 2 at least as large as sizeof(void *).

ENOMEM

Memory allocation error.

+

The aligned_alloc() function returns + a pointer to the allocated memory if successful; otherwise a + NULL pointer is returned and + errno is set. The + aligned_alloc() function will fail if: +

EINVAL

The alignment parameter is + not a power of 2. +

ENOMEM

Memory allocation error.

The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL @@ -1467,7 +1311,7 @@ malloc_conf = "xmalloc:true";

allocation failure. The realloc() function always leaves the original buffer intact when an error occurs.

The free() function returns no - value.

Non-standard API

The malloc_usable_size() function + value.

Non-standard API

The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr.

The mallctl(), mallctlnametomib(), and @@ -1486,13 +1330,15 @@ malloc_conf = "xmalloc:true";

occurred.

EFAULT

An interface with side effects failed in some way not directly related to mallctl*() read/write processing.

-

Experimental API

The allocm(), +

Experimental API

The allocm(), rallocm(), - sallocm(), and - dallocm() functions return + sallocm(), + dallocm(), and + nallocm() functions return ALLOCM_SUCCESS on success; otherwise they return an - error value. The allocm() and - rallocm() functions will fail if: + error value. The allocm(), + rallocm(), and + nallocm() functions will fail if:

ALLOCM_ERR_OOM

Out of memory. Insufficient contiguous memory was available to service the allocation request. The allocm() function additionally sets @@ -1516,6 +1362,7 @@ malloc_conf = "xmalloc:true";

malloc_conf = "lg_chunk:24";
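For completeness, one way to combine this with a run-time check, sketched using the "opt.lg_chunk" mallctl documented above:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Compile-time default: 16 MiB chunks (2^24 bytes). */
    const char *malloc_conf = "lg_chunk:24";

    int
    main(void) {
        size_t lg_chunk, len = sizeof(lg_chunk);

        /* Confirm the setting the allocator is actually using. */
        if (mallctl("opt.lg_chunk", &lg_chunk, &len, NULL, 0) == 0)
            printf("lg_chunk: %zu\n", lg_chunk);
        return 0;
    }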

SEE ALSO

madvise(2), mmap(2), sbrk(2), + utrace(2), alloca(3), atexit(3), getpagesize(3)

STANDARDS

The malloc(),