#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)
/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
#  ifdef JEMALLOC_FILL
bool opt_junk = true;
#  else
bool opt_junk = false;
#  endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER ((unsigned long)0)
#  define INITIALIZER pthread_self()
#  define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER false
#  define INITIALIZER true
#  define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
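/*
 * MALLOC_MUTEX_INITIALIZER allows the non-Windows init_lock to be initialized
 * statically.  Windows synchronization objects have no such static
 * initializer, so on Windows init_lock is instead set up by a constructor
 * that runs before main(); the .CRT$XCU section entry is the MSVC idiom for
 * arranging that _init_init_lock() is invoked during CRT startup.
 */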
typedef struct {
    void   *p;  /* Input pointer (as in realloc(p, s)). */
    size_t s;   /* Request size. */
    void   *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
    if (opt_utrace) { \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
    } \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
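/*
 * Example: with opt_utrace enabled, each allocator entry point records a
 * (p, s, r) triple via utrace(2); e.g. the UTRACE(0, size, ret) call in
 * je_malloc() below logs a NULL input pointer, the requested size, and the
 * resulting pointer.
 */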
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    arenas_tsd_set(&ret);

    return (ret);
}
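/*
 * Worked example of the selection policy above (hypothetical state): with
 * narenas == 3, arenas[0] serving 2 threads, arenas[1] uninitialized (NULL),
 * and arenas[2] serving 1 thread, the loop leaves choose == 2 and
 * first_null == 1.  Since arenas[2] is not idle and an uninitialized slot
 * exists, a new arena is created at index 1 and assigned to the calling
 * thread.
 */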
static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0; i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */
static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    ret = (unsigned)result;

    return (ret);
}
void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
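/*
 * Example (hypothetical input): given the conf string "abort:true,narenas:8",
 * successive calls to malloc_conf_next() yield the key/value pairs
 * ("abort", "true") and ("narenas", "8"), with *opts_p advanced past the
 * consumed text; the caller's while loop then stops when it sees the
 * terminating '\0'.
 */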
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
#ifndef _WIN32
            int linklen;
            const char *linkname =
#  ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
                "/etc/malloc.conf"
#  endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else
#endif
            {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
            if (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == 0 && \
                    vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error("Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                hit = true; \
            } else \
                hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
            bool hit; \
            CONF_HANDLE_BOOL_HIT(o, n, hit); \
            if (hit) \
                continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) { \
                uintmax_t um; \
                char *end; \
                \
                set_errno(0); \
                um = malloc_strtoumax(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error("Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (um < min || um > max) { \
                    malloc_conf_error("Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = um; \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) { \
                long l; \
                char *end; \
                \
                set_errno(0); \
                l = strtol(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error("Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > (ssize_t)max) { \
                    malloc_conf_error("Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) { \
                size_t cpylen = (vlen <= sizeof(o)-1) ? \
                    vlen : sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }
            CONF_HANDLE_BOOL(opt_abort, "abort")
            /*
             * Chunks always require at least one header page, plus
             * one data page in the absence of redzones, or three
             * pages in the presence of redzones.  In order to
             * simplify options processing, fix the limit based on
             * config_fill.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk")
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX)
                CONF_HANDLE_BOOL(opt_redzone, "redzone")
                CONF_HANDLE_BOOL(opt_zero, "zero")
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace")
            }
            if (config_valgrind) {
                bool hit;
                CONF_HANDLE_BOOL_HIT(opt_valgrind,
                    "valgrind", hit)
                if (config_fill && opt_valgrind && hit) {
                    opt_junk = false;
                    opt_zero = false;
                    if (opt_quarantine == 0) {
                        opt_quarantine =
                            JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
                    }
                    opt_redzone = true;
                }
                if (hit)
                    continue;
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache")
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof")
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
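/*
 * Example (hypothetical setting): MALLOC_CONF="abort:true,lg_chunk:24,junk:true"
 * is consumed by the handlers above as opt_abort = true, opt_lg_chunk = 24
 * (validated against the range computed from LG_PAGE and config_fill), and,
 * in builds with config_fill, opt_junk = true; an unrecognized key falls
 * through to the "Invalid conf pair" error.
 */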
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock))
        return (true);

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_fill && quarantine_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        narenas = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas);
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}
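/*
 * Example: on a machine where malloc_ncpus() reports 8 CPUs and no "narenas"
 * option was given, opt_narenas defaults to 8 << 2 == 32, so the arenas array
 * holds 32 slots that are populated lazily as threads first allocate; the cap
 * derived from chunksize / sizeof(arena_t *) only matters for far larger
 * settings.
 */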
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto label_oom;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_oom;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(size);
        ret = imalloc(size);
    }

label_oom:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}
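/*
 * Note on the prof_promote path above: when heap profiling decides to sample
 * an allocation that would otherwise land in a small size class, the request
 * is promoted to SMALL_MAXCLASS+1 (the smallest large class) and
 * arena_prof_promoted() records the original usize, so the sampled object can
 * carry per-allocation profiling metadata without affecting unsampled small
 * allocations.
 */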
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto label_return;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto label_return;
    }

    *memptr = result;
    ret = 0;

label_return:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    UTRACE(0, size, result);
    return (ret);
}
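/*
 * The ((alignment - 1) & alignment) != 0 test above rejects any alignment
 * that is not a power of two: e.g. for alignment == 24, 23 & 24 == 16 != 0,
 * so imemalign() fails with EINVAL, whereas alignment == 32 gives
 * 31 & 32 == 0 and is accepted (provided it also meets min_alignment,
 * which is sizeof(void *) for posix_memalign() and 1 for aligned_alloc()).
 */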
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}
void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}
void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }

    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}
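/*
 * Worked example of the overflow check above on a 64-bit size_t: the mask
 * SIZE_T_MAX << 32 selects the upper half of the bits, so if both num and
 * size fit in 32 bits their product cannot overflow and the division is
 * skipped entirely; only when at least one operand has a high bit set is
 * num_size / size != num evaluated to detect a wrapped multiplication.
 */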
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(p). */
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats || (config_valgrind &&
                    opt_valgrind))
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    }

label_return:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret, config_prof));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
    return (ret);
}
void
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (ptr != NULL) {
        size_t usize;
        size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr, config_prof);
            prof_free(ptr, usize);
        } else if (config_stats || config_valgrind)
            usize = isalloc(ptr, config_prof);
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        if (config_valgrind && opt_valgrind)
            rzsize = p2rz(ptr);
        iqalloc(ptr);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
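/*
 * Expansion example: when jemalloc_defs.h contains "#define je_malloc malloc",
 * is_malloc(je_malloc) first expands its argument, giving is_malloc_(malloc),
 * which pastes to malloc_is_malloc, i.e. 1; without that define the result is
 * the undefined token malloc_is_je_malloc, so the #if below evaluates to 0.
 */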
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
    je_realloc;
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}
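/*
 * Flags example: ALLOCM_LG_ALIGN_MASK extracts the low bits of flags as a
 * base-2 logarithm, so a caller passing ALLOCM_LG_ALIGN(4) requests
 * 1 << 4 == 16-byte alignment, while a zero lg value yields alignment == 0
 * (no alignment constraint) via the & (SIZE_T_MAX-1) trick above; ALLOCM_ZERO
 * additionally requests zeroed memory.
 */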
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);

    p = *ptr;
    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, q);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}
int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqalloc(ptr);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
}
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));

    assert(size != 0);

    if (malloc_init())
        return (ALLOCM_ERR_OOM);

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);

    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Acquire all mutexes in a safe order. */
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
    base_prefork();
    huge_prefork();
    chunk_dss_prefork();
}
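/*
 * Rationale (summarized): pthread_atfork() runs jemalloc_prefork() in the
 * parent before fork(), so every allocator mutex is held at the moment the
 * child is created and no lock can be inherited in an inconsistent state;
 * the postfork handlers below then release the same mutexes in the parent
 * and the child, in roughly the reverse of the acquisition order.
 */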
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_parent();
    huge_postfork_parent();
    base_postfork_parent();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
}
void
jemalloc_postfork_child(void)
{
    unsigned i;

    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_child();
    huge_postfork_child();
    base_postfork_child();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

    if (malloc_init())
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(arenas[0], size, zero, false));
    else
        return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, false);
    else
        huge_dalloc(ptr, true);
}

/******************************************************************************/