#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)
/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
#  ifdef JEMALLOC_FILL
bool opt_junk = true;
#  else
bool opt_junk = false;
#  endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas_total;
unsigned narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
    void   *p;  /* Input pointer (as in realloc(p, s)). */
    size_t  s;  /* Request size. */
    void   *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {                  \
    if (opt_utrace) {                           \
        malloc_utrace_t ut;                     \
        ut.p = (a);                             \
        ut.s = (b);                             \
        ut.r = (c);                             \
        utrace(&ut, sizeof(ut));                \
    }                                           \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    arenas_tsd_set(&ret);

    return (ret);
}
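
/*
 * Illustrative example of the policy above (not from the original source):
 * with narenas_auto == 4, thread counts {3, 1, 2} for arenas 0..2, and
 * arenas[3] == NULL, "choose" ends up as 1 and "first_null" as 3; since
 * arenas[1] is in use and an uninitialized slot exists, a new arena is
 * created at index 3.  If instead all four slots were initialized with
 * counts {3, 1, 2, 0}, the unloaded arenas[3] would be reused directly.
 */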
static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}
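
/*
 * Note (added for clarity): this handler is only registered, via atexit() in
 * malloc_init_hard() below, when opt_stats_print is enabled, e.g. by running
 * with MALLOC_CONF="stats_print:true".
 */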
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error. */
        ret = 1;
    }
#endif
    ret = (unsigned)result;

    return (ret);
}
void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}
static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
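
/*
 * Example (illustrative, not in the original source): given the conf string
 * "abort:true,narenas:8", the first call returns k = "abort" (klen 5) and
 * v = "true" (vlen 4), the second returns k = "narenas" and v = "8", and the
 * third call reports end of input by returning true.
 */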
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && opt_valgrind) {
            opt_junk = false;
            assert(opt_zero == false);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && opt_valgrind)
            opt_tcache = false;
    }

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#  ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
                "/etc/malloc.conf"
#  endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit)                                 \
            if (sizeof(n)-1 == klen && strncmp(n, k,                    \
                klen) == 0) {                                           \
                if (strncmp("true", v, vlen) == 0 &&                    \
                    vlen == sizeof("true")-1)                           \
                    o = true;                                           \
                else if (strncmp("false", v, vlen) ==                   \
                    0 && vlen == sizeof("false")-1)                     \
                    o = false;                                          \
                else {                                                  \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                }                                                       \
                hit = true;                                             \
            } else                                                      \
                hit = false;
#define CONF_HANDLE_BOOL(o, n) {                                        \
            bool hit;                                                   \
            CONF_HANDLE_BOOL_HIT(o, n, hit);                            \
            if (hit)                                                    \
                continue;                                               \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max)                              \
            if (sizeof(n)-1 == klen && strncmp(n, k,                    \
                klen) == 0) {                                           \
                uintmax_t um;                                           \
                char *end;                                              \
                                                                        \
                set_errno(0);                                           \
                um = malloc_strtoumax(v, &end, 0);                      \
                if (get_errno() != 0 || (uintptr_t)end -                \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (um < min || um > max) {                      \
                    malloc_conf_error(                                  \
                        "Out-of-range conf value",                      \
                        k, klen, v, vlen);                              \
                } else                                                  \
                    o = um;                                             \
                continue;                                               \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max)                             \
            if (sizeof(n)-1 == klen && strncmp(n, k,                    \
                klen) == 0) {                                           \
                long l;                                                 \
                char *end;                                              \
                                                                        \
                set_errno(0);                                           \
                l = strtol(v, &end, 0);                                 \
                if (get_errno() != 0 || (uintptr_t)end -                \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (l < (ssize_t)min || l >                      \
                    (ssize_t)max) {                                     \
                    malloc_conf_error(                                  \
                        "Out-of-range conf value",                      \
                        k, klen, v, vlen);                              \
                } else                                                  \
                    o = l;                                              \
                continue;                                               \
            }
#define CONF_HANDLE_CHAR_P(o, n, d)                                     \
            if (sizeof(n)-1 == klen && strncmp(n, k,                    \
                klen) == 0) {                                           \
                size_t cpylen = (vlen <=                                \
                    sizeof(o)-1) ? vlen :                               \
                    sizeof(o)-1;                                        \
                strncpy(o, v, cpylen);                                  \
                o[cpylen] = '\0';                                       \
                continue;                                               \
            }
            CONF_HANDLE_BOOL(opt_abort, "abort")
            /*
             * Chunks always require at least one header page, plus
             * one data page in the absence of redzones, or three
             * pages in the presence of redzones.  In order to
             * simplify options processing, fix the limit based on
             * config_fill.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss = dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (match == false) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk")
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX)
                CONF_HANDLE_BOOL(opt_redzone, "redzone")
                CONF_HANDLE_BOOL(opt_zero, "zero")
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace")
            }
            if (config_valgrind) {
                CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache")
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof")
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
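
/*
 * Example (illustrative): MALLOC_CONF="narenas:2,lg_chunk:24,junk:true" sets
 * opt_narenas to 2, opt_lg_chunk to 24, and (when JEMALLOC_FILL is configured)
 * opt_junk to true; any unrecognized pair is reported through
 * malloc_conf_error() and otherwise ignored.
 */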
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    if (malloc_mutex_init(&arenas_lock))
        return (true);

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_total = narenas_auto = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_fill && quarantine_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas_auto > chunksize / sizeof(arena_t *)) {
        narenas_auto = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total = narenas_auto;

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas_total);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto label_oom;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_oom;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(size);
        ret = imalloc(size);
    }

label_oom:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}
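
/*
 * Note on the prof_promote path above (added for clarity): when heap profiling
 * samples a small request (usize <= SMALL_MAXCLASS), the object is allocated
 * as SMALL_MAXCLASS+1 bytes so that it is treated as a large allocation whose
 * profiling context can be tracked individually; arena_prof_promoted() then
 * records the originally requested usize.  Unsampled small allocations are
 * unaffected.
 */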
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto label_return;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto label_return;
    }

    *memptr = result;
    ret = 0;

label_return:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    UTRACE(0, size, result);
    return (ret);
}
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}
void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}
, size_t size
)
1006 size_t usize
JEMALLOC_CC_SILENCE_INIT(0);
1007 prof_thr_cnt_t
*cnt
JEMALLOC_CC_SILENCE_INIT(NULL
);
1009 if (malloc_init()) {
1015 num_size
= num
* size
;
1016 if (num_size
== 0) {
1017 if (num
== 0 || size
== 0)
1024 * Try to avoid division here. We know that it isn't possible to
1025 * overflow during multiplication if neither operand uses any of the
1026 * most significant half of the bits in a size_t.
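    /*
     * (Added example: with a 64-bit size_t the mask below is
     * 0xffffffff00000000, so if both num and size are below 2^32 their
     * product cannot overflow and the division check is skipped.)
     */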
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(p). */
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats || (config_valgrind &&
                    opt_valgrind))
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    }

label_return:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret, config_prof));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
    return (ret);
}
void
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (ptr != NULL) {
        size_t usize;
        size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr, config_prof);
            prof_free(ptr, usize);
        } else if (config_stats || config_valgrind)
            usize = isalloc(ptr, config_prof);
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        if (config_valgrind && opt_valgrind)
            rzsize = p2rz(ptr);
        iqalloc(ptr);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
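
/*
 * Expansion example (added for clarity): when jemalloc_defs.h contains
 * "#define je_malloc malloc", is_malloc(je_malloc) expands first to
 * is_malloc_(malloc) and then to malloc_is_malloc, which is 1 above;
 * otherwise it expands to malloc_is_je_malloc, which is undefined and
 * therefore evaluates to 0 in the #if below.
 */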
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
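
/*
 * Illustrative usage from application code (using the un-prefixed public
 * name; not part of this file):
 *
 *     size_t allocated, sz = sizeof(allocated);
 *     if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *         ... use allocated ...
 *
 * Writes pass newp/newlen instead of (or in addition to) oldp/oldlenp.
 */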
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipallocx(usize, alignment, zero, try_tcache, arena));
    else if (zero)
        return (icallocx(usize, try_tcache, arena));
    else
        return (imallocx(usize, try_tcache, arena));
}
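
/*
 * Flag decoding used by the *allocm() entry points below: the low bits
 * select lg(alignment) via ALLOCM_LG_ALIGN_MASK, ALLOCM_ZERO requests
 * zero-filled memory, ALLOCM_NO_MOVE (rallocm only) forbids relocation, and
 * bits >= 8 select an explicit arena index.  For example (illustrative):
 *
 *     void *p;
 *     allocm(&p, NULL, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO);
 *
 * requests 4096 zeroed bytes aligned to 64 bytes.
 */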
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    arena_t *arena;
    bool try_tcache;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    if (arena_ind != UINT_MAX) {
        arena = arenas[arena_ind];
        try_tcache = false;
    } else {
        arena = NULL;
        try_tcache = true;
    }

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero,
                try_tcache, arena);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero, try_tcache, arena);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero, try_tcache, arena);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    bool try_tcache_alloc, try_tcache_dalloc;
    arena_t *arena;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);

    if (arena_ind != UINT_MAX) {
        arena_chunk_t *chunk;
        try_tcache_alloc = true;
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
        try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
            arenas[arena_ind]);
        arena = arenas[arena_ind];
    } else {
        try_tcache_alloc = true;
        try_tcache_dalloc = true;
        arena = NULL;
    }

    p = *ptr;
    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move, try_tcache_alloc,
                try_tcache_dalloc, arena);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = irallocx(p, size, extra, alignment, zero, no_move,
                try_tcache_alloc, try_tcache_dalloc, arena);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = irallocx(p, size, extra, alignment, zero, no_move,
            try_tcache_alloc, try_tcache_dalloc, arena);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, 0);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}
int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
    bool try_tcache;

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    if (arena_ind != UINT_MAX) {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        try_tcache = (chunk == ptr || chunk->arena !=
            arenas[arena_ind]);
    } else
        try_tcache = true;

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqallocx(ptr, try_tcache);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
}
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));

    assert(size != 0);

    if (malloc_init())
        return (ALLOCM_ERR_OOM);

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);

    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}
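
/*
 * Illustrative usage (not part of this file): nallocm(&usize, 4096,
 * ALLOCM_LG_ALIGN(12)) reports the usable size that an equivalent allocm()
 * call would return, without performing an allocation.
 */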
#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

    malloc_init();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Acquire all mutexes in a safe order. */
    ctl_prefork();
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
    prof_prefork();
    chunk_prefork();
    base_prefork();
    huge_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    huge_postfork_parent();
    base_postfork_parent();
    chunk_postfork_parent();
    prof_postfork_parent();
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
    ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
    unsigned i;

    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    huge_postfork_child();
    base_postfork_child();
    chunk_postfork_child();
    prof_postfork_child();
    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
    ctl_postfork_child();
}
/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */
static void *
a0alloc(size_t size, bool zero)
{

    if (malloc_init())
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(arenas[0], size, zero, false));
    else
        return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, false);
    else
        huge_dalloc(ptr, true);
}

/******************************************************************************/