#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

malloc_mutex_t		arenas_lock;

pthread_key_t		arenas_tsd;
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));

__thread thread_allocated_t	thread_allocated_tls;
pthread_key_t		thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;
/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif
/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#else
bool	opt_abort = false;
#endif
bool	opt_junk = false;
bool	opt_sysv = false;
#ifdef JEMALLOC_XMALLOC
bool	opt_xmalloc = false;
#endif
bool	opt_zero = false;
size_t	opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size);

/******************************************************************************/
/* malloc_message() setup. */
#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
	if (result < 0)
		result = errno;
#endif
}

void	(*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
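/*
 * Usage sketch (illustrative only): since JEMALLOC_P(malloc_message) is an
 * exported, writable function pointer defaulting to wrtmessage(), an
 * application can redirect allocator messages, e.g.:
 *
 *	static void my_write(void *cbopaque, const char *s) {
 *		fputs(s, stderr);
 *	}
 *	...
 *	JEMALLOC_P(malloc_message) = my_write;
 *
 * my_write() is a hypothetical application callback named here only for the
 * example.
 */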
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
	    + (sizeof(arena_bin_t) * nbins));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}
/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB:  It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errno, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errno, buf, buflen));
#endif
}
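/*
 * Usage sketch (illustrative only): regardless of which strerror_r() variant
 * is in effect, callers format errno into a stack buffer, e.g.:
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 *
 * The buffer name and size are arbitrary example values.
 */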
static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			/*
			 * tcache_stats_merge() locks bins, so if any code is
			 * introduced that acquires both arena and bin locks in
			 * the opposite order, deadlocks may result.
			 */
			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
#endif
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	}
	ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F': case 'G': case 'H': case 'I': case 'J':
			case 'K': case 'L': case 'M': case 'N': case 'O':
			case 'P': case 'Q': case 'R': case 'S': case 'T':
			case 'U': case 'V': case 'W': case 'X': case 'Y':
			case 'Z':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f': case 'g': case 'h': case 'i': case 'j':
			case 'k': case 'l': case 'm': case 'n': case 'o':
			case 'p': case 'q': case 'r': case 's': case 't':
			case 'u': case 'v': case 'w': case 'x': case 'y':
			case 'z':
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case '_':
				opts++;
				break;
			case ':':
				opts++;
				*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
				*v_p = opts;
				accept = true;
				break;
			case '\0':
				if (opts != *opts_p) {
					malloc_write("<jemalloc>: Conf string "
					    "ends with key\n");
				}
				return (true);
			default:
				malloc_write("<jemalloc>: Malformed conf "
				    "string\n");
				return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
			case ',':
				opts++;
				/*
				 * Look ahead one character here, because the
				 * next time this function is called, it will
				 * assume that end of input has been cleanly
				 * reached if no input remains, but we have
				 * optimistically already consumed the comma if
				 * one exists.
				 */
				if (*opts == '\0') {
					malloc_write("<jemalloc>: Conf string "
					    "ends with comma\n");
				}
				*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
				accept = true;
				break;
			case '\0':
				*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
				accept = true;
				break;
			default:
				opts++;
				break;
		}
	}

	*opts_p = opts;
	return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}
			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
			CONF_HANDLE_BOOL(junk)
			CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
			CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
			CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
			CONF_HANDLE_BOOL(tcache)
			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
			    (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
			CONF_HANDLE_BOOL(prof)
			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
			CONF_HANDLE_BOOL(prof_active)
			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_accum)
			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_gdump)
			CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
			CONF_HANDLE_BOOL(overcommit)
#endif
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}
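/*
 * Example (illustrative only, using option names handled above): a conf
 * string such as
 *
 *	"abort:true,narenas:4,lg_chunk:22"
 *
 * sets opt_abort, opt_narenas, and opt_lg_chunk.  The same syntax is accepted
 * from any of the three sources probed above: the compiled-in
 * JEMALLOC_P(malloc_conf) symbol, the name of the /etc/malloc.conf symbolic
 * link, and the MALLOC_CONF environment variable.  The specific values shown
 * here are arbitrary.
 */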
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	malloc_conf_init();
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arena_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_TCACHE
	if (tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
	/* Initialize allocation counters before any allocations can occur. */
	if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
	    != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif
	if (malloc_mutex_init(&arenas_lock))
		return (true);

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

#ifdef JEMALLOC_PROF
	if (prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif
	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}
	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}
#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
# endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			ret = imalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(size);
#endif
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize
#ifdef JEMALLOC_CC_SILENCE
	    = 0
#endif
	    ;
	void *result;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				if (opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
# endif
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

#ifdef JEMALLOC_PROF
		if (opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= small_maxclass) {
					assert(sa2u(small_maxclass+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(small_maxclass+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
#endif
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
	if (result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
#endif
	return (ret);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
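	/*
	 * Worked example (illustrative): on a 64-bit system, sizeof(size_t)
	 * << 2 == 32, so the mask below covers the upper 32 bits of a size_t.
	 * If both num and size are below 2^32, their product cannot overflow
	 * 64 bits and the division check is skipped entirely; the
	 * "num_size / size != num" test only runs when at least one operand
	 * has a high bit set.
	 */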
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= small_maxclass) {
			ret = icalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(num_size);
#endif
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
	size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
	prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
			if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
				old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
				if (opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
#endif
				idalloc(ptr);
			}
#ifdef JEMALLOC_PROF
			else if (opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
#endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= small_maxclass) {
				ret = iralloc(ptr, small_maxclass+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else
#endif
		{
#ifdef JEMALLOC_STATS
			usize = s2u(size);
#endif
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

#ifdef JEMALLOC_PROF
OOM:
#endif
		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
#ifdef JEMALLOC_PROF
		if (opt_prof)
			old_ctx = NULL;
#endif
		if (malloc_init()) {
#ifdef JEMALLOC_PROF
			if (opt_prof)
				cnt = NULL;
#endif
			ret = NULL;
		} else {
#ifdef JEMALLOC_PROF
			if (opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    small_maxclass) {
						ret = imalloc(small_maxclass+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else
#endif
			{
#ifdef JEMALLOC_STATS
				usize = s2u(size);
#endif
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		size_t usize;
#endif

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_STATS
		usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
# ifndef JEMALLOC_STATS
			usize = isalloc(ptr);
# endif
			prof_free(ptr, usize);
		}
#endif
#ifdef JEMALLOC_STATS
		ALLOCATED_ADD(0, usize);
#endif
		idalloc(ptr);
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    imemalign(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    imemalign(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	ret = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	ret = isalloc(ptr);
#endif

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
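/*
 * Usage sketch (illustrative only): the ctl interface reads and writes named
 * values in a sysctl(3)-like fashion, e.g.:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	JEMALLOC_P(mallctl)("stats.allocated", &allocated, &sz, NULL, 0);
 *
 * "stats.allocated" is used here purely as an example name; the set of valid
 * names is defined by the ctl machinery (ctl_byname() above), not by this
 * file.
 */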
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = p;
#ifdef JEMALLOC_STATS
	assert(usize == isalloc(p));
	ALLOCATED_ADD(usize, 0);
#endif
	return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
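/*
 * Usage sketch (illustrative only): request at least 4096 zeroed bytes with
 * 64-byte alignment, receiving the usable size in rsize:
 *
 *	void *p;
 *	size_t rsize;
 *	if (JEMALLOC_P(allocm)(&p, &rsize, 4096, ALLOCM_ZERO | 6)
 *	    != ALLOCM_SUCCESS)
 *		...
 *
 * The low bits of flags encode lg(alignment) per ALLOCM_LG_ALIGN_MASK, so the
 * literal 6 requests 2^6-byte alignment; the size and alignment here are
 * arbitrary example values.
 */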
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t old_size;
#endif
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
#ifdef JEMALLOC_PROF
	if (opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= small_maxclass) {
			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
			    size+extra) ? 0 : size+extra - (small_maxclass+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		old_size = isalloc(p);
#endif
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
			usize = isalloc(q);
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = q;
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(usize, old_size);
#endif
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	sz = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	sz = isalloc(ptr);
#endif
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize;
#endif

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
	usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof) {
# ifndef JEMALLOC_STATS
		usize = isalloc(ptr);
# endif
		prof_free(ptr, usize);
	}
#endif
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(0, usize);
#endif
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */
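/*
 * As registered in malloc_init_hard() above:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
 *
 * i.e. every allocator mutex is acquired before fork() and released again in
 * both the parent and the child, so a child of a multi-threaded process does
 * not inherit a lock held by a thread that no longer exists.
 */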
void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
#endif
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
	malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/