#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

malloc_mutex_t arenas_lock;

pthread_key_t arenas_tsd;
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));

__thread thread_allocated_t thread_allocated_tls;
pthread_key_t thread_allocated_tsd;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;
/* Used to avoid initialization races. */
static malloc_mutex_t init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;
#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif
/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
size_t opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
    if (result < 0)
        result = errno;
#endif
}

void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    /* Allocate enough space for trailing bins. */
    ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
        + (sizeof(arena_bin_t) * nbins));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}
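/*
 * Note (added): arena_t ends in a variable-length "bins" member, so the single
 * base_alloc() above sizes the allocation as offsetof(arena_t, bins) plus
 * nbins trailing arena_bin_t slots, carving the arena header and all of its
 * bins out of one contiguous block instead of allocating each bin separately.
 */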
/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    ARENA_SET(ret);

    return (ret);
}
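/*
 * Illustrative example (added, not from the original source): with
 * narenas == 4, arenas == {a0, a1, NULL, a3} and per-arena nthreads of
 * {3, 1, -, 2}, the loop above ends with choose == 1 and first_null == 2.
 * Since a1 still has threads assigned and a NULL slot exists, the else branch
 * initializes a new arena in slot 2 rather than piling more threads onto a1.
 */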
/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
    char *b = strerror_r(errno, buf, buflen);
    if (b != buf) {
        strncpy(buf, b, buflen);
        buf[buflen-1] = '\0';
    }
    return (0);
#else
    return (strerror_r(errno, buf, buflen));
#endif
}
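/*
 * Note (added): with _GNU_SOURCE, glibc declares the non-standard
 * "char *strerror_r(int, char *, size_t)", which may return a pointer to an
 * immutable static string instead of filling buf, hence the copy above.  The
 * POSIX variant returns int and always writes into buf, so its return value
 * can be passed through directly.
 */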
static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
    unsigned i;

    /*
     * Merge stats from extant threads.  This is racy, since individual
     * threads do not lock when recording tcache stats events.  As a
     * consequence, the final stats may be slightly out of date by the time
     * they are reported, if other threads continue to allocate.
     */
    for (i = 0; i < narenas; i++) {
        arena_t *arena = arenas[i];
        if (arena != NULL) {
            tcache_t *tcache;

            /*
             * tcache_stats_merge() locks bins, so if any code is
             * introduced that acquires both arena and bin locks in
             * the opposite order, deadlocks may result.
             */
            malloc_mutex_lock(&arena->lock);
            ql_foreach(tcache, &arena->tcache_ql, link) {
                tcache_stats_merge(tcache, arena);
            }
            malloc_mutex_unlock(&arena->lock);
        }
    }
#endif
    JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
    thread_allocated_t *thread_allocated = (thread_allocated_t *)
        imalloc(sizeof(thread_allocated_t));
    if (thread_allocated == NULL) {
        static thread_allocated_t static_thread_allocated = {0, 0};
        malloc_write("<jemalloc>: Error allocating TSD;"
            " mallctl(\"thread.{de,}allocated[p]\", ...)"
            " will be inaccurate\n");
        if (opt_abort)
            abort();
        return (&static_thread_allocated);
    }
    pthread_setspecific(thread_allocated_tsd, thread_allocated);
    thread_allocated->allocated = 0;
    thread_allocated->deallocated = 0;
    return (thread_allocated);
}
#endif
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */
static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

    result = sysconf(_SC_NPROCESSORS_ONLN);
    ret = (unsigned)result;

    return (ret);
}
static void
arenas_cleanup(void *arg)
{
    arena_t *arena = (arena_t *)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
    uint64_t *allocated = (uint64_t *)arg;

    if (allocated != NULL)
        idalloc(allocated);
}
#endif
/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{
    if (malloc_initialized == false)
        return (malloc_init_hard());
    return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E':
        case 'F': case 'G': case 'H': case 'I': case 'J':
        case 'K': case 'L': case 'M': case 'N': case 'O':
        case 'P': case 'Q': case 'R': case 'S': case 'T':
        case 'U': case 'V': case 'W': case 'X': case 'Y':
        case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e':
        case 'f': case 'g': case 'h': case 'i': case 'j':
        case 'k': case 'l': case 'm': case 'n': case 'o':
        case 'p': case 'q': case 'r': case 's': case 't':
        case 'u': case 'v': case 'w': case 'x': case 'y':
        case 'z':
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string "
                    "ends with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf "
                "string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the
             * next time this function is called, it will
             * assume that end of input has been cleanly
             * reached if no input remains, but we have
             * optimistically already consumed the comma if
             * one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string "
                    "ends with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
    char buf[PATH_MAX + 1];

    malloc_write("<jemalloc>: ");
    malloc_write(msg);
    malloc_write(": ");
    memcpy(buf, k, klen);
    memcpy(&buf[klen], ":", 1);
    memcpy(&buf[klen+1], v, vlen);
    buf[klen+1+vlen] = '\0';
    malloc_write(buf);
    malloc_write("\n");
}
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (JEMALLOC_P(malloc_conf) != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = JEMALLOC_P(malloc_conf);
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
                "/etc/malloc.conf"
#endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        }
        case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        }
        default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL(n) \
    if (sizeof(#n)-1 == klen && strncmp(#n, k, klen) == 0) { \
        if (strncmp("true", v, vlen) == 0 && \
            vlen == sizeof("true")-1) \
            opt_##n = true; \
        else if (strncmp("false", v, vlen) == 0 && \
            vlen == sizeof("false")-1) \
            opt_##n = false; \
        else { \
            malloc_conf_error("Invalid conf value", \
                k, klen, v, vlen); \
        } \
        continue; \
    }
#define CONF_HANDLE_SIZE_T(n, min, max) \
    if (sizeof(#n)-1 == klen && strncmp(#n, k, klen) == 0) { \
        unsigned long ul; \
        char *end; \
        errno = 0; \
        ul = strtoul(v, &end, 0); \
        if (errno != 0 || (uintptr_t)end - (uintptr_t)v != vlen) { \
            malloc_conf_error("Invalid conf value", \
                k, klen, v, vlen); \
        } else if (ul < min || ul > max) { \
            malloc_conf_error("Out-of-range conf value", \
                k, klen, v, vlen); \
        } else \
            opt_##n = ul; \
        continue; \
    }
#define CONF_HANDLE_SSIZE_T(n, min, max) \
    if (sizeof(#n)-1 == klen && strncmp(#n, k, klen) == 0) { \
        long l; \
        char *end; \
        errno = 0; \
        l = strtol(v, &end, 0); \
        if (errno != 0 || (uintptr_t)end - (uintptr_t)v != vlen) { \
            malloc_conf_error("Invalid conf value", \
                k, klen, v, vlen); \
        } else if (l < (ssize_t)min || l > (ssize_t)max) { \
            malloc_conf_error("Out-of-range conf value", \
                k, klen, v, vlen); \
        } else \
            opt_##n = l; \
        continue; \
    }
#define CONF_HANDLE_CHAR_P(n, d) \
    if (sizeof(#n)-1 == klen && strncmp(#n, k, klen) == 0) { \
        size_t cpylen = (vlen <= sizeof(opt_##n)-1) ? vlen : \
            sizeof(opt_##n)-1; \
        strncpy(opt_##n, v, cpylen); \
        opt_##n[cpylen] = '\0'; \
        continue; \
    }
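            /*
             * Example (added for illustration): for the conf pair
             * "abort:true", CONF_HANDLE_BOOL(abort) below matches the
             * 5-character key against "abort" and sets opt_abort when the
             * value text is exactly "true" or "false"; any other value is
             * reported via malloc_conf_error() and the option keeps its
             * previous setting.
             */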
            CONF_HANDLE_BOOL(abort)
            CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
                PAGE_SHIFT-1)
            CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
                PAGE_SHIFT-1)
            /*
             * Chunks always require at least one header page,
             * plus one data page.
             */
            CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
                (sizeof(size_t) << 3) - 1)
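            /*
             * Worked example (added): with 4 KiB pages (PAGE_SHIFT == 12)
             * the smallest accepted lg_chunk is 13, i.e. an 8 KiB chunk:
             * one page of chunk header plus one page of usable data.  The
             * upper bound keeps 1 << lg_chunk representable in a size_t.
             */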
            CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
            CONF_HANDLE_BOOL(junk)
            CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
            CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
            CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
            CONF_HANDLE_BOOL(tcache)
            CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
                (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
            CONF_HANDLE_BOOL(prof)
            CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
            CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
            CONF_HANDLE_BOOL(prof_active)
            CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
                (sizeof(uint64_t) << 3) - 1)
            CONF_HANDLE_BOOL(prof_accum)
            CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
                (sizeof(uint64_t) << 3) - 1)
            CONF_HANDLE_BOOL(prof_gdump)
            CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
            CONF_HANDLE_BOOL(overcommit)
#endif
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
        /* Validate configuration of options that are inter-related. */
        if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
            malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
                "relationship; restoring defaults\n");
            opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
            opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
        }
    }
}
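/*
 * Usage example (added): a setting such as MALLOC_CONF="abort:true,narenas:2"
 * (or the same string supplied via the malloc_conf symbol or the
 * /etc/malloc.conf symlink) is parsed by the loop above into the pairs
 * abort=true and narenas=2.
 */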
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || malloc_initializer == pthread_self()) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
    if (malloc_initializer != (unsigned long)0) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef DYNAMIC_PAGE_SHIFT
    /* Get page size. */
    {
        long result;

        result = sysconf(_SC_PAGESIZE);
        assert(result != -1);
        pagesize = (unsigned)result;

        /*
         * We assume that pagesize is a power of 2 when calculating
         * pagesize_mask and lg_pagesize.
         */
        assert(((result - 1) & result) == 0);
        pagesize_mask = result - 1;
        lg_pagesize = ffs((int)result) - 1;
    }
#endif
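    /*
     * Worked example (added): for result == 4096, pagesize_mask becomes 0xfff
     * and lg_pagesize becomes 12, since ffs(4096) returns 13 (the set bit is
     * in position 13, counting from 1).
     */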
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
        jemalloc_postfork) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }

        malloc_mutex_unlock(&init_lock);

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

        malloc_mutex_unlock(&init_lock);

        malloc_mutex_unlock(&init_lock);

        malloc_mutex_unlock(&init_lock);

#ifdef JEMALLOC_TCACHE
        malloc_mutex_unlock(&init_lock);
#endif

        malloc_mutex_unlock(&init_lock);

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
    /* Initialize allocation counters before any allocations can occur. */
    if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
        != 0) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
#endif
776 arenas
= init_arenas
;
777 memset(arenas
, 0, sizeof(arena_t
*) * narenas
);
780 * Initialize one arena here. The rest are lazily created in
781 * choose_arena_hard().
784 if (arenas
[0] == NULL
) {
785 malloc_mutex_unlock(&init_lock
);
790 * Assign the initial arena to the initial thread, in order to avoid
791 * spurious creation of an extra arena if the application switches to
794 ARENA_SET(arenas
[0]);
795 arenas
[0]->nthreads
++;
797 if (malloc_mutex_init(&arenas_lock
))
800 if (pthread_key_create(&arenas_tsd
, arenas_cleanup
) != 0) {
801 malloc_mutex_unlock(&init_lock
);
807 malloc_mutex_unlock(&init_lock
);
    /* Get number of CPUs. */
    malloc_initializer = pthread_self();
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        char buf[UMAX2S_BUFSIZE];

        narenas = chunksize / sizeof(arena_t *);
        malloc_write("<jemalloc>: Reducing narenas to limit (");
        malloc_write(u2s(narenas, 10, buf));
        malloc_write(")\n");
    }
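    /*
     * Worked example (added): on an 8-CPU system with no explicit narenas
     * setting, opt_narenas becomes 8 << 2 == 32.  The cap above is generous:
     * with a 4 MiB chunksize and 8-byte pointers it only takes effect once
     * narenas would exceed 4194304 / 8 == 524288.
     */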
    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
    /* Register the custom zone. */
    malloc_zone_register(create_zone());

    /*
     * Convert the default szone to an "overlay zone" that is capable of
     * deallocating szone-allocated objects, but allocating new objects
     * from jemalloc.
     */
    szone2ozone(malloc_default_zone());
#endif

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}
#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

    if (malloc_init_hard())
        abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
    void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init()) {
        ret = NULL;
        goto OOM;
    }

    if (size == 0) {
#ifdef JEMALLOC_SYSV
        if (opt_sysv == false)
#endif
            size = 1;
#ifdef JEMALLOC_SYSV
        else {
# ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in malloc(): "
                    "invalid size 0\n");
                abort();
            }
# endif
            ret = NULL;
            goto RETURN;
        }
#endif
    }

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        usize = s2u(size);
        if ((cnt = prof_alloc_prep(usize)) == NULL) {
            ret = NULL;
            goto OOM;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            small_maxclass) {
            ret = imalloc(small_maxclass+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        usize = s2u(size);
#endif
        ret = imalloc(size);
    }

OOM:
    if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
#endif
        errno = ENOMEM;
    }

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, 0);
    }
#endif
    return (ret);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
    int ret;
    size_t usize
#ifdef JEMALLOC_CC_SILENCE
        = 0
#endif
        ;
    void *result;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0) {
#ifdef JEMALLOC_SYSV
            if (opt_sysv == false)
#endif
                size = 1;
#ifdef JEMALLOC_SYSV
            else {
# ifdef JEMALLOC_XMALLOC
                if (opt_xmalloc) {
                    malloc_write("<jemalloc>: Error in "
                        "posix_memalign(): invalid size "
                        "0\n");
                    abort();
                }
# endif
                result = NULL;
                *memptr = NULL;
                ret = 0;
                goto RETURN;
            }
#endif
        }

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in "
                    "posix_memalign(): invalid alignment\n");
                abort();
            }
#endif
            result = NULL;
            ret = EINVAL;
            goto RETURN;
        }

        usize = sa2u(size, alignment, NULL);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto RETURN;
        }

#ifdef JEMALLOC_PROF
        if (opt_prof) {
            if ((cnt = prof_alloc_prep(usize)) == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= small_maxclass) {
                    assert(sa2u(small_maxclass+1,
                        alignment, NULL) != 0);
                    result = ipalloc(sa2u(small_maxclass+1,
                        alignment, NULL), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
#endif
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in posix_memalign(): "
                "out of memory\n");
            abort();
        }
#endif
        ret = ENOMEM;
        goto RETURN;
    }

    *memptr = result;
    ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
    if (result != NULL) {
        assert(usize == isalloc(result));
        ALLOCATED_ADD(usize, 0);
    }
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
#endif
    return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto RETURN;
    }

    num_size = num * size;
    if (num_size == 0) {
#ifdef JEMALLOC_SYSV
        if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
            num_size = 1;
#ifdef JEMALLOC_SYSV
        else {
            ret = NULL;
            goto RETURN;
        }
#endif
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
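    /*
     * Worked example (added): on a 64-bit system the mask below is
     * SIZE_T_MAX << 32.  If num and size both fit in 32 bits their product
     * cannot overflow a size_t, so no division is performed.  Only when one
     * operand has a high bit set (e.g. num == ((size_t)1 << 33)) is
     * num_size / size compared against num to detect wraparound.
     */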
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto RETURN;
    }

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        usize = s2u(num_size);
        if ((cnt = prof_alloc_prep(usize)) == NULL) {
            ret = NULL;
            goto RETURN;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= small_maxclass) {
            ret = icalloc(small_maxclass+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        usize = s2u(num_size);
#endif
        ret = icalloc(num_size);
    }

RETURN:
    if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
#endif
        errno = ENOMEM;
    }

#ifdef JEMALLOC_PROF
    if (opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, 0);
    }
#endif
    return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
    void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
    size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
    prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (size == 0) {
#ifdef JEMALLOC_SYSV
        if (opt_sysv == false)
#endif
            size = 1;
#ifdef JEMALLOC_SYSV
        else {
            if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
                old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
                if (opt_prof) {
                    old_ctx = prof_ctx_get(ptr);
                    cnt = NULL;
                }
#endif
                idalloc(ptr);
            }
#ifdef JEMALLOC_PROF
            else if (opt_prof) {
                old_ctx = NULL;
                cnt = NULL;
            }
#endif
            ret = NULL;
            goto RETURN;
        }
#endif
    }

    if (ptr != NULL) {
        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
        usize = s2u(size);
        old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
        if (opt_prof) {
            old_ctx = prof_ctx_get(ptr);
            if ((cnt = prof_alloc_prep(usize)) == NULL) {
                ret = NULL;
                goto OOM;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= small_maxclass) {
                ret = iralloc(ptr, small_maxclass+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
            } else
                ret = iralloc(ptr, size, 0, 0, false, false);
        } else
#endif
        {
#ifdef JEMALLOC_STATS
            usize = s2u(size);
#endif
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

#ifdef JEMALLOC_PROF
OOM:
#endif
        if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
#endif
            errno = ENOMEM;
        }
    } else {
#ifdef JEMALLOC_PROF
        if (opt_prof)
            old_ctx = NULL;
#endif
        if (malloc_init()) {
#ifdef JEMALLOC_PROF
            if (opt_prof)
                cnt = NULL;
#endif
            ret = NULL;
        } else {
#ifdef JEMALLOC_PROF
            if (opt_prof) {
                usize = s2u(size);
                if ((cnt = prof_alloc_prep(usize)) == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        small_maxclass) {
                        ret = imalloc(small_maxclass+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else
#endif
            {
#ifdef JEMALLOC_STATS
                usize = s2u(size);
#endif
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
#endif
            errno = ENOMEM;
        }
    }

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, old_size);
    }
#endif
    return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

    if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
        size_t usize;
#endif

        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

#ifdef JEMALLOC_STATS
        usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
        if (opt_prof) {
# ifndef JEMALLOC_STATS
            usize = isalloc(ptr);
# endif
            prof_free(ptr, usize);
        }
#endif
#ifdef JEMALLOC_STATS
        ALLOCATED_ADD(0, usize);
#endif
        idalloc(ptr);
    }
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
    void *ret;
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        JEMALLOC_P(posix_memalign)(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
    if (result != 0)
        return (NULL);
#endif
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
    void *ret;
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
    if (result != 0)
        return (NULL);
#endif
    return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
    ret = ivsalloc(ptr);
#else
    assert(ptr != NULL);
    ret = isalloc(ptr);
#endif

    return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
        NULL)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt;
#endif

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto OOM;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
        NULL);
    if (usize == 0)
        goto OOM;

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        if ((cnt = prof_alloc_prep(usize)) == NULL)
            goto OOM;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            small_maxclass) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(small_maxclass+1) : sa2u(small_maxclass+1,
                alignment, NULL);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto OOM;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto OOM;
        }

        if (rsize != NULL)
            *rsize = usize;
    } else
#endif
    {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto OOM;
#ifndef JEMALLOC_STATS
        if (rsize != NULL)
#endif
        {
#ifdef JEMALLOC_STATS
            if (rsize != NULL)
#endif
                *rsize = usize;
        }
    }

    *ptr = p;
#ifdef JEMALLOC_STATS
    assert(usize == isalloc(p));
    ALLOCATED_ADD(usize, 0);
#endif
    return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
    if (opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
#endif
    *ptr = NULL;
    return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
    void *p, *q;
    size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t old_size;
#endif
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt;
    prof_ctx_t *old_ctx;
#endif

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || malloc_initializer == pthread_self());

    p = *ptr;
#ifdef JEMALLOC_PROF
    if (opt_prof) {
        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in prof_alloc_prep() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment, NULL);
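        /*
         * Illustrative example (added): for size == 4000, extra == 96, and
         * alignment == 0, max_usize is s2u(4096), the usable size that would
         * result if iralloc() grants the full extra amount; the actual usize
         * is computed from the returned object and may be smaller.
         */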
        old_size = isalloc(p);
        old_ctx = prof_ctx_get(p);
        if ((cnt = prof_alloc_prep(max_usize)) == NULL)
            goto OOM;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
            <= small_maxclass) {
            q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
                size+extra) ? 0 : size+extra - (small_maxclass+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto ERR;
            usize = isalloc(q);
            arena_prof_promoted(q, usize);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto ERR;
            usize = isalloc(q);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        old_size = isalloc(p);
#endif
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto ERR;
#ifndef JEMALLOC_STATS
        if (rsize != NULL)
#endif
        {
            usize = isalloc(q);
#ifdef JEMALLOC_STATS
            if (rsize != NULL)
#endif
                *rsize = usize;
        }
    }

    *ptr = q;
#ifdef JEMALLOC_STATS
    ALLOCATED_ADD(usize, old_size);
#endif
    return (ALLOCM_SUCCESS);
ERR:
    if (no_move)
        return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
    if (opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
#endif
    return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
    sz = ivsalloc(ptr);
#else
    assert(ptr != NULL);
    sz = isalloc(ptr);
#endif
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize;
#endif

    assert(ptr != NULL);
    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
    usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof) {
# ifndef JEMALLOC_STATS
        usize = isalloc(ptr);
# endif
        prof_free(ptr, usize);
    }
#endif
#ifdef JEMALLOC_STATS
    ALLOCATED_ADD(0, usize);
#endif
    idalloc(ptr);

    return (ALLOCM_SUCCESS);
}
/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
    unsigned i;

    /* Acquire all mutexes in a safe order. */

    malloc_mutex_lock(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            malloc_mutex_lock(&arenas[i]->lock);
    }

    malloc_mutex_lock(&base_mtx);

    malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
    malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
    malloc_mutex_lock(&swap_mtx);
#endif
}
void
jemalloc_postfork(void)
{
    unsigned i;

    /* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
    malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
    malloc_mutex_unlock(&dss_mtx);
#endif

    malloc_mutex_unlock(&huge_mtx);

    malloc_mutex_unlock(&base_mtx);

    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            malloc_mutex_unlock(&arenas[i]->lock);
    }
    malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/