[redis.git] / deps / jemalloc / src / jemalloc.c
a78e148b 1#define JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
4934f93d 7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9 THREAD_ALLOCATED_INITIALIZER)
a78e148b 10
4934f93d 11/* Runtime configuration options. */
12const char *je_malloc_conf;
13#ifdef JEMALLOC_DEBUG
14bool opt_abort = true;
15# ifdef JEMALLOC_FILL
16bool opt_junk = true;
a78e148b 17# else
4934f93d 18bool opt_junk = false;
a78e148b 19# endif
4934f93d 20#else
21bool opt_abort = false;
22bool opt_junk = false;
a78e148b 23#endif
4934f93d 24size_t opt_quarantine = ZU(0);
25bool opt_redzone = false;
26bool opt_utrace = false;
27bool opt_valgrind = false;
28bool opt_xmalloc = false;
29bool opt_zero = false;
30size_t opt_narenas = 0;
31
32unsigned ncpus;
33
34malloc_mutex_t arenas_lock;
35arena_t **arenas;
21b26915 36unsigned narenas_total;
37unsigned narenas_auto;
a78e148b 38
39/* Set to true once the allocator has been initialized. */
40static bool malloc_initialized = false;
41
4934f93d 42#ifdef JEMALLOC_THREADED_INIT
a78e148b 43/* Used to let the initializing thread recursively allocate. */
4934f93d 44# define NO_INITIALIZER ((unsigned long)0)
45# define INITIALIZER pthread_self()
46# define IS_INITIALIZER (malloc_initializer == pthread_self())
47static pthread_t malloc_initializer = NO_INITIALIZER;
a78e148b 48#else
4934f93d 49# define NO_INITIALIZER false
50# define INITIALIZER true
51# define IS_INITIALIZER malloc_initializer
52static bool malloc_initializer = NO_INITIALIZER;
a78e148b 53#endif
a78e148b 54
4934f93d 55/* Used to avoid initialization races. */
56#ifdef _WIN32
57static malloc_mutex_t init_lock;
a78e148b 58
4934f93d 59JEMALLOC_ATTR(constructor)
60static void WINAPI
61_init_init_lock(void)
62{
a78e148b 63
4934f93d 64 malloc_mutex_init(&init_lock);
65}
66
67#ifdef _MSC_VER
68# pragma section(".CRT$XCU", read)
69JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
70static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
a78e148b 71#endif
4934f93d 72
73#else
74static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
75#endif
76
77typedef struct {
78 void *p; /* Input pointer (as in realloc(p, s)). */
79 size_t s; /* Request size. */
80 void *r; /* Result pointer. */
81} malloc_utrace_t;
82
83#ifdef JEMALLOC_UTRACE
84# define UTRACE(a, b, c) do { \
85 if (opt_utrace) { \
86 malloc_utrace_t ut; \
87 ut.p = (a); \
88 ut.s = (b); \
89 ut.r = (c); \
90 utrace(&ut, sizeof(ut)); \
91 } \
92} while (0)
93#else
94# define UTRACE(a, b, c)
a78e148b 95#endif
a78e148b 96
97/******************************************************************************/
98/* Function prototypes for non-inline static functions. */
99
a78e148b 100static void stats_print_atexit(void);
101static unsigned malloc_ncpus(void);
a78e148b 102static bool malloc_conf_next(char const **opts_p, char const **k_p,
103 size_t *klen_p, char const **v_p, size_t *vlen_p);
104static void malloc_conf_error(const char *msg, const char *k, size_t klen,
105 const char *v, size_t vlen);
106static void malloc_conf_init(void);
107static bool malloc_init_hard(void);
4934f93d 108static int imemalign(void **memptr, size_t alignment, size_t size,
109 size_t min_alignment);
a78e148b 110
111/******************************************************************************/
112/*
113 * Begin miscellaneous support functions.
114 */
115
116/* Create a new arena and insert it into the arenas array at index ind. */
117arena_t *
118arenas_extend(unsigned ind)
119{
120 arena_t *ret;
121
4934f93d 122 ret = (arena_t *)base_alloc(sizeof(arena_t));
a78e148b 123 if (ret != NULL && arena_new(ret, ind) == false) {
124 arenas[ind] = ret;
125 return (ret);
126 }
127 /* Only reached if there is an OOM error. */
128
129 /*
130 * OOM here is quite inconvenient to propagate, since dealing with it
131 * would require a check for failure in the fast path. Instead, punt
132 * by using arenas[0]. In practice, this is an extremely unlikely
133 * failure.
134 */
135 malloc_write("<jemalloc>: Error initializing arena\n");
136 if (opt_abort)
137 abort();
138
139 return (arenas[0]);
140}
141
4934f93d 142/* Slow path, called only by choose_arena(). */
a78e148b 143arena_t *
144choose_arena_hard(void)
145{
146 arena_t *ret;
147
21b26915 148 if (narenas_auto > 1) {
a78e148b 149 unsigned i, choose, first_null;
150
151 choose = 0;
21b26915 152 first_null = narenas_auto;
a78e148b 153 malloc_mutex_lock(&arenas_lock);
154 assert(arenas[0] != NULL);
21b26915 155 for (i = 1; i < narenas_auto; i++) {
a78e148b 156 if (arenas[i] != NULL) {
157 /*
158 * Choose the first arena that has the lowest
159 * number of threads assigned to it.
160 */
161 if (arenas[i]->nthreads <
162 arenas[choose]->nthreads)
163 choose = i;
21b26915 164 } else if (first_null == narenas_auto) {
a78e148b 165 /*
166 * Record the index of the first uninitialized
167 * arena, in case all extant arenas are in use.
168 *
169 * NB: It is possible for there to be
170 * discontinuities in terms of initialized
171 * versus uninitialized arenas, due to the
172 * "thread.arena" mallctl.
173 */
174 first_null = i;
175 }
176 }
177
21b26915 178 if (arenas[choose]->nthreads == 0
179 || first_null == narenas_auto) {
a78e148b 180 /*
181 * Use an unloaded arena, or the least loaded arena if
182 * all arenas are already initialized.
183 */
184 ret = arenas[choose];
185 } else {
186 /* Initialize a new arena. */
187 ret = arenas_extend(first_null);
188 }
189 ret->nthreads++;
190 malloc_mutex_unlock(&arenas_lock);
191 } else {
192 ret = arenas[0];
193 malloc_mutex_lock(&arenas_lock);
194 ret->nthreads++;
195 malloc_mutex_unlock(&arenas_lock);
196 }
197
4934f93d 198 arenas_tsd_set(&ret);
a78e148b 199
200 return (ret);
201}
202
a78e148b 203static void
204stats_print_atexit(void)
205{
206
4934f93d 207 if (config_tcache && config_stats) {
21b26915 208 unsigned narenas, i;
a78e148b 209
4934f93d 210 /*
211 * Merge stats from extant threads. This is racy, since
212 * individual threads do not lock when recording tcache stats
213 * events. As a consequence, the final stats may be slightly
214 * out of date by the time they are reported, if other threads
215 * continue to allocate.
216 */
21b26915 217 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
4934f93d 218 arena_t *arena = arenas[i];
219 if (arena != NULL) {
220 tcache_t *tcache;
a78e148b 221
4934f93d 222 /*
223 * tcache_stats_merge() locks bins, so if any
224 * code is introduced that acquires both arena
225 * and bin locks in the opposite order,
226 * deadlocks may result.
227 */
228 malloc_mutex_lock(&arena->lock);
229 ql_foreach(tcache, &arena->tcache_ql, link) {
230 tcache_stats_merge(tcache, arena);
231 }
232 malloc_mutex_unlock(&arena->lock);
a78e148b 233 }
a78e148b 234 }
235 }
4934f93d 236 je_malloc_stats_print(NULL, NULL, NULL);
a78e148b 237}
a78e148b 238
239/*
240 * End miscellaneous support functions.
241 */
242/******************************************************************************/
243/*
244 * Begin initialization functions.
245 */
246
247static unsigned
248malloc_ncpus(void)
249{
250 unsigned ret;
251 long result;
252
4934f93d 253#ifdef _WIN32
254 SYSTEM_INFO si;
255 GetSystemInfo(&si);
256 result = si.dwNumberOfProcessors;
257#else
a78e148b 258 result = sysconf(_SC_NPROCESSORS_ONLN);
21b26915 259#endif
a78e148b 260 if (result == -1) {
261 /* Error. */
262 ret = 1;
21b26915 263 } else {
264 ret = (unsigned)result;
265 }
a78e148b 266
267 return (ret);
268}
269
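/*
 * TSD cleanup hook: runs when a thread exits so that the arena the thread
 * was assigned to stops counting it in nthreads.
 */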
4934f93d 270void
a78e148b 271arenas_cleanup(void *arg)
272{
4934f93d 273 arena_t *arena = *(arena_t **)arg;
a78e148b 274
275 malloc_mutex_lock(&arenas_lock);
276 arena->nthreads--;
277 malloc_mutex_unlock(&arenas_lock);
278}
279
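/*
 * Fast-path initialization check, inlined into the allocation entry points;
 * the real work happens once in malloc_init_hard().
 */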
a78e148b 280static inline bool
281malloc_init(void)
282{
283
284 if (malloc_initialized == false)
285 return (malloc_init_hard());
286
287 return (false);
288}
289
290static bool
291malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
292 char const **v_p, size_t *vlen_p)
293{
294 bool accept;
295 const char *opts = *opts_p;
296
297 *k_p = opts;
298
299 for (accept = false; accept == false;) {
300 switch (*opts) {
4934f93d 301 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
302 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
303 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
304 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
305 case 'Y': case 'Z':
306 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
307 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
308 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
309 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
310 case 'y': case 'z':
311 case '0': case '1': case '2': case '3': case '4': case '5':
312 case '6': case '7': case '8': case '9':
313 case '_':
314 opts++;
315 break;
316 case ':':
317 opts++;
318 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
319 *v_p = opts;
320 accept = true;
321 break;
322 case '\0':
323 if (opts != *opts_p) {
324 malloc_write("<jemalloc>: Conf string ends "
325 "with key\n");
326 }
327 return (true);
328 default:
329 malloc_write("<jemalloc>: Malformed conf string\n");
330 return (true);
a78e148b 331 }
332 }
333
334 for (accept = false; accept == false;) {
335 switch (*opts) {
4934f93d 336 case ',':
337 opts++;
338 /*
339 * Look ahead one character here, because the next time
340 * this function is called, it will assume that end of
341 * input has been cleanly reached if no input remains,
342 * but we have optimistically already consumed the
343 * comma if one exists.
344 */
345 if (*opts == '\0') {
346 malloc_write("<jemalloc>: Conf string ends "
347 "with comma\n");
348 }
349 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
350 accept = true;
351 break;
352 case '\0':
353 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
354 accept = true;
355 break;
356 default:
357 opts++;
358 break;
a78e148b 359 }
360 }
361
362 *opts_p = opts;
363 return (false);
364}
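/*
 * The string parsed above is a comma-separated list of key:value pairs,
 * e.g. "narenas:4,lg_chunk:22" (hypothetical settings), taken from
 * je_malloc_conf, the /etc/malloc.conf symlink name, or the MALLOC_CONF
 * environment variable (see malloc_conf_init() below).
 */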
365
366static void
367malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
368 size_t vlen)
369{
a78e148b 370
4934f93d 371 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
372 (int)vlen, v);
a78e148b 373}
374
375static void
376malloc_conf_init(void)
377{
378 unsigned i;
379 char buf[PATH_MAX + 1];
380 const char *opts, *k, *v;
381 size_t klen, vlen;
382
21b26915 383 /*
384 * Automatically configure valgrind before processing options. The
385 * valgrind option remains in jemalloc 3.x for compatibility reasons.
386 */
387 if (config_valgrind) {
388 opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
389 if (config_fill && opt_valgrind) {
390 opt_junk = false;
391 assert(opt_zero == false);
392 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
393 opt_redzone = true;
394 }
395 if (config_tcache && opt_valgrind)
396 opt_tcache = false;
397 }
398
a78e148b 399 for (i = 0; i < 3; i++) {
400 /* Get runtime configuration. */
401 switch (i) {
402 case 0:
4934f93d 403 if (je_malloc_conf != NULL) {
a78e148b 404 /*
405 * Use options that were compiled into the
406 * program.
407 */
4934f93d 408 opts = je_malloc_conf;
a78e148b 409 } else {
410 /* No configuration specified. */
411 buf[0] = '\0';
412 opts = buf;
413 }
414 break;
415 case 1: {
4934f93d 416#ifndef _WIN32
a78e148b 417 int linklen;
418 const char *linkname =
4934f93d 419# ifdef JEMALLOC_PREFIX
a78e148b 420 "/etc/"JEMALLOC_PREFIX"malloc.conf"
4934f93d 421# else
a78e148b 422 "/etc/malloc.conf"
4934f93d 423# endif
a78e148b 424 ;
425
426 if ((linklen = readlink(linkname, buf,
427 sizeof(buf) - 1)) != -1) {
428 /*
429 * Use the contents of the "/etc/malloc.conf"
430 * symbolic link's name.
431 */
432 buf[linklen] = '\0';
433 opts = buf;
4934f93d 434 } else
435#endif
436 {
a78e148b 437 /* No configuration specified. */
438 buf[0] = '\0';
439 opts = buf;
440 }
441 break;
4934f93d 442 } case 2: {
a78e148b 443 const char *envname =
444#ifdef JEMALLOC_PREFIX
445 JEMALLOC_CPREFIX"MALLOC_CONF"
446#else
447 "MALLOC_CONF"
448#endif
449 ;
450
451 if ((opts = getenv(envname)) != NULL) {
452 /*
453 * Do nothing; opts is already initialized to
454 * the value of the MALLOC_CONF environment
455 * variable.
456 */
457 } else {
458 /* No configuration specified. */
459 buf[0] = '\0';
460 opts = buf;
461 }
462 break;
4934f93d 463 } default:
a78e148b 464 /* NOTREACHED */
465 assert(false);
466 buf[0] = '\0';
467 opts = buf;
468 }
469
470 while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
471 &vlen) == false) {
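/*
 * Each CONF_HANDLE_* macro below compares the current key k against the
 * option name n; on a match it validates and parses the value v into the
 * option variable o, then continues with the next key:value pair.
 */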
4934f93d 472#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
473 if (sizeof(n)-1 == klen && strncmp(n, k, \
a78e148b 474 klen) == 0) { \
475 if (strncmp("true", v, vlen) == 0 && \
476 vlen == sizeof("true")-1) \
4934f93d 477 o = true; \
a78e148b 478 else if (strncmp("false", v, vlen) == \
479 0 && vlen == sizeof("false")-1) \
4934f93d 480 o = false; \
a78e148b 481 else { \
482 malloc_conf_error( \
483 "Invalid conf value", \
484 k, klen, v, vlen); \
485 } \
4934f93d 486 hit = true; \
487 } else \
488 hit = false;
489#define CONF_HANDLE_BOOL(o, n) { \
490 bool hit; \
491 CONF_HANDLE_BOOL_HIT(o, n, hit); \
492 if (hit) \
a78e148b 493 continue; \
4934f93d 494}
495#define CONF_HANDLE_SIZE_T(o, n, min, max) \
496 if (sizeof(n)-1 == klen && strncmp(n, k, \
a78e148b 497 klen) == 0) { \
4934f93d 498 uintmax_t um; \
a78e148b 499 char *end; \
500 \
4934f93d 501 set_errno(0); \
502 um = malloc_strtoumax(v, &end, 0); \
503 if (get_errno() != 0 || (uintptr_t)end -\
a78e148b 504 (uintptr_t)v != vlen) { \
505 malloc_conf_error( \
506 "Invalid conf value", \
507 k, klen, v, vlen); \
4934f93d 508 } else if (um < min || um > max) { \
a78e148b 509 malloc_conf_error( \
510 "Out-of-range conf value", \
511 k, klen, v, vlen); \
512 } else \
4934f93d 513 o = um; \
a78e148b 514 continue; \
515 }
4934f93d 516#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
517 if (sizeof(n)-1 == klen && strncmp(n, k, \
a78e148b 518 klen) == 0) { \
519 long l; \
520 char *end; \
521 \
4934f93d 522 set_errno(0); \
a78e148b 523 l = strtol(v, &end, 0); \
4934f93d 524 if (get_errno() != 0 || (uintptr_t)end -\
a78e148b 525 (uintptr_t)v != vlen) { \
526 malloc_conf_error( \
527 "Invalid conf value", \
528 k, klen, v, vlen); \
529 } else if (l < (ssize_t)min || l > \
530 (ssize_t)max) { \
531 malloc_conf_error( \
532 "Out-of-range conf value", \
533 k, klen, v, vlen); \
534 } else \
4934f93d 535 o = l; \
a78e148b 536 continue; \
537 }
4934f93d 538#define CONF_HANDLE_CHAR_P(o, n, d) \
539 if (sizeof(n)-1 == klen && strncmp(n, k, \
a78e148b 540 klen) == 0) { \
541 size_t cpylen = (vlen <= \
4934f93d 542 sizeof(o)-1) ? vlen : \
543 sizeof(o)-1; \
544 strncpy(o, v, cpylen); \
545 o[cpylen] = '\0'; \
a78e148b 546 continue; \
547 }
548
4934f93d 549 CONF_HANDLE_BOOL(opt_abort, "abort")
a78e148b 550 /*
4934f93d 551 * Chunks always require at least one header page, plus
552 * one data page in the absence of redzones, or three
553 * pages in the presence of redzones. In order to
554 * simplify options processing, fix the limit based on
555 * config_fill.
a78e148b 556 */
4934f93d 557 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
558 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
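			/*
			 * "dss" is handled by hand because its value is one of
			 * the strings in dss_prec_names, applied via
			 * chunk_dss_prec_set(), rather than a bool or a number.
			 */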
21b26915 559 if (strncmp("dss", k, klen) == 0) {
560 int i;
561 bool match = false;
562 for (i = 0; i < dss_prec_limit; i++) {
563 if (strncmp(dss_prec_names[i], v, vlen)
564 == 0) {
565 if (chunk_dss_prec_set(i)) {
566 malloc_conf_error(
567 "Error setting dss",
568 k, klen, v, vlen);
569 } else {
570 opt_dss =
571 dss_prec_names[i];
572 match = true;
573 break;
574 }
575 }
576 }
577 if (match == false) {
578 malloc_conf_error("Invalid conf value",
579 k, klen, v, vlen);
580 }
581 continue;
582 }
4934f93d 583 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
584 SIZE_T_MAX)
585 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
586 -1, (sizeof(size_t) << 3) - 1)
587 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
588 if (config_fill) {
589 CONF_HANDLE_BOOL(opt_junk, "junk")
590 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
591 0, SIZE_T_MAX)
592 CONF_HANDLE_BOOL(opt_redzone, "redzone")
593 CONF_HANDLE_BOOL(opt_zero, "zero")
594 }
595 if (config_utrace) {
596 CONF_HANDLE_BOOL(opt_utrace, "utrace")
597 }
598 if (config_valgrind) {
21b26915 599 CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
4934f93d 600 }
601 if (config_xmalloc) {
602 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
603 }
604 if (config_tcache) {
605 CONF_HANDLE_BOOL(opt_tcache, "tcache")
606 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
607 "lg_tcache_max", -1,
608 (sizeof(size_t) << 3) - 1)
609 }
610 if (config_prof) {
611 CONF_HANDLE_BOOL(opt_prof, "prof")
612 CONF_HANDLE_CHAR_P(opt_prof_prefix,
613 "prof_prefix", "jeprof")
614 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
615 CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
616 "lg_prof_sample", 0,
617 (sizeof(uint64_t) << 3) - 1)
618 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
619 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
620 "lg_prof_interval", -1,
621 (sizeof(uint64_t) << 3) - 1)
622 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
623 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
624 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
625 }
a78e148b 626 malloc_conf_error("Invalid conf pair", k, klen, v,
627 vlen);
628#undef CONF_HANDLE_BOOL
629#undef CONF_HANDLE_SIZE_T
630#undef CONF_HANDLE_SSIZE_T
631#undef CONF_HANDLE_CHAR_P
632 }
a78e148b 633 }
634}
635
636static bool
637malloc_init_hard(void)
638{
639 arena_t *init_arenas[1];
640
641 malloc_mutex_lock(&init_lock);
4934f93d 642 if (malloc_initialized || IS_INITIALIZER) {
a78e148b 643 /*
644 * Another thread initialized the allocator before this one
645 * acquired init_lock, or this thread is the initializing
646 * thread, and it is recursively allocating.
647 */
648 malloc_mutex_unlock(&init_lock);
649 return (false);
650 }
4934f93d 651#ifdef JEMALLOC_THREADED_INIT
652 if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
a78e148b 653 /* Busy-wait until the initializing thread completes. */
654 do {
655 malloc_mutex_unlock(&init_lock);
656 CPU_SPINWAIT;
657 malloc_mutex_lock(&init_lock);
658 } while (malloc_initialized == false);
659 malloc_mutex_unlock(&init_lock);
660 return (false);
661 }
a78e148b 662#endif
4934f93d 663 malloc_initializer = INITIALIZER;
a78e148b 664
4934f93d 665 malloc_tsd_boot();
666 if (config_prof)
667 prof_boot0();
a78e148b 668
669 malloc_conf_init();
670
4934f93d 671#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
672 && !defined(_WIN32))
a78e148b 673 /* Register fork handlers. */
4934f93d 674 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
675 jemalloc_postfork_child) != 0) {
a78e148b 676 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
677 if (opt_abort)
678 abort();
679 }
4934f93d 680#endif
a78e148b 681
682 if (opt_stats_print) {
683 /* Print statistics at exit. */
684 if (atexit(stats_print_atexit) != 0) {
685 malloc_write("<jemalloc>: Error in atexit()\n");
686 if (opt_abort)
687 abort();
688 }
689 }
690
a78e148b 691 if (base_boot()) {
692 malloc_mutex_unlock(&init_lock);
693 return (true);
694 }
695
4934f93d 696 if (chunk_boot()) {
a78e148b 697 malloc_mutex_unlock(&init_lock);
698 return (true);
699 }
700
4934f93d 701 if (ctl_boot()) {
a78e148b 702 malloc_mutex_unlock(&init_lock);
703 return (true);
704 }
a78e148b 705
4934f93d 706 if (config_prof)
707 prof_boot1();
708
709 arena_boot();
710
711 if (config_tcache && tcache_boot0()) {
a78e148b 712 malloc_mutex_unlock(&init_lock);
713 return (true);
714 }
715
4934f93d 716 if (huge_boot()) {
a78e148b 717 malloc_mutex_unlock(&init_lock);
718 return (true);
719 }
a78e148b 720
1d03c1c9 721 if (malloc_mutex_init(&arenas_lock)) {
 malloc_mutex_unlock(&init_lock);
 722 return (true);
 }
 723
a78e148b 724 /*
725 * Create enough scaffolding to allow recursive allocation in
726 * malloc_ncpus().
727 */
21b26915 728 narenas_total = narenas_auto = 1;
a78e148b 729 arenas = init_arenas;
21b26915 730 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
a78e148b 731
732 /*
733 * Initialize one arena here. The rest are lazily created in
734 * choose_arena_hard().
735 */
736 arenas_extend(0);
737 if (arenas[0] == NULL) {
738 malloc_mutex_unlock(&init_lock);
739 return (true);
740 }
741
4934f93d 742 /* Initialize allocation counters before any allocations can occur. */
743 if (config_stats && thread_allocated_tsd_boot()) {
744 malloc_mutex_unlock(&init_lock);
745 return (true);
746 }
a78e148b 747
4934f93d 748 if (arenas_tsd_boot()) {
749 malloc_mutex_unlock(&init_lock);
750 return (true);
751 }
752
753 if (config_tcache && tcache_boot1()) {
754 malloc_mutex_unlock(&init_lock);
755 return (true);
756 }
757
758 if (config_fill && quarantine_boot()) {
759 malloc_mutex_unlock(&init_lock);
760 return (true);
761 }
762
763 if (config_prof && prof_boot2()) {
a78e148b 764 malloc_mutex_unlock(&init_lock);
765 return (true);
766 }
a78e148b 767
768 /* Get number of CPUs. */
a78e148b 769 malloc_mutex_unlock(&init_lock);
770 ncpus = malloc_ncpus();
771 malloc_mutex_lock(&init_lock);
772
4934f93d 773 if (mutex_boot()) {
774 malloc_mutex_unlock(&init_lock);
775 return (true);
776 }
777
a78e148b 778 if (opt_narenas == 0) {
779 /*
780 * For SMP systems, create more than one arena per CPU by
781 * default.
782 */
783 if (ncpus > 1)
784 opt_narenas = ncpus << 2;
785 else
786 opt_narenas = 1;
787 }
21b26915 788 narenas_auto = opt_narenas;
a78e148b 789 /*
790 * Make sure that the arenas array can be allocated. In practice, this
791 * limit is enough to allow the allocator to function, but the ctl
792 * machinery will fail to allocate memory at far lower limits.
793 */
21b26915 794 if (narenas_auto > chunksize / sizeof(arena_t *)) {
795 narenas_auto = chunksize / sizeof(arena_t *);
4934f93d 796 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
21b26915 797 narenas_auto);
a78e148b 798 }
21b26915 799 narenas_total = narenas_auto;
a78e148b 800
801 /* Allocate and initialize arenas. */
21b26915 802 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
a78e148b 803 if (arenas == NULL) {
804 malloc_mutex_unlock(&init_lock);
805 return (true);
806 }
807 /*
808 * Zero the array. In practice, this should always be pre-zeroed,
809 * since it was just mmap()ed, but let's be sure.
810 */
21b26915 811 memset(arenas, 0, sizeof(arena_t *) * narenas_total);
a78e148b 812 /* Copy the pointer to the one arena that was already initialized. */
813 arenas[0] = init_arenas[0];
814
a78e148b 815 malloc_initialized = true;
816 malloc_mutex_unlock(&init_lock);
817 return (false);
818}
819
a78e148b 820/*
821 * End initialization functions.
822 */
823/******************************************************************************/
824/*
825 * Begin malloc(3)-compatible functions.
826 */
827
a78e148b 828void *
4934f93d 829je_malloc(size_t size)
a78e148b 830{
831 void *ret;
4934f93d 832 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
833 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
a78e148b 834
835 if (malloc_init()) {
836 ret = NULL;
4934f93d 837 goto label_oom;
a78e148b 838 }
839
4934f93d 840 if (size == 0)
841 size = 1;
a78e148b 842
4934f93d 843 if (config_prof && opt_prof) {
a78e148b 844 usize = s2u(size);
1d03c1c9 845 PROF_ALLOC_PREP(1, usize, cnt);
846 if (cnt == NULL) {
a78e148b 847 ret = NULL;
4934f93d 848 goto label_oom;
a78e148b 849 }
850 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
4934f93d 851 SMALL_MAXCLASS) {
852 ret = imalloc(SMALL_MAXCLASS+1);
a78e148b 853 if (ret != NULL)
854 arena_prof_promoted(ret, usize);
855 } else
856 ret = imalloc(size);
4934f93d 857 } else {
858 if (config_stats || (config_valgrind && opt_valgrind))
859 usize = s2u(size);
a78e148b 860 ret = imalloc(size);
861 }
862
4934f93d 863label_oom:
a78e148b 864 if (ret == NULL) {
4934f93d 865 if (config_xmalloc && opt_xmalloc) {
a78e148b 866 malloc_write("<jemalloc>: Error in malloc(): "
867 "out of memory\n");
868 abort();
869 }
4934f93d 870 set_errno(ENOMEM);
a78e148b 871 }
4934f93d 872 if (config_prof && opt_prof && ret != NULL)
a78e148b 873 prof_malloc(ret, usize, cnt);
4934f93d 874 if (config_stats && ret != NULL) {
875 assert(usize == isalloc(ret, config_prof));
876 thread_allocated_tsd_get()->allocated += usize;
a78e148b 877 }
4934f93d 878 UTRACE(0, size, ret);
879 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
a78e148b 880 return (ret);
881}
882
883JEMALLOC_ATTR(nonnull(1))
1d03c1c9 884#ifdef JEMALLOC_PROF
885/*
4934f93d 886 * Avoid any uncertainty as to how many backtrace frames to ignore in
1d03c1c9 887 * PROF_ALLOC_PREP().
888 */
889JEMALLOC_ATTR(noinline)
890#endif
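/*
 * min_alignment distinguishes the callers: je_posix_memalign() requires
 * alignment to be at least sizeof(void *), while the other wrappers
 * (aligned_alloc, memalign, valloc) pass 1.
 */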
891static int
4934f93d 892imemalign(void **memptr, size_t alignment, size_t size,
893 size_t min_alignment)
a78e148b 894{
895 int ret;
4934f93d 896 size_t usize;
a78e148b 897 void *result;
4934f93d 898 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
899
900 assert(min_alignment != 0);
a78e148b 901
902 if (malloc_init())
903 result = NULL;
904 else {
4934f93d 905 if (size == 0)
906 size = 1;
a78e148b 907
908 /* Make sure that alignment is a large enough power of 2. */
909 if (((alignment - 1) & alignment) != 0
4934f93d 910 || (alignment < min_alignment)) {
911 if (config_xmalloc && opt_xmalloc) {
912 malloc_write("<jemalloc>: Error allocating "
913 "aligned memory: invalid alignment\n");
a78e148b 914 abort();
915 }
a78e148b 916 result = NULL;
917 ret = EINVAL;
4934f93d 918 goto label_return;
a78e148b 919 }
920
4934f93d 921 usize = sa2u(size, alignment);
a78e148b 922 if (usize == 0) {
923 result = NULL;
924 ret = ENOMEM;
4934f93d 925 goto label_return;
a78e148b 926 }
927
4934f93d 928 if (config_prof && opt_prof) {
1d03c1c9 929 PROF_ALLOC_PREP(2, usize, cnt);
930 if (cnt == NULL) {
a78e148b 931 result = NULL;
932 ret = EINVAL;
933 } else {
934 if (prof_promote && (uintptr_t)cnt !=
4934f93d 935 (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
936 assert(sa2u(SMALL_MAXCLASS+1,
937 alignment) != 0);
938 result = ipalloc(sa2u(SMALL_MAXCLASS+1,
939 alignment), alignment, false);
a78e148b 940 if (result != NULL) {
941 arena_prof_promoted(result,
942 usize);
943 }
944 } else {
945 result = ipalloc(usize, alignment,
946 false);
947 }
948 }
949 } else
a78e148b 950 result = ipalloc(usize, alignment, false);
951 }
952
953 if (result == NULL) {
4934f93d 954 if (config_xmalloc && opt_xmalloc) {
955 malloc_write("<jemalloc>: Error allocating aligned "
956 "memory: out of memory\n");
a78e148b 957 abort();
958 }
a78e148b 959 ret = ENOMEM;
4934f93d 960 goto label_return;
a78e148b 961 }
962
963 *memptr = result;
964 ret = 0;
965
4934f93d 966label_return:
967 if (config_stats && result != NULL) {
968 assert(usize == isalloc(result, config_prof));
969 thread_allocated_tsd_get()->allocated += usize;
a78e148b 970 }
4934f93d 971 if (config_prof && opt_prof && result != NULL)
a78e148b 972 prof_malloc(result, usize, cnt);
4934f93d 973 UTRACE(0, size, result);
a78e148b 974 return (ret);
975}
976
1d03c1c9 977int
4934f93d 978je_posix_memalign(void **memptr, size_t alignment, size_t size)
979{
980 int ret = imemalign(memptr, alignment, size, sizeof(void *));
981 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
982 config_prof), false);
983 return (ret);
984}
985
986void *
987je_aligned_alloc(size_t alignment, size_t size)
1d03c1c9 988{
4934f93d 989 void *ret;
990 int err;
1d03c1c9 991
4934f93d 992 if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
993 ret = NULL;
994 set_errno(err);
995 }
996 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
997 false);
998 return (ret);
1d03c1c9 999}
1000
a78e148b 1001void *
4934f93d 1002je_calloc(size_t num, size_t size)
a78e148b 1003{
1004 void *ret;
1005 size_t num_size;
4934f93d 1006 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1007 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
a78e148b 1008
1009 if (malloc_init()) {
1010 num_size = 0;
1011 ret = NULL;
4934f93d 1012 goto label_return;
a78e148b 1013 }
1014
1015 num_size = num * size;
1016 if (num_size == 0) {
4934f93d 1017 if (num == 0 || size == 0)
a78e148b 1018 num_size = 1;
a78e148b 1019 else {
1020 ret = NULL;
4934f93d 1021 goto label_return;
a78e148b 1022 }
a78e148b 1023 /*
1024 * Try to avoid division here. We know that it isn't possible to
1025 * overflow during multiplication if neither operand uses any of the
1026 * most significant half of the bits in a size_t.
1027 */
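	/* E.g. with a 64-bit size_t, two factors that each fit in 32 bits cannot overflow. */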
1028 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
1029 && (num_size / size != num)) {
1030 /* size_t overflow. */
1031 ret = NULL;
4934f93d 1032 goto label_return;
a78e148b 1033 }
1034
4934f93d 1035 if (config_prof && opt_prof) {
a78e148b 1036 usize = s2u(num_size);
1d03c1c9 1037 PROF_ALLOC_PREP(1, usize, cnt);
1038 if (cnt == NULL) {
a78e148b 1039 ret = NULL;
4934f93d 1040 goto label_return;
a78e148b 1041 }
1042 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
4934f93d 1043 <= SMALL_MAXCLASS) {
1044 ret = icalloc(SMALL_MAXCLASS+1);
a78e148b 1045 if (ret != NULL)
1046 arena_prof_promoted(ret, usize);
1047 } else
1048 ret = icalloc(num_size);
4934f93d 1049 } else {
1050 if (config_stats || (config_valgrind && opt_valgrind))
1051 usize = s2u(num_size);
a78e148b 1052 ret = icalloc(num_size);
1053 }
1054
4934f93d 1055label_return:
a78e148b 1056 if (ret == NULL) {
4934f93d 1057 if (config_xmalloc && opt_xmalloc) {
a78e148b 1058 malloc_write("<jemalloc>: Error in calloc(): out of "
1059 "memory\n");
1060 abort();
1061 }
4934f93d 1062 set_errno(ENOMEM);
a78e148b 1063 }
1064
4934f93d 1065 if (config_prof && opt_prof && ret != NULL)
a78e148b 1066 prof_malloc(ret, usize, cnt);
4934f93d 1067 if (config_stats && ret != NULL) {
1068 assert(usize == isalloc(ret, config_prof));
1069 thread_allocated_tsd_get()->allocated += usize;
a78e148b 1070 }
4934f93d 1071 UTRACE(0, num_size, ret);
1072 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
a78e148b 1073 return (ret);
1074}
1075
a78e148b 1076void *
4934f93d 1077je_realloc(void *ptr, size_t size)
a78e148b 1078{
1079 void *ret;
4934f93d 1080 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
a78e148b 1081 size_t old_size = 0;
4934f93d 1082 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1083 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1084 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
a78e148b 1085
1086 if (size == 0) {
4934f93d 1087 if (ptr != NULL) {
1088 /* realloc(ptr, 0) is equivalent to free(p). */
1089 if (config_prof) {
1090 old_size = isalloc(ptr, true);
1091 if (config_valgrind && opt_valgrind)
1092 old_rzsize = p2rz(ptr);
1093 } else if (config_stats) {
1094 old_size = isalloc(ptr, false);
1095 if (config_valgrind && opt_valgrind)
1096 old_rzsize = u2rz(old_size);
1097 } else if (config_valgrind && opt_valgrind) {
1098 old_size = isalloc(ptr, false);
1099 old_rzsize = u2rz(old_size);
a78e148b 1100 }
4934f93d 1101 if (config_prof && opt_prof) {
1102 old_ctx = prof_ctx_get(ptr);
a78e148b 1103 cnt = NULL;
1104 }
4934f93d 1105 iqalloc(ptr);
a78e148b 1106 ret = NULL;
4934f93d 1107 goto label_return;
1108 } else
1109 size = 1;
a78e148b 1110 }
1111
1112 if (ptr != NULL) {
4934f93d 1113 assert(malloc_initialized || IS_INITIALIZER);
1114
1115 if (config_prof) {
1116 old_size = isalloc(ptr, true);
1117 if (config_valgrind && opt_valgrind)
1118 old_rzsize = p2rz(ptr);
1119 } else if (config_stats) {
1120 old_size = isalloc(ptr, false);
1121 if (config_valgrind && opt_valgrind)
1122 old_rzsize = u2rz(old_size);
1123 } else if (config_valgrind && opt_valgrind) {
1124 old_size = isalloc(ptr, false);
1125 old_rzsize = u2rz(old_size);
1126 }
1127 if (config_prof && opt_prof) {
a78e148b 1128 usize = s2u(size);
1129 old_ctx = prof_ctx_get(ptr);
1d03c1c9 1130 PROF_ALLOC_PREP(1, usize, cnt);
1131 if (cnt == NULL) {
1132 old_ctx = NULL;
a78e148b 1133 ret = NULL;
4934f93d 1134 goto label_oom;
a78e148b 1135 }
1136 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
4934f93d 1137 usize <= SMALL_MAXCLASS) {
1138 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
a78e148b 1139 false, false);
1140 if (ret != NULL)
1141 arena_prof_promoted(ret, usize);
1d03c1c9 1142 else
1143 old_ctx = NULL;
1144 } else {
a78e148b 1145 ret = iralloc(ptr, size, 0, 0, false, false);
1d03c1c9 1146 if (ret == NULL)
1147 old_ctx = NULL;
1148 }
4934f93d 1149 } else {
1150 if (config_stats || (config_valgrind && opt_valgrind))
1151 usize = s2u(size);
a78e148b 1152 ret = iralloc(ptr, size, 0, 0, false, false);
1153 }
1154
4934f93d 1155label_oom:
a78e148b 1156 if (ret == NULL) {
4934f93d 1157 if (config_xmalloc && opt_xmalloc) {
a78e148b 1158 malloc_write("<jemalloc>: Error in realloc(): "
1159 "out of memory\n");
1160 abort();
1161 }
4934f93d 1162 set_errno(ENOMEM);
a78e148b 1163 }
1164 } else {
4934f93d 1165 /* realloc(NULL, size) is equivalent to malloc(size). */
1166 if (config_prof && opt_prof)
a78e148b 1167 old_ctx = NULL;
a78e148b 1168 if (malloc_init()) {
4934f93d 1169 if (config_prof && opt_prof)
a78e148b 1170 cnt = NULL;
a78e148b 1171 ret = NULL;
1172 } else {
4934f93d 1173 if (config_prof && opt_prof) {
a78e148b 1174 usize = s2u(size);
1d03c1c9 1175 PROF_ALLOC_PREP(1, usize, cnt);
1176 if (cnt == NULL)
a78e148b 1177 ret = NULL;
1178 else {
1179 if (prof_promote && (uintptr_t)cnt !=
1180 (uintptr_t)1U && usize <=
4934f93d 1181 SMALL_MAXCLASS) {
1182 ret = imalloc(SMALL_MAXCLASS+1);
a78e148b 1183 if (ret != NULL) {
1184 arena_prof_promoted(ret,
1185 usize);
1186 }
1187 } else
1188 ret = imalloc(size);
1189 }
4934f93d 1190 } else {
1191 if (config_stats || (config_valgrind &&
1192 opt_valgrind))
1193 usize = s2u(size);
a78e148b 1194 ret = imalloc(size);
1195 }
1196 }
1197
1198 if (ret == NULL) {
4934f93d 1199 if (config_xmalloc && opt_xmalloc) {
a78e148b 1200 malloc_write("<jemalloc>: Error in realloc(): "
1201 "out of memory\n");
1202 abort();
1203 }
4934f93d 1204 set_errno(ENOMEM);
a78e148b 1205 }
1206 }
1207
4934f93d 1208label_return:
1209 if (config_prof && opt_prof)
a78e148b 1210 prof_realloc(ret, usize, cnt, old_size, old_ctx);
4934f93d 1211 if (config_stats && ret != NULL) {
1212 thread_allocated_t *ta;
1213 assert(usize == isalloc(ret, config_prof));
1214 ta = thread_allocated_tsd_get();
1215 ta->allocated += usize;
1216 ta->deallocated += old_size;
a78e148b 1217 }
4934f93d 1218 UTRACE(ptr, size, ret);
1219 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
a78e148b 1220 return (ret);
1221}
1222
a78e148b 1223void
4934f93d 1224je_free(void *ptr)
a78e148b 1225{
1226
4934f93d 1227 UTRACE(ptr, 0, 0);
a78e148b 1228 if (ptr != NULL) {
a78e148b 1229 size_t usize;
4934f93d 1230 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
a78e148b 1231
4934f93d 1232 assert(malloc_initialized || IS_INITIALIZER);
a78e148b 1233
4934f93d 1234 if (config_prof && opt_prof) {
1235 usize = isalloc(ptr, config_prof);
a78e148b 1236 prof_free(ptr, usize);
4934f93d 1237 } else if (config_stats || config_valgrind)
1238 usize = isalloc(ptr, config_prof);
1239 if (config_stats)
1240 thread_allocated_tsd_get()->deallocated += usize;
1241 if (config_valgrind && opt_valgrind)
1242 rzsize = p2rz(ptr);
1243 iqalloc(ptr);
1244 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
a78e148b 1245 }
1246}
1247
1248/*
1249 * End malloc(3)-compatible functions.
1250 */
1251/******************************************************************************/
1252/*
1253 * Begin non-standard override functions.
a78e148b 1254 */
a78e148b 1255
1256#ifdef JEMALLOC_OVERRIDE_MEMALIGN
a78e148b 1257void *
4934f93d 1258je_memalign(size_t alignment, size_t size)
a78e148b 1259{
4934f93d 1260 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1261 imemalign(&ret, alignment, size, 1);
1262 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
a78e148b 1263 return (ret);
1264}
1265#endif
1266
1267#ifdef JEMALLOC_OVERRIDE_VALLOC
a78e148b 1268void *
4934f93d 1269je_valloc(size_t size)
a78e148b 1270{
4934f93d 1271 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1272 imemalign(&ret, PAGE, size, 1);
1273 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
a78e148b 1274 return (ret);
1275}
1276#endif
1277
4934f93d 1278/*
1279 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1280 * #define je_malloc malloc
1281 */
1282#define malloc_is_malloc 1
1283#define is_malloc_(a) malloc_is_ ## a
1284#define is_malloc(a) is_malloc_(a)
1285
1286#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1287/*
1288 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1289 * to inconsistently reference libc's malloc(3)-compatible functions
1290 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1291 *
1292 * These definitions interpose hooks in glibc. The functions are actually
1293 * passed an extra argument for the caller return address, which will be
1294 * ignored.
1295 */
21b26915 1296JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
1297JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
1298JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
1299JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
4934f93d 1300 je_memalign;
1301#endif
1302
a78e148b 1303/*
1304 * End non-standard override functions.
1305 */
1306/******************************************************************************/
1307/*
1308 * Begin non-standard functions.
1309 */
1310
a78e148b 1311size_t
21b26915 1312je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
a78e148b 1313{
1314 size_t ret;
1315
4934f93d 1316 assert(malloc_initialized || IS_INITIALIZER);
a78e148b 1317
4934f93d 1318 if (config_ivsalloc)
1319 ret = ivsalloc(ptr, config_prof);
1320 else
1321 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
a78e148b 1322
1323 return (ret);
1324}
1325
a78e148b 1326void
4934f93d 1327je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1328 const char *opts)
a78e148b 1329{
1330
1331 stats_print(write_cb, cbopaque, opts);
1332}
1333
a78e148b 1334int
4934f93d 1335je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
a78e148b 1336 size_t newlen)
1337{
1338
1339 if (malloc_init())
1340 return (EAGAIN);
1341
1342 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1343}
1344
a78e148b 1345int
4934f93d 1346je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
a78e148b 1347{
1348
1349 if (malloc_init())
1350 return (EAGAIN);
1351
1352 return (ctl_nametomib(name, mibp, miblenp));
1353}
1354
a78e148b 1355int
4934f93d 1356je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1357 void *newp, size_t newlen)
a78e148b 1358{
1359
1360 if (malloc_init())
1361 return (EAGAIN);
1362
1363 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1364}
1365
4934f93d 1366/*
1367 * End non-standard functions.
1368 */
1369/******************************************************************************/
1370/*
1371 * Begin experimental functions.
1372 */
1373#ifdef JEMALLOC_EXPERIMENTAL
1374
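/*
 * Dispatch helper for the experimental *allocm() API: chooses the aligned,
 * zeroed, or plain allocation path based on the already-decoded flags.
 */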
a78e148b 1375JEMALLOC_INLINE void *
21b26915 1376iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
1377 arena_t *arena)
a78e148b 1378{
1379
4934f93d 1380 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1381 alignment)));
a78e148b 1382
1383 if (alignment != 0)
21b26915 1384 return (ipallocx(usize, alignment, zero, try_tcache, arena));
a78e148b 1385 else if (zero)
21b26915 1386 return (icallocx(usize, try_tcache, arena));
a78e148b 1387 else
21b26915 1388 return (imallocx(usize, try_tcache, arena));
a78e148b 1389}
1390
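/*
 * allocm() flags encode lg(alignment) in the low bits (ALLOCM_LG_ALIGN_MASK),
 * zeroing via ALLOCM_ZERO, and an optional 1-based arena index in the bits
 * above bit 7, hence the "(flags >> 8) - 1" decoding below.
 */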
a78e148b 1391int
4934f93d 1392je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
a78e148b 1393{
1394 void *p;
1395 size_t usize;
1396 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1397 & (SIZE_T_MAX-1));
1398 bool zero = flags & ALLOCM_ZERO;
21b26915 1399 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1400 arena_t *arena;
1401 bool try_tcache;
a78e148b 1402
1403 assert(ptr != NULL);
1404 assert(size != 0);
1405
1406 if (malloc_init())
4934f93d 1407 goto label_oom;
a78e148b 1408
21b26915 1409 if (arena_ind != UINT_MAX) {
1410 arena = arenas[arena_ind];
1411 try_tcache = false;
1412 } else {
1413 arena = NULL;
1414 try_tcache = true;
1415 }
1416
4934f93d 1417 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
a78e148b 1418 if (usize == 0)
4934f93d 1419 goto label_oom;
1420
1421 if (config_prof && opt_prof) {
1422 prof_thr_cnt_t *cnt;
a78e148b 1423
1d03c1c9 1424 PROF_ALLOC_PREP(1, usize, cnt);
1425 if (cnt == NULL)
4934f93d 1426 goto label_oom;
a78e148b 1427 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
4934f93d 1428 SMALL_MAXCLASS) {
a78e148b 1429 size_t usize_promoted = (alignment == 0) ?
4934f93d 1430 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1431 alignment);
a78e148b 1432 assert(usize_promoted != 0);
21b26915 1433 p = iallocm(usize_promoted, alignment, zero,
1434 try_tcache, arena);
a78e148b 1435 if (p == NULL)
4934f93d 1436 goto label_oom;
a78e148b 1437 arena_prof_promoted(p, usize);
1438 } else {
21b26915 1439 p = iallocm(usize, alignment, zero, try_tcache, arena);
a78e148b 1440 if (p == NULL)
4934f93d 1441 goto label_oom;
a78e148b 1442 }
1d03c1c9 1443 prof_malloc(p, usize, cnt);
4934f93d 1444 } else {
21b26915 1445 p = iallocm(usize, alignment, zero, try_tcache, arena);
a78e148b 1446 if (p == NULL)
4934f93d 1447 goto label_oom;
a78e148b 1448 }
4934f93d 1449 if (rsize != NULL)
1450 *rsize = usize;
a78e148b 1451
1452 *ptr = p;
4934f93d 1453 if (config_stats) {
1454 assert(usize == isalloc(p, config_prof));
1455 thread_allocated_tsd_get()->allocated += usize;
1456 }
1457 UTRACE(0, size, p);
1458 JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
a78e148b 1459 return (ALLOCM_SUCCESS);
4934f93d 1460label_oom:
1461 if (config_xmalloc && opt_xmalloc) {
a78e148b 1462 malloc_write("<jemalloc>: Error in allocm(): "
1463 "out of memory\n");
1464 abort();
1465 }
a78e148b 1466 *ptr = NULL;
4934f93d 1467 UTRACE(0, size, 0);
a78e148b 1468 return (ALLOCM_ERR_OOM);
1469}
1470
a78e148b 1471int
4934f93d 1472je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
a78e148b 1473{
1474 void *p, *q;
1475 size_t usize;
a78e148b 1476 size_t old_size;
4934f93d 1477 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
a78e148b 1478 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1479 & (SIZE_T_MAX-1));
1480 bool zero = flags & ALLOCM_ZERO;
1481 bool no_move = flags & ALLOCM_NO_MOVE;
21b26915 1482 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1483 bool try_tcache_alloc, try_tcache_dalloc;
1484 arena_t *arena;
a78e148b 1485
1486 assert(ptr != NULL);
1487 assert(*ptr != NULL);
1488 assert(size != 0);
1489 assert(SIZE_T_MAX - size >= extra);
4934f93d 1490 assert(malloc_initialized || IS_INITIALIZER);
a78e148b 1491
21b26915 1492 if (arena_ind != UINT_MAX) {
1493 arena_chunk_t *chunk;
1494 try_tcache_alloc = true;
1495 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
1496 try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
1497 arenas[arena_ind]);
1498 arena = arenas[arena_ind];
1499 } else {
1500 try_tcache_alloc = true;
1501 try_tcache_dalloc = true;
1502 arena = NULL;
1503 }
1504
a78e148b 1505 p = *ptr;
4934f93d 1506 if (config_prof && opt_prof) {
1507 prof_thr_cnt_t *cnt;
1508
a78e148b 1509 /*
1510 * usize isn't knowable before iralloc() returns when extra is
1511 * non-zero. Therefore, compute its maximum possible value and
1d03c1c9 1512 * use that in PROF_ALLOC_PREP() to decide whether to capture a
a78e148b 1513 * backtrace. prof_realloc() will use the actual usize to
1514 * decide whether to sample.
1515 */
1516 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
4934f93d 1517 sa2u(size+extra, alignment);
1d03c1c9 1518 prof_ctx_t *old_ctx = prof_ctx_get(p);
4934f93d 1519 old_size = isalloc(p, true);
1520 if (config_valgrind && opt_valgrind)
1521 old_rzsize = p2rz(p);
1d03c1c9 1522 PROF_ALLOC_PREP(1, max_usize, cnt);
1523 if (cnt == NULL)
4934f93d 1524 goto label_oom;
1d03c1c9 1525 /*
1526 * Use minimum usize to determine whether promotion may happen.
1527 */
1528 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
4934f93d 1529 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1530 <= SMALL_MAXCLASS) {
21b26915 1531 q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
4934f93d 1532 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
21b26915 1533 alignment, zero, no_move, try_tcache_alloc,
1534 try_tcache_dalloc, arena);
a78e148b 1535 if (q == NULL)
4934f93d 1536 goto label_err;
1537 if (max_usize < PAGE) {
1d03c1c9 1538 usize = max_usize;
1539 arena_prof_promoted(q, usize);
1540 } else
4934f93d 1541 usize = isalloc(q, config_prof);
a78e148b 1542 } else {
21b26915 1543 q = irallocx(p, size, extra, alignment, zero, no_move,
1544 try_tcache_alloc, try_tcache_dalloc, arena);
a78e148b 1545 if (q == NULL)
4934f93d 1546 goto label_err;
1547 usize = isalloc(q, config_prof);
a78e148b 1548 }
1549 prof_realloc(q, usize, cnt, old_size, old_ctx);
1550 if (rsize != NULL)
1551 *rsize = usize;
4934f93d 1552 } else {
1553 if (config_stats) {
1554 old_size = isalloc(p, false);
1555 if (config_valgrind && opt_valgrind)
1556 old_rzsize = u2rz(old_size);
1557 } else if (config_valgrind && opt_valgrind) {
1558 old_size = isalloc(p, false);
1559 old_rzsize = u2rz(old_size);
1560 }
21b26915 1561 q = irallocx(p, size, extra, alignment, zero, no_move,
1562 try_tcache_alloc, try_tcache_dalloc, arena);
a78e148b 1563 if (q == NULL)
4934f93d 1564 goto label_err;
1565 if (config_stats)
1566 usize = isalloc(q, config_prof);
1567 if (rsize != NULL) {
1568 if (config_stats == false)
1569 usize = isalloc(q, config_prof);
1570 *rsize = usize;
a78e148b 1571 }
1572 }
1573
1574 *ptr = q;
4934f93d 1575 if (config_stats) {
1576 thread_allocated_t *ta;
1577 ta = thread_allocated_tsd_get();
1578 ta->allocated += usize;
1579 ta->deallocated += old_size;
1580 }
1581 UTRACE(p, size, q);
1582 JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
a78e148b 1583 return (ALLOCM_SUCCESS);
4934f93d 1584label_err:
1585 if (no_move) {
1586 UTRACE(p, size, q);
a78e148b 1587 return (ALLOCM_ERR_NOT_MOVED);
4934f93d 1588 }
1589label_oom:
1590 if (config_xmalloc && opt_xmalloc) {
a78e148b 1591 malloc_write("<jemalloc>: Error in rallocm(): "
1592 "out of memory\n");
1593 abort();
1594 }
4934f93d 1595 UTRACE(p, size, 0);
a78e148b 1596 return (ALLOCM_ERR_OOM);
1597}
1598
a78e148b 1599int
4934f93d 1600je_sallocm(const void *ptr, size_t *rsize, int flags)
a78e148b 1601{
1602 size_t sz;
1603
4934f93d 1604 assert(malloc_initialized || IS_INITIALIZER);
a78e148b 1605
4934f93d 1606 if (config_ivsalloc)
1607 sz = ivsalloc(ptr, config_prof);
1608 else {
1609 assert(ptr != NULL);
1610 sz = isalloc(ptr, config_prof);
1611 }
a78e148b 1612 assert(rsize != NULL);
1613 *rsize = sz;
1614
1615 return (ALLOCM_SUCCESS);
1616}
1617
a78e148b 1618int
4934f93d 1619je_dallocm(void *ptr, int flags)
a78e148b 1620{
a78e148b 1621 size_t usize;
4934f93d 1622 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
21b26915 1623 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1624 bool try_tcache;
a78e148b 1625
1626 assert(ptr != NULL);
4934f93d 1627 assert(malloc_initialized || IS_INITIALIZER);
1628
21b26915 1629 if (arena_ind != UINT_MAX) {
1630 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1631 try_tcache = (chunk == ptr || chunk->arena !=
1632 arenas[arena_ind]);
1633 } else
1634 try_tcache = true;
1635
4934f93d 1636 UTRACE(ptr, 0, 0);
1637 if (config_stats || config_valgrind)
1638 usize = isalloc(ptr, config_prof);
1639 if (config_prof && opt_prof) {
1640 if (config_stats == false && config_valgrind == false)
1641 usize = isalloc(ptr, config_prof);
a78e148b 1642 prof_free(ptr, usize);
1643 }
4934f93d 1644 if (config_stats)
1645 thread_allocated_tsd_get()->deallocated += usize;
1646 if (config_valgrind && opt_valgrind)
1647 rzsize = p2rz(ptr);
21b26915 1648 iqallocx(ptr, try_tcache);
4934f93d 1649 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
a78e148b 1650
1651 return (ALLOCM_SUCCESS);
1652}
1653
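/*
 * nallocm() reports the usable size that allocm() would return for the given
 * size and flags, without performing an allocation.
 */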
4934f93d 1654int
1655je_nallocm(size_t *rsize, size_t size, int flags)
1656{
1657 size_t usize;
1658 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1659 & (SIZE_T_MAX-1));
1660
1661 assert(size != 0);
1662
1663 if (malloc_init())
1664 return (ALLOCM_ERR_OOM);
1665
1666 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1667 if (usize == 0)
1668 return (ALLOCM_ERR_OOM);
1669
1670 if (rsize != NULL)
1671 *rsize = usize;
1672 return (ALLOCM_SUCCESS);
1673}
1674
1675#endif
a78e148b 1676/*
4934f93d 1677 * End experimental functions.
a78e148b 1678 */
1679/******************************************************************************/
a78e148b 1680/*
1681 * The following functions are used by threading libraries for protection of
1682 * malloc during fork().
1683 */
1684
21b26915 1685/*
1686 * If an application creates a thread before doing any allocation in the main
1687 * thread, then calls fork(2) in the main thread followed by memory allocation
1688 * in the child process, a race can occur that results in deadlock within the
1689 * child: the main thread may have forked while the created thread had
1690 * partially initialized the allocator. Ordinarily jemalloc prevents
1691 * fork/malloc races via the following functions it registers during
1692 * initialization using pthread_atfork(), but of course that does no good if
1693 * the allocator isn't fully initialized at fork time. The following library
 1694 * constructor is a partial solution to this problem. It may still be possible to
1695 * trigger the deadlock described above, but doing so would involve forking via
1696 * a library constructor that runs before jemalloc's runs.
1697 */
1698JEMALLOC_ATTR(constructor)
1699static void
1700jemalloc_constructor(void)
1701{
1702
1703 malloc_init();
1704}
1705
4934f93d 1706#ifndef JEMALLOC_MUTEX_INIT_CB
a78e148b 1707void
1708jemalloc_prefork(void)
4934f93d 1709#else
1710JEMALLOC_EXPORT void
1711_malloc_prefork(void)
1712#endif
a78e148b 1713{
1714 unsigned i;
1715
4934f93d 1716#ifdef JEMALLOC_MUTEX_INIT_CB
1717 if (malloc_initialized == false)
1718 return;
1719#endif
1720 assert(malloc_initialized);
a78e148b 1721
4934f93d 1722 /* Acquire all mutexes in a safe order. */
21b26915 1723 ctl_prefork();
4934f93d 1724 malloc_mutex_prefork(&arenas_lock);
21b26915 1725 for (i = 0; i < narenas_total; i++) {
a78e148b 1726 if (arenas[i] != NULL)
4934f93d 1727 arena_prefork(arenas[i]);
a78e148b 1728 }
21b26915 1729 prof_prefork();
1730 chunk_prefork();
4934f93d 1731 base_prefork();
1732 huge_prefork();
4934f93d 1733}
a78e148b 1734
4934f93d 1735#ifndef JEMALLOC_MUTEX_INIT_CB
1736void
1737jemalloc_postfork_parent(void)
1738#else
1739JEMALLOC_EXPORT void
1740_malloc_postfork(void)
a78e148b 1741#endif
4934f93d 1742{
1743 unsigned i;
a78e148b 1744
4934f93d 1745#ifdef JEMALLOC_MUTEX_INIT_CB
1746 if (malloc_initialized == false)
1747 return;
a78e148b 1748#endif
4934f93d 1749 assert(malloc_initialized);
1750
1751 /* Release all mutexes, now that fork() has completed. */
4934f93d 1752 huge_postfork_parent();
1753 base_postfork_parent();
21b26915 1754 chunk_postfork_parent();
1755 prof_postfork_parent();
1756 for (i = 0; i < narenas_total; i++) {
4934f93d 1757 if (arenas[i] != NULL)
1758 arena_postfork_parent(arenas[i]);
1759 }
1760 malloc_mutex_postfork_parent(&arenas_lock);
21b26915 1761 ctl_postfork_parent();
a78e148b 1762}
1763
1764void
4934f93d 1765jemalloc_postfork_child(void)
a78e148b 1766{
1767 unsigned i;
1768
4934f93d 1769 assert(malloc_initialized);
1770
a78e148b 1771 /* Release all mutexes, now that fork() has completed. */
4934f93d 1772 huge_postfork_child();
1773 base_postfork_child();
21b26915 1774 chunk_postfork_child();
1775 prof_postfork_child();
1776 for (i = 0; i < narenas_total; i++) {
4934f93d 1777 if (arenas[i] != NULL)
1778 arena_postfork_child(arenas[i]);
1779 }
1780 malloc_mutex_postfork_child(&arenas_lock);
21b26915 1781 ctl_postfork_child();
4934f93d 1782}
a78e148b 1783
4934f93d 1784/******************************************************************************/
1785/*
1786 * The following functions are used for TLS allocation/deallocation in static
1787 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1788 * is that these avoid accessing TLS variables.
1789 */
a78e148b 1790
4934f93d 1791static void *
1792a0alloc(size_t size, bool zero)
1793{
1794
1795 if (malloc_init())
1796 return (NULL);
a78e148b 1797
4934f93d 1798 if (size == 0)
1799 size = 1;
a78e148b 1800
4934f93d 1801 if (size <= arena_maxclass)
1802 return (arena_malloc(arenas[0], size, zero, false));
1803 else
1804 return (huge_malloc(size, zero));
1805}
a78e148b 1806
4934f93d 1807void *
1808a0malloc(size_t size)
1809{
1810
1811 return (a0alloc(size, false));
1812}
1813
1814void *
1815a0calloc(size_t num, size_t size)
1816{
1817
1818 return (a0alloc(num * size, true));
1819}
1820
1821void
1822a0free(void *ptr)
1823{
1824 arena_chunk_t *chunk;
1825
1826 if (ptr == NULL)
1827 return;
1828
1829 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1830 if (chunk != ptr)
1831 arena_dalloc(chunk->arena, chunk, ptr, false);
1832 else
1833 huge_dalloc(ptr, true);
a78e148b 1834}
1835
1836/******************************************************************************/