#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p;   /* Input pointer (as in realloc(p, s)). */
    size_t s;  /* Request size. */
    void *r;   /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (opt_utrace) { \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
a78e148b 95
96/******************************************************************************/
97/* Function prototypes for non-inline static functions. */
98
a78e148b 99static void stats_print_atexit(void);
100static unsigned malloc_ncpus(void);
a78e148b 101static bool malloc_conf_next(char const **opts_p, char const **k_p,
102 size_t *klen_p, char const **v_p, size_t *vlen_p);
103static void malloc_conf_error(const char *msg, const char *k, size_t klen,
104 const char *v, size_t vlen);
105static void malloc_conf_init(void);
106static bool malloc_init_hard(void);
ad4c0b41 107static int imemalign(void **memptr, size_t alignment, size_t size,
108 size_t min_alignment);
a78e148b 109
110/******************************************************************************/
111/*
112 * Begin miscellaneous support functions.
113 */
114
115/* Create a new arena and insert it into the arenas array at index ind. */
116arena_t *
117arenas_extend(unsigned ind)
118{
119 arena_t *ret;
120
ad4c0b41 121 ret = (arena_t *)base_alloc(sizeof(arena_t));
a78e148b 122 if (ret != NULL && arena_new(ret, ind) == false) {
123 arenas[ind] = ret;
124 return (ret);
125 }
126 /* Only reached if there is an OOM error. */
127
128 /*
129 * OOM here is quite inconvenient to propagate, since dealing with it
130 * would require a check for failure in the fast path. Instead, punt
131 * by using arenas[0]. In practice, this is an extremely unlikely
132 * failure.
133 */
134 malloc_write("<jemalloc>: Error initializing arena\n");
135 if (opt_abort)
136 abort();
137
138 return (arenas[0]);
139}
140
ad4c0b41 141/* Slow path, called only by choose_arena(). */
a78e148b 142arena_t *
143choose_arena_hard(void)
144{
145 arena_t *ret;
146
147 if (narenas > 1) {
148 unsigned i, choose, first_null;
149
150 choose = 0;
151 first_null = narenas;
152 malloc_mutex_lock(&arenas_lock);
153 assert(arenas[0] != NULL);
154 for (i = 1; i < narenas; i++) {
155 if (arenas[i] != NULL) {
156 /*
157 * Choose the first arena that has the lowest
158 * number of threads assigned to it.
159 */
160 if (arenas[i]->nthreads <
161 arenas[choose]->nthreads)
162 choose = i;
163 } else if (first_null == narenas) {
164 /*
165 * Record the index of the first uninitialized
166 * arena, in case all extant arenas are in use.
167 *
168 * NB: It is possible for there to be
169 * discontinuities in terms of initialized
170 * versus uninitialized arenas, due to the
171 * "thread.arena" mallctl.
172 */
173 first_null = i;
174 }
175 }
176
ad4c0b41 177 if (arenas[choose]->nthreads == 0 || first_null == narenas) {
a78e148b 178 /*
179 * Use an unloaded arena, or the least loaded arena if
180 * all arenas are already initialized.
181 */
182 ret = arenas[choose];
183 } else {
184 /* Initialize a new arena. */
185 ret = arenas_extend(first_null);
186 }
187 ret->nthreads++;
188 malloc_mutex_unlock(&arenas_lock);
189 } else {
190 ret = arenas[0];
191 malloc_mutex_lock(&arenas_lock);
192 ret->nthreads++;
193 malloc_mutex_unlock(&arenas_lock);
194 }
195
ad4c0b41 196 arenas_tsd_set(&ret);
a78e148b 197
198 return (ret);
199}
200
a78e148b 201static void
202stats_print_atexit(void)
203{
204
ad4c0b41 205 if (config_tcache && config_stats) {
206 unsigned i;
a78e148b 207
ad4c0b41 208 /*
209 * Merge stats from extant threads. This is racy, since
210 * individual threads do not lock when recording tcache stats
211 * events. As a consequence, the final stats may be slightly
212 * out of date by the time they are reported, if other threads
213 * continue to allocate.
214 */
215 for (i = 0; i < narenas; i++) {
216 arena_t *arena = arenas[i];
217 if (arena != NULL) {
218 tcache_t *tcache;
a78e148b 219
ad4c0b41 220 /*
221 * tcache_stats_merge() locks bins, so if any
222 * code is introduced that acquires both arena
223 * and bin locks in the opposite order,
224 * deadlocks may result.
225 */
226 malloc_mutex_lock(&arena->lock);
227 ql_foreach(tcache, &arena->tcache_ql, link) {
228 tcache_stats_merge(tcache, arena);
229 }
230 malloc_mutex_unlock(&arena->lock);
a78e148b 231 }
a78e148b 232 }
233 }
ad4c0b41 234 je_malloc_stats_print(NULL, NULL, NULL);
a78e148b 235}
a78e148b 236
237/*
238 * End miscellaneous support functions.
239 */
240/******************************************************************************/
241/*
242 * Begin initialization functions.
243 */
244
245static unsigned
246malloc_ncpus(void)
247{
248 unsigned ret;
249 long result;
250
ad4c0b41 251#ifdef _WIN32
252 SYSTEM_INFO si;
253 GetSystemInfo(&si);
254 result = si.dwNumberOfProcessors;
255#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error; fall back to a single CPU. */
        result = 1;
    }
#endif
    ret = (unsigned)result;

    return (ret);
}

void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}

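/*
 * Illustrative example (not part of the upstream sources): given the conf
 * string "abort:true,narenas:4", malloc_conf_next() yields two key/value
 * pairs:
 *
 *   call 1: *k_p -> "abort",   klen == 5, *v_p -> "true", vlen == 4
 *   call 2: *k_p -> "narenas", klen == 7, *v_p -> "4",    vlen == 1
 *
 * Each successful call returns false; the caller's loop then stops because
 * *opts has reached the terminating '\0'.
 */
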
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
#ifndef _WIN32
            int linklen;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else
#endif
            {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                hit = true; \
            } else \
                hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
            bool hit; \
            CONF_HANDLE_BOOL_HIT(o, n, hit); \
            if (hit) \
                continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                uintmax_t um; \
                char *end; \
                \
                set_errno(0); \
                um = malloc_strtoumax(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (um < min || um > max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = um; \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                long l; \
                char *end; \
                \
                set_errno(0); \
                l = strtol(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > \
                    (ssize_t)max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort")
            /*
             * Chunks always require at least one header page, plus
             * one data page in the absence of redzones, or three
             * pages in the presence of redzones. In order to
             * simplify options processing, fix the limit based on
             * config_fill.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk")
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX)
                CONF_HANDLE_BOOL(opt_redzone, "redzone")
                CONF_HANDLE_BOOL(opt_zero, "zero")
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace")
            }
            if (config_valgrind) {
                bool hit;
                CONF_HANDLE_BOOL_HIT(opt_valgrind,
                    "valgrind", hit)
                if (config_fill && opt_valgrind && hit) {
                    opt_junk = false;
                    opt_zero = false;
                    if (opt_quarantine == 0) {
                        opt_quarantine =
                            JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
                    }
                    opt_redzone = true;
                }
                if (hit)
                    continue;
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache")
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof")
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}

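/*
 * Illustrative example (not part of the upstream sources; names assume no
 * JEMALLOC_PREFIX): the same options can arrive via any of the three sources
 * probed above, e.g.
 *
 *   je_malloc_conf = "narenas:2,lg_chunk:24";      (compiled into the app)
 *   ln -s 'abort:true' /etc/malloc.conf            (symbolic link name)
 *   MALLOC_CONF="stats_print:true" ./a.out         (environment variable)
 *
 * Later sources take precedence, because each pass simply overwrites the
 * opt_* variables set by earlier passes.
 */
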
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock)) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here. The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_fill && quarantine_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated. In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        narenas = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas);
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array. In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto label_oom;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_oom;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(size);
        ret = imalloc(size);
    }

label_oom:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto label_return;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto label_return;
    }

    *memptr = result;
    ret = 0;

label_return:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    UTRACE(0, size, result);
    return (ret);
}

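/*
 * Note on imemalign()'s alignment test above: a power of two has exactly one
 * bit set, so ((alignment - 1) & alignment) is zero iff alignment is a power
 * of two (e.g. 16 & 15 == 0, whereas 24 & 23 == 16). An alignment of zero
 * also passes that test, but is rejected by the (alignment < min_alignment)
 * comparison, since min_alignment is asserted to be nonzero.
 */
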
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here. We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }
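
    /*
     * Worked example of the check above (illustrative): with a 32-bit
     * size_t, SIZE_T_MAX << 16 masks the upper half of the bits. If
     * num and size both fit in 16 bits, their product fits in 32 bits
     * and cannot wrap, so the division is skipped; otherwise
     * num_size / size != num detects wraparound.
     */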

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }

    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats || (config_valgrind &&
                    opt_valgrind))
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            set_errno(ENOMEM);
        }
    }

label_return:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret, config_prof));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
    return (ret);
}

void
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (ptr != NULL) {
        size_t usize;
        size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr, config_prof);
            prof_free(ptr, usize);
        } else if (config_stats || config_valgrind)
            usize = isalloc(ptr, config_prof);
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        if (config_valgrind && opt_valgrind)
            rzsize = p2rz(ptr);
        iqalloc(ptr);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
    je_realloc;
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

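/*
 * Usage sketch (illustrative, not part of the upstream sources; assumes a
 * build with config_stats, where the "stats.allocated" name exists in the
 * ctl tree):
 *
 *   size_t allocated, sz = sizeof(allocated);
 *   if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *       malloc_printf("allocated: %zu\n", allocated);
 *
 * Reads pass oldp/oldlenp, writes pass newp/newlen; either side may be
 * NULL/0 when unused.
 */
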
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

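/*
 * Usage sketch for the experimental *allocm() API (illustrative, not part of
 * the upstream sources): allocate 100 zeroed bytes aligned to 64, then try
 * to grow the allocation in place.
 *
 *   void *p;
 *   size_t rsize;
 *   if (je_allocm(&p, &rsize, 100, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO)
 *       != ALLOCM_SUCCESS)
 *       abort();
 *   if (je_rallocm(&p, &rsize, 200, 0, ALLOCM_NO_MOVE)
 *       == ALLOCM_ERR_NOT_MOVED)
 *       fall back to a moving reallocation here;
 *
 * rsize reports the real (usable) size backing the allocation.
 */
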
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);

    p = *ptr;
    if (config_prof && opt_prof) {
        prof_thr_cnt_t *cnt;

        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero. Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace. prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, q);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqalloc(ptr);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));

    assert(size != 0);

    if (malloc_init())
        return (ALLOCM_ERR_OOM);

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);

    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Acquire all mutexes in a safe order. */
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
    base_prefork();
    huge_prefork();
    chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (malloc_initialized == false)
        return;
#endif
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_parent();
    huge_postfork_parent();
    base_postfork_parent();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
    unsigned i;

    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_child();
    huge_postfork_child();
    base_postfork_child();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

    if (malloc_init())
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(arenas[0], size, zero, false));
    else
        return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, false);
    else
        huge_dalloc(ptr, true);
}

/******************************************************************************/