1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3
4 /******************************************************************************/
5 /* Data. */
6
7 malloc_tsd_data(, arenas, arena_t *, NULL)
8 malloc_tsd_data(, thread_allocated, thread_allocated_t,
9 THREAD_ALLOCATED_INITIALIZER)
10
11 /* Runtime configuration options. */
12 const char *je_malloc_conf;
13 #ifdef JEMALLOC_DEBUG
14 bool opt_abort = true;
15 # ifdef JEMALLOC_FILL
16 bool opt_junk = true;
17 # else
18 bool opt_junk = false;
19 # endif
20 #else
21 bool opt_abort = false;
22 bool opt_junk = false;
23 #endif
24 size_t opt_quarantine = ZU(0);
25 bool opt_redzone = false;
26 bool opt_utrace = false;
27 bool opt_valgrind = false;
28 bool opt_xmalloc = false;
29 bool opt_zero = false;
30 size_t opt_narenas = 0;
31
32 unsigned ncpus;
33
34 malloc_mutex_t arenas_lock;
35 arena_t **arenas;
36 unsigned narenas_total;
37 unsigned narenas_auto;
38
39 /* Set to true once the allocator has been initialized. */
40 static bool malloc_initialized = false;
41
42 #ifdef JEMALLOC_THREADED_INIT
43 /* Used to let the initializing thread recursively allocate. */
44 # define NO_INITIALIZER ((unsigned long)0)
45 # define INITIALIZER pthread_self()
46 # define IS_INITIALIZER (malloc_initializer == pthread_self())
47 static pthread_t malloc_initializer = NO_INITIALIZER;
48 #else
49 # define NO_INITIALIZER false
50 # define INITIALIZER true
51 # define IS_INITIALIZER malloc_initializer
52 static bool malloc_initializer = NO_INITIALIZER;
53 #endif
54
55 /* Used to avoid initialization races. */
56 #ifdef _WIN32
57 static malloc_mutex_t init_lock;
58
59 JEMALLOC_ATTR(constructor)
60 static void WINAPI
61 _init_init_lock(void)
62 {
63
64 malloc_mutex_init(&init_lock);
65 }
66
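/*
 * MSVC has no constructor attribute, so a pointer to _init_init_lock is
 * placed in the CRT's ".CRT$XCU" initializer section below; the C runtime
 * calls the functions referenced from that section before main(), which
 * achieves the same early initialization as the constructor attribute on
 * other compilers.
 */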
67 #ifdef _MSC_VER
68 # pragma section(".CRT$XCU", read)
69 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
70 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
71 #endif
72
73 #else
74 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
75 #endif
76
77 typedef struct {
78 void *p; /* Input pointer (as in realloc(p, s)). */
79 size_t s; /* Request size. */
80 void *r; /* Result pointer. */
81 } malloc_utrace_t;
82
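/*
 * UTRACE() logs a (pointer, size, result) triple for each allocator entry
 * point when built with JEMALLOC_UTRACE and the "utrace" option is enabled;
 * it relies on the utrace(2) kernel tracing facility (available on e.g.
 * FreeBSD) and otherwise expands to nothing.
 */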
83 #ifdef JEMALLOC_UTRACE
84 # define UTRACE(a, b, c) do { \
85 if (opt_utrace) { \
86 malloc_utrace_t ut; \
87 ut.p = (a); \
88 ut.s = (b); \
89 ut.r = (c); \
90 utrace(&ut, sizeof(ut)); \
91 } \
92 } while (0)
93 #else
94 # define UTRACE(a, b, c)
95 #endif
96
97 /******************************************************************************/
98 /* Function prototypes for non-inline static functions. */
99
100 static void stats_print_atexit(void);
101 static unsigned malloc_ncpus(void);
102 static bool malloc_conf_next(char const **opts_p, char const **k_p,
103 size_t *klen_p, char const **v_p, size_t *vlen_p);
104 static void malloc_conf_error(const char *msg, const char *k, size_t klen,
105 const char *v, size_t vlen);
106 static void malloc_conf_init(void);
107 static bool malloc_init_hard(void);
108 static int imemalign(void **memptr, size_t alignment, size_t size,
109 size_t min_alignment);
110
111 /******************************************************************************/
112 /*
113 * Begin miscellaneous support functions.
114 */
115
116 /* Create a new arena and insert it into the arenas array at index ind. */
117 arena_t *
118 arenas_extend(unsigned ind)
119 {
120 arena_t *ret;
121
122 ret = (arena_t *)base_alloc(sizeof(arena_t));
123 if (ret != NULL && arena_new(ret, ind) == false) {
124 arenas[ind] = ret;
125 return (ret);
126 }
127 /* Only reached if there is an OOM error. */
128
129 /*
130 * OOM here is quite inconvenient to propagate, since dealing with it
131 * would require a check for failure in the fast path. Instead, punt
132 * by using arenas[0]. In practice, this is an extremely unlikely
133 * failure.
134 */
135 malloc_write("<jemalloc>: Error initializing arena\n");
136 if (opt_abort)
137 abort();
138
139 return (arenas[0]);
140 }
141
142 /* Slow path, called only by choose_arena(). */
143 arena_t *
144 choose_arena_hard(void)
145 {
146 arena_t *ret;
147
148 if (narenas_auto > 1) {
149 unsigned i, choose, first_null;
150
151 choose = 0;
152 first_null = narenas_auto;
153 malloc_mutex_lock(&arenas_lock);
154 assert(arenas[0] != NULL);
155 for (i = 1; i < narenas_auto; i++) {
156 if (arenas[i] != NULL) {
157 /*
158 				 * Choose the lowest-indexed arena with the
159 				 * fewest threads assigned to it.
160 */
161 if (arenas[i]->nthreads <
162 arenas[choose]->nthreads)
163 choose = i;
164 } else if (first_null == narenas_auto) {
165 /*
166 * Record the index of the first uninitialized
167 * arena, in case all extant arenas are in use.
168 *
169 * NB: It is possible for there to be
170 * discontinuities in terms of initialized
171 * versus uninitialized arenas, due to the
172 * "thread.arena" mallctl.
173 */
174 first_null = i;
175 }
176 }
177
178 if (arenas[choose]->nthreads == 0
179 || first_null == narenas_auto) {
180 /*
181 * Use an unloaded arena, or the least loaded arena if
182 * all arenas are already initialized.
183 */
184 ret = arenas[choose];
185 } else {
186 /* Initialize a new arena. */
187 ret = arenas_extend(first_null);
188 }
189 ret->nthreads++;
190 malloc_mutex_unlock(&arenas_lock);
191 } else {
192 ret = arenas[0];
193 malloc_mutex_lock(&arenas_lock);
194 ret->nthreads++;
195 malloc_mutex_unlock(&arenas_lock);
196 }
197
198 arenas_tsd_set(&ret);
199
200 return (ret);
201 }
202
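/*
 * Registered via atexit() in malloc_init_hard() when the "stats_print"
 * option is enabled (e.g. MALLOC_CONF=stats_print:true, modulo any
 * configured symbol prefix), so that a full statistics report is emitted
 * when the process exits.
 */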
203 static void
204 stats_print_atexit(void)
205 {
206
207 if (config_tcache && config_stats) {
208 unsigned narenas, i;
209
210 /*
211 * Merge stats from extant threads. This is racy, since
212 * individual threads do not lock when recording tcache stats
213 * events. As a consequence, the final stats may be slightly
214 * out of date by the time they are reported, if other threads
215 * continue to allocate.
216 */
217 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
218 arena_t *arena = arenas[i];
219 if (arena != NULL) {
220 tcache_t *tcache;
221
222 /*
223 * tcache_stats_merge() locks bins, so if any
224 * code is introduced that acquires both arena
225 * and bin locks in the opposite order,
226 * deadlocks may result.
227 */
228 malloc_mutex_lock(&arena->lock);
229 ql_foreach(tcache, &arena->tcache_ql, link) {
230 tcache_stats_merge(tcache, arena);
231 }
232 malloc_mutex_unlock(&arena->lock);
233 }
234 }
235 }
236 je_malloc_stats_print(NULL, NULL, NULL);
237 }
238
239 /*
240 * End miscellaneous support functions.
241 */
242 /******************************************************************************/
243 /*
244 * Begin initialization functions.
245 */
246
247 static unsigned
248 malloc_ncpus(void)
249 {
250 unsigned ret;
251 long result;
252
253 #ifdef _WIN32
254 SYSTEM_INFO si;
255 GetSystemInfo(&si);
256 result = si.dwNumberOfProcessors;
257 #else
258 result = sysconf(_SC_NPROCESSORS_ONLN);
259 #endif
260 if (result == -1) {
261 /* Error. */
262 ret = 1;
263 } else {
264 ret = (unsigned)result;
265 }
266
267 return (ret);
268 }
269
270 void
271 arenas_cleanup(void *arg)
272 {
273 arena_t *arena = *(arena_t **)arg;
274
275 malloc_mutex_lock(&arenas_lock);
276 arena->nthreads--;
277 malloc_mutex_unlock(&arenas_lock);
278 }
279
280 static inline bool
281 malloc_init(void)
282 {
283
284 if (malloc_initialized == false)
285 return (malloc_init_hard());
286
287 return (false);
288 }
289
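/*
 * malloc_conf_next() incrementally parses an options string of the form
 * "key1:value1,key2:value2", e.g. "abort:true,lg_chunk:22,narenas:2".  Each
 * call extracts one key/value pair via *k_p/*klen_p and *v_p/*vlen_p and
 * advances *opts_p past it; the return value is false after a pair has been
 * extracted, and true once the string is exhausted or malformed.
 */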
290 static bool
291 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
292 char const **v_p, size_t *vlen_p)
293 {
294 bool accept;
295 const char *opts = *opts_p;
296
297 *k_p = opts;
298
299 for (accept = false; accept == false;) {
300 switch (*opts) {
301 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
302 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
303 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
304 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
305 case 'Y': case 'Z':
306 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
307 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
308 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
309 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
310 case 'y': case 'z':
311 case '0': case '1': case '2': case '3': case '4': case '5':
312 case '6': case '7': case '8': case '9':
313 case '_':
314 opts++;
315 break;
316 case ':':
317 opts++;
318 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
319 *v_p = opts;
320 accept = true;
321 break;
322 case '\0':
323 if (opts != *opts_p) {
324 malloc_write("<jemalloc>: Conf string ends "
325 "with key\n");
326 }
327 return (true);
328 default:
329 malloc_write("<jemalloc>: Malformed conf string\n");
330 return (true);
331 }
332 }
333
334 for (accept = false; accept == false;) {
335 switch (*opts) {
336 case ',':
337 opts++;
338 /*
339 * Look ahead one character here, because the next time
340 * this function is called, it will assume that end of
341 * input has been cleanly reached if no input remains,
342 * but we have optimistically already consumed the
343 * comma if one exists.
344 */
345 if (*opts == '\0') {
346 malloc_write("<jemalloc>: Conf string ends "
347 "with comma\n");
348 }
349 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
350 accept = true;
351 break;
352 case '\0':
353 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
354 accept = true;
355 break;
356 default:
357 opts++;
358 break;
359 }
360 }
361
362 *opts_p = opts;
363 return (false);
364 }
365
366 static void
367 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
368 size_t vlen)
369 {
370
371 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
372 (int)vlen, v);
373 }
374
375 static void
376 malloc_conf_init(void)
377 {
378 unsigned i;
379 char buf[PATH_MAX + 1];
380 const char *opts, *k, *v;
381 size_t klen, vlen;
382
383 /*
384 * Automatically configure valgrind before processing options. The
385 * valgrind option remains in jemalloc 3.x for compatibility reasons.
386 */
387 if (config_valgrind) {
388 opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
389 if (config_fill && opt_valgrind) {
390 opt_junk = false;
391 assert(opt_zero == false);
392 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
393 opt_redzone = true;
394 }
395 if (config_tcache && opt_valgrind)
396 opt_tcache = false;
397 }
398
399 for (i = 0; i < 3; i++) {
400 /* Get runtime configuration. */
401 switch (i) {
402 case 0:
403 if (je_malloc_conf != NULL) {
404 /*
405 * Use options that were compiled into the
406 * program.
407 */
408 opts = je_malloc_conf;
409 } else {
410 /* No configuration specified. */
411 buf[0] = '\0';
412 opts = buf;
413 }
414 break;
415 case 1: {
416 #ifndef _WIN32
417 int linklen;
418 const char *linkname =
419 # ifdef JEMALLOC_PREFIX
420 "/etc/"JEMALLOC_PREFIX"malloc.conf"
421 # else
422 "/etc/malloc.conf"
423 # endif
424 ;
425
426 if ((linklen = readlink(linkname, buf,
427 sizeof(buf) - 1)) != -1) {
428 /*
429 				 * Use the target of the /etc/malloc.conf
430 				 * symbolic link as the options string.
431 */
432 buf[linklen] = '\0';
433 opts = buf;
434 } else
435 #endif
436 {
437 /* No configuration specified. */
438 buf[0] = '\0';
439 opts = buf;
440 }
441 break;
442 } case 2: {
443 const char *envname =
444 #ifdef JEMALLOC_PREFIX
445 JEMALLOC_CPREFIX"MALLOC_CONF"
446 #else
447 "MALLOC_CONF"
448 #endif
449 ;
450
451 if ((opts = getenv(envname)) != NULL) {
452 /*
453 * Do nothing; opts is already initialized to
454 * the value of the MALLOC_CONF environment
455 * variable.
456 */
457 } else {
458 /* No configuration specified. */
459 buf[0] = '\0';
460 opts = buf;
461 }
462 break;
463 } default:
464 /* NOTREACHED */
465 assert(false);
466 buf[0] = '\0';
467 opts = buf;
468 }
469
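		/*
		 * Walk the option string one key:value pair at a time.  Each
		 * CONF_HANDLE_* macro below matches the current key against
		 * one known option name, parses the value as the appropriate
		 * type (bool, size_t, ssize_t, or string), range-checks
		 * numeric values, stores the result in the corresponding
		 * opt_* variable, and continues the loop on a match.
		 */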
470 while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
471 &vlen) == false) {
472 #define CONF_HANDLE_BOOL_HIT(o, n, hit) \
473 if (sizeof(n)-1 == klen && strncmp(n, k, \
474 klen) == 0) { \
475 if (strncmp("true", v, vlen) == 0 && \
476 vlen == sizeof("true")-1) \
477 o = true; \
478 else if (strncmp("false", v, vlen) == \
479 0 && vlen == sizeof("false")-1) \
480 o = false; \
481 else { \
482 malloc_conf_error( \
483 "Invalid conf value", \
484 k, klen, v, vlen); \
485 } \
486 hit = true; \
487 } else \
488 hit = false;
489 #define CONF_HANDLE_BOOL(o, n) { \
490 bool hit; \
491 CONF_HANDLE_BOOL_HIT(o, n, hit); \
492 if (hit) \
493 continue; \
494 }
495 #define CONF_HANDLE_SIZE_T(o, n, min, max) \
496 if (sizeof(n)-1 == klen && strncmp(n, k, \
497 klen) == 0) { \
498 uintmax_t um; \
499 char *end; \
500 \
501 set_errno(0); \
502 um = malloc_strtoumax(v, &end, 0); \
503 if (get_errno() != 0 || (uintptr_t)end -\
504 (uintptr_t)v != vlen) { \
505 malloc_conf_error( \
506 "Invalid conf value", \
507 k, klen, v, vlen); \
508 } else if (um < min || um > max) { \
509 malloc_conf_error( \
510 "Out-of-range conf value", \
511 k, klen, v, vlen); \
512 } else \
513 o = um; \
514 continue; \
515 }
516 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
517 if (sizeof(n)-1 == klen && strncmp(n, k, \
518 klen) == 0) { \
519 long l; \
520 char *end; \
521 \
522 set_errno(0); \
523 l = strtol(v, &end, 0); \
524 if (get_errno() != 0 || (uintptr_t)end -\
525 (uintptr_t)v != vlen) { \
526 malloc_conf_error( \
527 "Invalid conf value", \
528 k, klen, v, vlen); \
529 } else if (l < (ssize_t)min || l > \
530 (ssize_t)max) { \
531 malloc_conf_error( \
532 "Out-of-range conf value", \
533 k, klen, v, vlen); \
534 } else \
535 o = l; \
536 continue; \
537 }
538 #define CONF_HANDLE_CHAR_P(o, n, d) \
539 if (sizeof(n)-1 == klen && strncmp(n, k, \
540 klen) == 0) { \
541 size_t cpylen = (vlen <= \
542 sizeof(o)-1) ? vlen : \
543 sizeof(o)-1; \
544 strncpy(o, v, cpylen); \
545 o[cpylen] = '\0'; \
546 continue; \
547 }
548
549 CONF_HANDLE_BOOL(opt_abort, "abort")
550 /*
551 * Chunks always require at least one header page, plus
552 * one data page in the absence of redzones, or three
553 * pages in the presence of redzones. In order to
554 * simplify options processing, fix the limit based on
555 * config_fill.
556 */
557 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
558 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
559 if (strncmp("dss", k, klen) == 0) {
560 int i;
561 bool match = false;
562 for (i = 0; i < dss_prec_limit; i++) {
563 if (strncmp(dss_prec_names[i], v, vlen)
564 == 0) {
565 if (chunk_dss_prec_set(i)) {
566 malloc_conf_error(
567 "Error setting dss",
568 k, klen, v, vlen);
569 } else {
570 opt_dss =
571 dss_prec_names[i];
572 match = true;
573 break;
574 }
575 }
576 }
577 if (match == false) {
578 malloc_conf_error("Invalid conf value",
579 k, klen, v, vlen);
580 }
581 continue;
582 }
583 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
584 SIZE_T_MAX)
585 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
586 -1, (sizeof(size_t) << 3) - 1)
587 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
588 if (config_fill) {
589 CONF_HANDLE_BOOL(opt_junk, "junk")
590 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
591 0, SIZE_T_MAX)
592 CONF_HANDLE_BOOL(opt_redzone, "redzone")
593 CONF_HANDLE_BOOL(opt_zero, "zero")
594 }
595 if (config_utrace) {
596 CONF_HANDLE_BOOL(opt_utrace, "utrace")
597 }
598 if (config_valgrind) {
599 CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
600 }
601 if (config_xmalloc) {
602 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
603 }
604 if (config_tcache) {
605 CONF_HANDLE_BOOL(opt_tcache, "tcache")
606 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
607 "lg_tcache_max", -1,
608 (sizeof(size_t) << 3) - 1)
609 }
610 if (config_prof) {
611 CONF_HANDLE_BOOL(opt_prof, "prof")
612 CONF_HANDLE_CHAR_P(opt_prof_prefix,
613 "prof_prefix", "jeprof")
614 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
615 CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
616 "lg_prof_sample", 0,
617 (sizeof(uint64_t) << 3) - 1)
618 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
619 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
620 "lg_prof_interval", -1,
621 (sizeof(uint64_t) << 3) - 1)
622 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
623 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
624 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
625 }
626 malloc_conf_error("Invalid conf pair", k, klen, v,
627 vlen);
628 #undef CONF_HANDLE_BOOL
629 #undef CONF_HANDLE_SIZE_T
630 #undef CONF_HANDLE_SSIZE_T
631 #undef CONF_HANDLE_CHAR_P
632 }
633 }
634 }
635
636 static bool
637 malloc_init_hard(void)
638 {
639 arena_t *init_arenas[1];
640
641 malloc_mutex_lock(&init_lock);
642 if (malloc_initialized || IS_INITIALIZER) {
643 /*
644 * Another thread initialized the allocator before this one
645 * acquired init_lock, or this thread is the initializing
646 * thread, and it is recursively allocating.
647 */
648 malloc_mutex_unlock(&init_lock);
649 return (false);
650 }
651 #ifdef JEMALLOC_THREADED_INIT
652 if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
653 /* Busy-wait until the initializing thread completes. */
654 do {
655 malloc_mutex_unlock(&init_lock);
656 CPU_SPINWAIT;
657 malloc_mutex_lock(&init_lock);
658 } while (malloc_initialized == false);
659 malloc_mutex_unlock(&init_lock);
660 return (false);
661 }
662 #endif
663 malloc_initializer = INITIALIZER;
664
665 malloc_tsd_boot();
666 if (config_prof)
667 prof_boot0();
668
669 malloc_conf_init();
670
671 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
672 && !defined(_WIN32))
673 /* Register fork handlers. */
674 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
675 jemalloc_postfork_child) != 0) {
676 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
677 if (opt_abort)
678 abort();
679 }
680 #endif
681
682 if (opt_stats_print) {
683 /* Print statistics at exit. */
684 if (atexit(stats_print_atexit) != 0) {
685 malloc_write("<jemalloc>: Error in atexit()\n");
686 if (opt_abort)
687 abort();
688 }
689 }
690
691 if (base_boot()) {
692 malloc_mutex_unlock(&init_lock);
693 return (true);
694 }
695
696 if (chunk_boot()) {
697 malloc_mutex_unlock(&init_lock);
698 return (true);
699 }
700
701 if (ctl_boot()) {
702 malloc_mutex_unlock(&init_lock);
703 return (true);
704 }
705
706 if (config_prof)
707 prof_boot1();
708
709 arena_boot();
710
711 if (config_tcache && tcache_boot0()) {
712 malloc_mutex_unlock(&init_lock);
713 return (true);
714 }
715
716 if (huge_boot()) {
717 malloc_mutex_unlock(&init_lock);
718 return (true);
719 }
720
721 	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
722 		return (true);
	}
723
724 /*
725 * Create enough scaffolding to allow recursive allocation in
726 * malloc_ncpus().
727 */
728 narenas_total = narenas_auto = 1;
729 arenas = init_arenas;
730 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
731
732 /*
733 * Initialize one arena here. The rest are lazily created in
734 * choose_arena_hard().
735 */
736 arenas_extend(0);
737 if (arenas[0] == NULL) {
738 malloc_mutex_unlock(&init_lock);
739 return (true);
740 }
741
742 /* Initialize allocation counters before any allocations can occur. */
743 if (config_stats && thread_allocated_tsd_boot()) {
744 malloc_mutex_unlock(&init_lock);
745 return (true);
746 }
747
748 if (arenas_tsd_boot()) {
749 malloc_mutex_unlock(&init_lock);
750 return (true);
751 }
752
753 if (config_tcache && tcache_boot1()) {
754 malloc_mutex_unlock(&init_lock);
755 return (true);
756 }
757
758 if (config_fill && quarantine_boot()) {
759 malloc_mutex_unlock(&init_lock);
760 return (true);
761 }
762
763 if (config_prof && prof_boot2()) {
764 malloc_mutex_unlock(&init_lock);
765 return (true);
766 }
767
768 /* Get number of CPUs. */
769 malloc_mutex_unlock(&init_lock);
770 ncpus = malloc_ncpus();
771 malloc_mutex_lock(&init_lock);
772
773 if (mutex_boot()) {
774 malloc_mutex_unlock(&init_lock);
775 return (true);
776 }
777
778 if (opt_narenas == 0) {
779 /*
780 		 * For SMP systems, create four arenas per CPU (ncpus << 2)
781 		 * by default.
782 */
783 if (ncpus > 1)
784 opt_narenas = ncpus << 2;
785 else
786 opt_narenas = 1;
787 }
788 narenas_auto = opt_narenas;
789 /*
790 * Make sure that the arenas array can be allocated. In practice, this
791 * limit is enough to allow the allocator to function, but the ctl
792 * machinery will fail to allocate memory at far lower limits.
793 */
794 if (narenas_auto > chunksize / sizeof(arena_t *)) {
795 narenas_auto = chunksize / sizeof(arena_t *);
796 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
797 narenas_auto);
798 }
799 narenas_total = narenas_auto;
800
801 /* Allocate and initialize arenas. */
802 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
803 if (arenas == NULL) {
804 malloc_mutex_unlock(&init_lock);
805 return (true);
806 }
807 /*
808 * Zero the array. In practice, this should always be pre-zeroed,
809 * since it was just mmap()ed, but let's be sure.
810 */
811 memset(arenas, 0, sizeof(arena_t *) * narenas_total);
812 /* Copy the pointer to the one arena that was already initialized. */
813 arenas[0] = init_arenas[0];
814
815 malloc_initialized = true;
816 malloc_mutex_unlock(&init_lock);
817 return (false);
818 }
819
820 /*
821 * End initialization functions.
822 */
823 /******************************************************************************/
824 /*
825 * Begin malloc(3)-compatible functions.
826 */
827
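/*
 * A note on the prof_promote pattern used below (and repeated in
 * je_calloc(), je_realloc(), and je_allocm()): when a small request is
 * selected for profiling sampling, it is allocated as SMALL_MAXCLASS+1
 * bytes, i.e. promoted to the smallest large size class, and
 * arena_prof_promoted() then records the originally requested usable size.
 * This presumably allows per-allocation profiling context to be attached,
 * which small regions do not otherwise carry.
 */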
828 void *
829 je_malloc(size_t size)
830 {
831 void *ret;
832 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
833 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
834
835 if (malloc_init()) {
836 ret = NULL;
837 goto label_oom;
838 }
839
840 if (size == 0)
841 size = 1;
842
843 if (config_prof && opt_prof) {
844 usize = s2u(size);
845 PROF_ALLOC_PREP(1, usize, cnt);
846 if (cnt == NULL) {
847 ret = NULL;
848 goto label_oom;
849 }
850 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
851 SMALL_MAXCLASS) {
852 ret = imalloc(SMALL_MAXCLASS+1);
853 if (ret != NULL)
854 arena_prof_promoted(ret, usize);
855 } else
856 ret = imalloc(size);
857 } else {
858 if (config_stats || (config_valgrind && opt_valgrind))
859 usize = s2u(size);
860 ret = imalloc(size);
861 }
862
863 label_oom:
864 if (ret == NULL) {
865 if (config_xmalloc && opt_xmalloc) {
866 malloc_write("<jemalloc>: Error in malloc(): "
867 "out of memory\n");
868 abort();
869 }
870 set_errno(ENOMEM);
871 }
872 if (config_prof && opt_prof && ret != NULL)
873 prof_malloc(ret, usize, cnt);
874 if (config_stats && ret != NULL) {
875 assert(usize == isalloc(ret, config_prof));
876 thread_allocated_tsd_get()->allocated += usize;
877 }
878 UTRACE(0, size, ret);
879 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
880 return (ret);
881 }
882
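/*
 * Common implementation behind the aligned allocation entry points.
 * min_alignment is the weakest alignment the caller's API contract allows:
 * je_posix_memalign() passes sizeof(void *) per POSIX, while
 * je_aligned_alloc(), je_memalign(), and je_valloc() pass 1.  The return
 * value is 0 or an errno-style error code; errno itself is not set here.
 */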
883 JEMALLOC_ATTR(nonnull(1))
884 #ifdef JEMALLOC_PROF
885 /*
886 * Avoid any uncertainty as to how many backtrace frames to ignore in
887 * PROF_ALLOC_PREP().
888 */
889 JEMALLOC_ATTR(noinline)
890 #endif
891 static int
892 imemalign(void **memptr, size_t alignment, size_t size,
893 size_t min_alignment)
894 {
895 int ret;
896 size_t usize;
897 void *result;
898 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
899
900 assert(min_alignment != 0);
901
902 if (malloc_init())
903 result = NULL;
904 else {
905 if (size == 0)
906 size = 1;
907
908 /* Make sure that alignment is a large enough power of 2. */
909 if (((alignment - 1) & alignment) != 0
910 || (alignment < min_alignment)) {
911 if (config_xmalloc && opt_xmalloc) {
912 malloc_write("<jemalloc>: Error allocating "
913 "aligned memory: invalid alignment\n");
914 abort();
915 }
916 result = NULL;
917 ret = EINVAL;
918 goto label_return;
919 }
920
921 usize = sa2u(size, alignment);
922 if (usize == 0) {
923 result = NULL;
924 ret = ENOMEM;
925 goto label_return;
926 }
927
928 if (config_prof && opt_prof) {
929 PROF_ALLOC_PREP(2, usize, cnt);
930 if (cnt == NULL) {
931 result = NULL;
932 ret = EINVAL;
933 } else {
934 if (prof_promote && (uintptr_t)cnt !=
935 (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
936 assert(sa2u(SMALL_MAXCLASS+1,
937 alignment) != 0);
938 result = ipalloc(sa2u(SMALL_MAXCLASS+1,
939 alignment), alignment, false);
940 if (result != NULL) {
941 arena_prof_promoted(result,
942 usize);
943 }
944 } else {
945 result = ipalloc(usize, alignment,
946 false);
947 }
948 }
949 } else
950 result = ipalloc(usize, alignment, false);
951 }
952
953 if (result == NULL) {
954 if (config_xmalloc && opt_xmalloc) {
955 malloc_write("<jemalloc>: Error allocating aligned "
956 "memory: out of memory\n");
957 abort();
958 }
959 ret = ENOMEM;
960 goto label_return;
961 }
962
963 *memptr = result;
964 ret = 0;
965
966 label_return:
967 if (config_stats && result != NULL) {
968 assert(usize == isalloc(result, config_prof));
969 thread_allocated_tsd_get()->allocated += usize;
970 }
971 if (config_prof && opt_prof && result != NULL)
972 prof_malloc(result, usize, cnt);
973 UTRACE(0, size, result);
974 return (ret);
975 }
976
977 int
978 je_posix_memalign(void **memptr, size_t alignment, size_t size)
979 {
980 int ret = imemalign(memptr, alignment, size, sizeof(void *));
981 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
982 config_prof), false);
983 return (ret);
984 }
985
986 void *
987 je_aligned_alloc(size_t alignment, size_t size)
988 {
989 void *ret;
990 int err;
991
992 if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
993 ret = NULL;
994 set_errno(err);
995 }
996 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
997 false);
998 return (ret);
999 }
1000
1001 void *
1002 je_calloc(size_t num, size_t size)
1003 {
1004 void *ret;
1005 size_t num_size;
1006 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1007 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1008
1009 if (malloc_init()) {
1010 num_size = 0;
1011 ret = NULL;
1012 goto label_return;
1013 }
1014
1015 num_size = num * size;
1016 if (num_size == 0) {
1017 if (num == 0 || size == 0)
1018 num_size = 1;
1019 else {
1020 ret = NULL;
1021 goto label_return;
1022 }
1023 /*
1024 * Try to avoid division here. We know that it isn't possible to
1025 * overflow during multiplication if neither operand uses any of the
1026 * most significant half of the bits in a size_t.
1027 */
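	/*
	 * Concretely, with a 64-bit size_t, (sizeof(size_t) << 2) is 32, so
	 * the mask below covers the upper 32 bits.  If neither num nor size
	 * has any of those bits set, both are below 2^32 and their product
	 * cannot wrap a 64-bit size_t, so the division test is skipped.
	 */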
1028 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
1029 && (num_size / size != num)) {
1030 /* size_t overflow. */
1031 ret = NULL;
1032 goto label_return;
1033 }
1034
1035 if (config_prof && opt_prof) {
1036 usize = s2u(num_size);
1037 PROF_ALLOC_PREP(1, usize, cnt);
1038 if (cnt == NULL) {
1039 ret = NULL;
1040 goto label_return;
1041 }
1042 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
1043 <= SMALL_MAXCLASS) {
1044 ret = icalloc(SMALL_MAXCLASS+1);
1045 if (ret != NULL)
1046 arena_prof_promoted(ret, usize);
1047 } else
1048 ret = icalloc(num_size);
1049 } else {
1050 if (config_stats || (config_valgrind && opt_valgrind))
1051 usize = s2u(num_size);
1052 ret = icalloc(num_size);
1053 }
1054
1055 label_return:
1056 if (ret == NULL) {
1057 if (config_xmalloc && opt_xmalloc) {
1058 malloc_write("<jemalloc>: Error in calloc(): out of "
1059 "memory\n");
1060 abort();
1061 }
1062 set_errno(ENOMEM);
1063 }
1064
1065 if (config_prof && opt_prof && ret != NULL)
1066 prof_malloc(ret, usize, cnt);
1067 if (config_stats && ret != NULL) {
1068 assert(usize == isalloc(ret, config_prof));
1069 thread_allocated_tsd_get()->allocated += usize;
1070 }
1071 UTRACE(0, num_size, ret);
1072 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1073 return (ret);
1074 }
1075
1076 void *
1077 je_realloc(void *ptr, size_t size)
1078 {
1079 void *ret;
1080 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1081 size_t old_size = 0;
1082 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1083 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1084 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
1085
1086 if (size == 0) {
1087 if (ptr != NULL) {
1088 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1089 if (config_prof) {
1090 old_size = isalloc(ptr, true);
1091 if (config_valgrind && opt_valgrind)
1092 old_rzsize = p2rz(ptr);
1093 } else if (config_stats) {
1094 old_size = isalloc(ptr, false);
1095 if (config_valgrind && opt_valgrind)
1096 old_rzsize = u2rz(old_size);
1097 } else if (config_valgrind && opt_valgrind) {
1098 old_size = isalloc(ptr, false);
1099 old_rzsize = u2rz(old_size);
1100 }
1101 if (config_prof && opt_prof) {
1102 old_ctx = prof_ctx_get(ptr);
1103 cnt = NULL;
1104 }
1105 iqalloc(ptr);
1106 ret = NULL;
1107 goto label_return;
1108 } else
1109 size = 1;
1110 }
1111
1112 if (ptr != NULL) {
1113 assert(malloc_initialized || IS_INITIALIZER);
1114
1115 if (config_prof) {
1116 old_size = isalloc(ptr, true);
1117 if (config_valgrind && opt_valgrind)
1118 old_rzsize = p2rz(ptr);
1119 } else if (config_stats) {
1120 old_size = isalloc(ptr, false);
1121 if (config_valgrind && opt_valgrind)
1122 old_rzsize = u2rz(old_size);
1123 } else if (config_valgrind && opt_valgrind) {
1124 old_size = isalloc(ptr, false);
1125 old_rzsize = u2rz(old_size);
1126 }
1127 if (config_prof && opt_prof) {
1128 usize = s2u(size);
1129 old_ctx = prof_ctx_get(ptr);
1130 PROF_ALLOC_PREP(1, usize, cnt);
1131 if (cnt == NULL) {
1132 old_ctx = NULL;
1133 ret = NULL;
1134 goto label_oom;
1135 }
1136 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1137 usize <= SMALL_MAXCLASS) {
1138 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1139 false, false);
1140 if (ret != NULL)
1141 arena_prof_promoted(ret, usize);
1142 else
1143 old_ctx = NULL;
1144 } else {
1145 ret = iralloc(ptr, size, 0, 0, false, false);
1146 if (ret == NULL)
1147 old_ctx = NULL;
1148 }
1149 } else {
1150 if (config_stats || (config_valgrind && opt_valgrind))
1151 usize = s2u(size);
1152 ret = iralloc(ptr, size, 0, 0, false, false);
1153 }
1154
1155 label_oom:
1156 if (ret == NULL) {
1157 if (config_xmalloc && opt_xmalloc) {
1158 malloc_write("<jemalloc>: Error in realloc(): "
1159 "out of memory\n");
1160 abort();
1161 }
1162 set_errno(ENOMEM);
1163 }
1164 } else {
1165 /* realloc(NULL, size) is equivalent to malloc(size). */
1166 if (config_prof && opt_prof)
1167 old_ctx = NULL;
1168 if (malloc_init()) {
1169 if (config_prof && opt_prof)
1170 cnt = NULL;
1171 ret = NULL;
1172 } else {
1173 if (config_prof && opt_prof) {
1174 usize = s2u(size);
1175 PROF_ALLOC_PREP(1, usize, cnt);
1176 if (cnt == NULL)
1177 ret = NULL;
1178 else {
1179 if (prof_promote && (uintptr_t)cnt !=
1180 (uintptr_t)1U && usize <=
1181 SMALL_MAXCLASS) {
1182 ret = imalloc(SMALL_MAXCLASS+1);
1183 if (ret != NULL) {
1184 arena_prof_promoted(ret,
1185 usize);
1186 }
1187 } else
1188 ret = imalloc(size);
1189 }
1190 } else {
1191 if (config_stats || (config_valgrind &&
1192 opt_valgrind))
1193 usize = s2u(size);
1194 ret = imalloc(size);
1195 }
1196 }
1197
1198 if (ret == NULL) {
1199 if (config_xmalloc && opt_xmalloc) {
1200 malloc_write("<jemalloc>: Error in realloc(): "
1201 "out of memory\n");
1202 abort();
1203 }
1204 set_errno(ENOMEM);
1205 }
1206 }
1207
1208 label_return:
1209 if (config_prof && opt_prof)
1210 prof_realloc(ret, usize, cnt, old_size, old_ctx);
1211 if (config_stats && ret != NULL) {
1212 thread_allocated_t *ta;
1213 assert(usize == isalloc(ret, config_prof));
1214 ta = thread_allocated_tsd_get();
1215 ta->allocated += usize;
1216 ta->deallocated += old_size;
1217 }
1218 UTRACE(ptr, size, ret);
1219 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
1220 return (ret);
1221 }
1222
1223 void
1224 je_free(void *ptr)
1225 {
1226
1227 UTRACE(ptr, 0, 0);
1228 if (ptr != NULL) {
1229 size_t usize;
1230 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1231
1232 assert(malloc_initialized || IS_INITIALIZER);
1233
1234 if (config_prof && opt_prof) {
1235 usize = isalloc(ptr, config_prof);
1236 prof_free(ptr, usize);
1237 } else if (config_stats || config_valgrind)
1238 usize = isalloc(ptr, config_prof);
1239 if (config_stats)
1240 thread_allocated_tsd_get()->deallocated += usize;
1241 if (config_valgrind && opt_valgrind)
1242 rzsize = p2rz(ptr);
1243 iqalloc(ptr);
1244 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1245 }
1246 }
1247
1248 /*
1249 * End malloc(3)-compatible functions.
1250 */
1251 /******************************************************************************/
1252 /*
1253 * Begin non-standard override functions.
1254 */
1255
1256 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1257 void *
1258 je_memalign(size_t alignment, size_t size)
1259 {
1260 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1261 imemalign(&ret, alignment, size, 1);
1262 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1263 return (ret);
1264 }
1265 #endif
1266
1267 #ifdef JEMALLOC_OVERRIDE_VALLOC
1268 void *
1269 je_valloc(size_t size)
1270 {
1271 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1272 imemalign(&ret, PAGE, size, 1);
1273 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1274 return (ret);
1275 }
1276 #endif
1277
1278 /*
1279 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1280 * #define je_malloc malloc
1281 */
1282 #define malloc_is_malloc 1
1283 #define is_malloc_(a) malloc_is_ ## a
1284 #define is_malloc(a) is_malloc_(a)
1285
1286 #if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1287 /*
1288 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1289 * to inconsistently reference libc's malloc(3)-compatible functions
1290 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1291 *
1292 * These definitions interpose hooks in glibc. The functions are actually
1293 * passed an extra argument for the caller return address, which will be
1294 * ignored.
1295 */
1296 JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
1297 JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
1298 JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
1299 JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
1300 je_memalign;
1301 #endif
1302
1303 /*
1304 * End non-standard override functions.
1305 */
1306 /******************************************************************************/
1307 /*
1308 * Begin non-standard functions.
1309 */
1310
1311 size_t
1312 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
1313 {
1314 size_t ret;
1315
1316 assert(malloc_initialized || IS_INITIALIZER);
1317
1318 if (config_ivsalloc)
1319 ret = ivsalloc(ptr, config_prof);
1320 else
1321 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1322
1323 return (ret);
1324 }
1325
1326 void
1327 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1328 const char *opts)
1329 {
1330
1331 stats_print(write_cb, cbopaque, opts);
1332 }
1333
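/*
 * The mallctl*() entry points are thin wrappers around the ctl machinery
 * (ctl_byname() and friends), returning EAGAIN if the allocator cannot be
 * initialized.  For example, a caller could read a size_t statistic with
 * something like:
 *
 *	size_t sz, len = sizeof(sz);
 *	je_mallctl("stats.allocated", &sz, &len, NULL, 0);
 *
 * assuming statistics support is compiled in and the usual ctl namespace.
 */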
1334 int
1335 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1336 size_t newlen)
1337 {
1338
1339 if (malloc_init())
1340 return (EAGAIN);
1341
1342 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1343 }
1344
1345 int
1346 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1347 {
1348
1349 if (malloc_init())
1350 return (EAGAIN);
1351
1352 return (ctl_nametomib(name, mibp, miblenp));
1353 }
1354
1355 int
1356 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1357 void *newp, size_t newlen)
1358 {
1359
1360 if (malloc_init())
1361 return (EAGAIN);
1362
1363 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1364 }
1365
1366 /*
1367 * End non-standard functions.
1368 */
1369 /******************************************************************************/
1370 /*
1371 * Begin experimental functions.
1372 */
1373 #ifdef JEMALLOC_EXPERIMENTAL
1374
1375 JEMALLOC_INLINE void *
1376 iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
1377 arena_t *arena)
1378 {
1379
1380 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1381 alignment)));
1382
1383 if (alignment != 0)
1384 return (ipallocx(usize, alignment, zero, try_tcache, arena));
1385 else if (zero)
1386 return (icallocx(usize, try_tcache, arena));
1387 else
1388 return (imallocx(usize, try_tcache, arena));
1389 }
1390
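/*
 * The flags argument to the experimental *allocm() entry points packs
 * several fields: the low bits (ALLOCM_LG_ALIGN_MASK) hold lg(alignment),
 * ALLOCM_ZERO and ALLOCM_NO_MOVE are single-bit options, and bits 8 and
 * above hold the arena index plus one (zero meaning no explicit arena),
 * which is why arena_ind is computed as ((flags >> 8)) - 1 below.
 */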
1391 int
1392 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1393 {
1394 void *p;
1395 size_t usize;
1396 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1397 & (SIZE_T_MAX-1));
1398 bool zero = flags & ALLOCM_ZERO;
1399 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1400 arena_t *arena;
1401 bool try_tcache;
1402
1403 assert(ptr != NULL);
1404 assert(size != 0);
1405
1406 if (malloc_init())
1407 goto label_oom;
1408
1409 if (arena_ind != UINT_MAX) {
1410 arena = arenas[arena_ind];
1411 try_tcache = false;
1412 } else {
1413 arena = NULL;
1414 try_tcache = true;
1415 }
1416
1417 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1418 if (usize == 0)
1419 goto label_oom;
1420
1421 if (config_prof && opt_prof) {
1422 prof_thr_cnt_t *cnt;
1423
1424 PROF_ALLOC_PREP(1, usize, cnt);
1425 if (cnt == NULL)
1426 goto label_oom;
1427 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1428 SMALL_MAXCLASS) {
1429 size_t usize_promoted = (alignment == 0) ?
1430 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1431 alignment);
1432 assert(usize_promoted != 0);
1433 p = iallocm(usize_promoted, alignment, zero,
1434 try_tcache, arena);
1435 if (p == NULL)
1436 goto label_oom;
1437 arena_prof_promoted(p, usize);
1438 } else {
1439 p = iallocm(usize, alignment, zero, try_tcache, arena);
1440 if (p == NULL)
1441 goto label_oom;
1442 }
1443 prof_malloc(p, usize, cnt);
1444 } else {
1445 p = iallocm(usize, alignment, zero, try_tcache, arena);
1446 if (p == NULL)
1447 goto label_oom;
1448 }
1449 if (rsize != NULL)
1450 *rsize = usize;
1451
1452 *ptr = p;
1453 if (config_stats) {
1454 assert(usize == isalloc(p, config_prof));
1455 thread_allocated_tsd_get()->allocated += usize;
1456 }
1457 UTRACE(0, size, p);
1458 JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
1459 return (ALLOCM_SUCCESS);
1460 label_oom:
1461 if (config_xmalloc && opt_xmalloc) {
1462 malloc_write("<jemalloc>: Error in allocm(): "
1463 "out of memory\n");
1464 abort();
1465 }
1466 *ptr = NULL;
1467 UTRACE(0, size, 0);
1468 return (ALLOCM_ERR_OOM);
1469 }
1470
1471 int
1472 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1473 {
1474 void *p, *q;
1475 size_t usize;
1476 size_t old_size;
1477 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1478 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1479 & (SIZE_T_MAX-1));
1480 bool zero = flags & ALLOCM_ZERO;
1481 bool no_move = flags & ALLOCM_NO_MOVE;
1482 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1483 bool try_tcache_alloc, try_tcache_dalloc;
1484 arena_t *arena;
1485
1486 assert(ptr != NULL);
1487 assert(*ptr != NULL);
1488 assert(size != 0);
1489 assert(SIZE_T_MAX - size >= extra);
1490 assert(malloc_initialized || IS_INITIALIZER);
1491
1492 if (arena_ind != UINT_MAX) {
1493 arena_chunk_t *chunk;
1494 try_tcache_alloc = true;
1495 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
1496 try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
1497 arenas[arena_ind]);
1498 arena = arenas[arena_ind];
1499 } else {
1500 try_tcache_alloc = true;
1501 try_tcache_dalloc = true;
1502 arena = NULL;
1503 }
1504
1505 p = *ptr;
1506 if (config_prof && opt_prof) {
1507 prof_thr_cnt_t *cnt;
1508
1509 /*
1510 * usize isn't knowable before iralloc() returns when extra is
1511 * non-zero. Therefore, compute its maximum possible value and
1512 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1513 * backtrace. prof_realloc() will use the actual usize to
1514 * decide whether to sample.
1515 */
1516 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1517 sa2u(size+extra, alignment);
1518 prof_ctx_t *old_ctx = prof_ctx_get(p);
1519 old_size = isalloc(p, true);
1520 if (config_valgrind && opt_valgrind)
1521 old_rzsize = p2rz(p);
1522 PROF_ALLOC_PREP(1, max_usize, cnt);
1523 if (cnt == NULL)
1524 goto label_oom;
1525 /*
1526 * Use minimum usize to determine whether promotion may happen.
1527 */
1528 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1529 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1530 <= SMALL_MAXCLASS) {
1531 q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1532 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
1533 alignment, zero, no_move, try_tcache_alloc,
1534 try_tcache_dalloc, arena);
1535 if (q == NULL)
1536 goto label_err;
1537 if (max_usize < PAGE) {
1538 usize = max_usize;
1539 arena_prof_promoted(q, usize);
1540 } else
1541 usize = isalloc(q, config_prof);
1542 } else {
1543 q = irallocx(p, size, extra, alignment, zero, no_move,
1544 try_tcache_alloc, try_tcache_dalloc, arena);
1545 if (q == NULL)
1546 goto label_err;
1547 usize = isalloc(q, config_prof);
1548 }
1549 prof_realloc(q, usize, cnt, old_size, old_ctx);
1550 if (rsize != NULL)
1551 *rsize = usize;
1552 } else {
1553 if (config_stats) {
1554 old_size = isalloc(p, false);
1555 if (config_valgrind && opt_valgrind)
1556 old_rzsize = u2rz(old_size);
1557 } else if (config_valgrind && opt_valgrind) {
1558 old_size = isalloc(p, false);
1559 old_rzsize = u2rz(old_size);
1560 }
1561 q = irallocx(p, size, extra, alignment, zero, no_move,
1562 try_tcache_alloc, try_tcache_dalloc, arena);
1563 if (q == NULL)
1564 goto label_err;
1565 if (config_stats)
1566 usize = isalloc(q, config_prof);
1567 if (rsize != NULL) {
1568 if (config_stats == false)
1569 usize = isalloc(q, config_prof);
1570 *rsize = usize;
1571 }
1572 }
1573
1574 *ptr = q;
1575 if (config_stats) {
1576 thread_allocated_t *ta;
1577 ta = thread_allocated_tsd_get();
1578 ta->allocated += usize;
1579 ta->deallocated += old_size;
1580 }
1581 UTRACE(p, size, q);
1582 JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
1583 return (ALLOCM_SUCCESS);
1584 label_err:
1585 if (no_move) {
1586 UTRACE(p, size, q);
1587 return (ALLOCM_ERR_NOT_MOVED);
1588 }
1589 label_oom:
1590 if (config_xmalloc && opt_xmalloc) {
1591 malloc_write("<jemalloc>: Error in rallocm(): "
1592 "out of memory\n");
1593 abort();
1594 }
1595 UTRACE(p, size, 0);
1596 return (ALLOCM_ERR_OOM);
1597 }
1598
1599 int
1600 je_sallocm(const void *ptr, size_t *rsize, int flags)
1601 {
1602 size_t sz;
1603
1604 assert(malloc_initialized || IS_INITIALIZER);
1605
1606 if (config_ivsalloc)
1607 sz = ivsalloc(ptr, config_prof);
1608 else {
1609 assert(ptr != NULL);
1610 sz = isalloc(ptr, config_prof);
1611 }
1612 assert(rsize != NULL);
1613 *rsize = sz;
1614
1615 return (ALLOCM_SUCCESS);
1616 }
1617
1618 int
1619 je_dallocm(void *ptr, int flags)
1620 {
1621 size_t usize;
1622 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1623 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1624 bool try_tcache;
1625
1626 assert(ptr != NULL);
1627 assert(malloc_initialized || IS_INITIALIZER);
1628
1629 if (arena_ind != UINT_MAX) {
1630 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1631 try_tcache = (chunk == ptr || chunk->arena !=
1632 arenas[arena_ind]);
1633 } else
1634 try_tcache = true;
1635
1636 UTRACE(ptr, 0, 0);
1637 if (config_stats || config_valgrind)
1638 usize = isalloc(ptr, config_prof);
1639 if (config_prof && opt_prof) {
1640 if (config_stats == false && config_valgrind == false)
1641 usize = isalloc(ptr, config_prof);
1642 prof_free(ptr, usize);
1643 }
1644 if (config_stats)
1645 thread_allocated_tsd_get()->deallocated += usize;
1646 if (config_valgrind && opt_valgrind)
1647 rzsize = p2rz(ptr);
1648 iqallocx(ptr, try_tcache);
1649 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1650
1651 return (ALLOCM_SUCCESS);
1652 }
1653
1654 int
1655 je_nallocm(size_t *rsize, size_t size, int flags)
1656 {
1657 size_t usize;
1658 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1659 & (SIZE_T_MAX-1));
1660
1661 assert(size != 0);
1662
1663 if (malloc_init())
1664 return (ALLOCM_ERR_OOM);
1665
1666 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1667 if (usize == 0)
1668 return (ALLOCM_ERR_OOM);
1669
1670 if (rsize != NULL)
1671 *rsize = usize;
1672 return (ALLOCM_SUCCESS);
1673 }
1674
1675 #endif
1676 /*
1677 * End experimental functions.
1678 */
1679 /******************************************************************************/
1680 /*
1681 * The following functions are used by threading libraries for protection of
1682 * malloc during fork().
1683 */
1684
1685 /*
1686 * If an application creates a thread before doing any allocation in the main
1687 * thread, then calls fork(2) in the main thread followed by memory allocation
1688 * in the child process, a race can occur that results in deadlock within the
1689 * child: the main thread may have forked while the created thread had
1690 * partially initialized the allocator. Ordinarily jemalloc prevents
1691 * fork/malloc races via the following functions it registers during
1692 * initialization using pthread_atfork(), but of course that does no good if
1693 * the allocator isn't fully initialized at fork time. The following library
1694 * constructor is a partial solution to this problem. It may still possible to
1695 * trigger the deadlock described above, but doing so would involve forking via
1696 * a library constructor that runs before jemalloc's runs.
1697 */
1698 JEMALLOC_ATTR(constructor)
1699 static void
1700 jemalloc_constructor(void)
1701 {
1702
1703 malloc_init();
1704 }
1705
1706 #ifndef JEMALLOC_MUTEX_INIT_CB
1707 void
1708 jemalloc_prefork(void)
1709 #else
1710 JEMALLOC_EXPORT void
1711 _malloc_prefork(void)
1712 #endif
1713 {
1714 unsigned i;
1715
1716 #ifdef JEMALLOC_MUTEX_INIT_CB
1717 if (malloc_initialized == false)
1718 return;
1719 #endif
1720 assert(malloc_initialized);
1721
1722 /* Acquire all mutexes in a safe order. */
1723 ctl_prefork();
1724 malloc_mutex_prefork(&arenas_lock);
1725 for (i = 0; i < narenas_total; i++) {
1726 if (arenas[i] != NULL)
1727 arena_prefork(arenas[i]);
1728 }
1729 prof_prefork();
1730 chunk_prefork();
1731 base_prefork();
1732 huge_prefork();
1733 }
1734
1735 #ifndef JEMALLOC_MUTEX_INIT_CB
1736 void
1737 jemalloc_postfork_parent(void)
1738 #else
1739 JEMALLOC_EXPORT void
1740 _malloc_postfork(void)
1741 #endif
1742 {
1743 unsigned i;
1744
1745 #ifdef JEMALLOC_MUTEX_INIT_CB
1746 if (malloc_initialized == false)
1747 return;
1748 #endif
1749 assert(malloc_initialized);
1750
1751 /* Release all mutexes, now that fork() has completed. */
1752 huge_postfork_parent();
1753 base_postfork_parent();
1754 chunk_postfork_parent();
1755 prof_postfork_parent();
1756 for (i = 0; i < narenas_total; i++) {
1757 if (arenas[i] != NULL)
1758 arena_postfork_parent(arenas[i]);
1759 }
1760 malloc_mutex_postfork_parent(&arenas_lock);
1761 ctl_postfork_parent();
1762 }
1763
1764 void
1765 jemalloc_postfork_child(void)
1766 {
1767 unsigned i;
1768
1769 assert(malloc_initialized);
1770
1771 /* Release all mutexes, now that fork() has completed. */
1772 huge_postfork_child();
1773 base_postfork_child();
1774 chunk_postfork_child();
1775 prof_postfork_child();
1776 for (i = 0; i < narenas_total; i++) {
1777 if (arenas[i] != NULL)
1778 arena_postfork_child(arenas[i]);
1779 }
1780 malloc_mutex_postfork_child(&arenas_lock);
1781 ctl_postfork_child();
1782 }
1783
1784 /******************************************************************************/
1785 /*
1786 * The following functions are used for TLS allocation/deallocation in static
1787 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1788 * is that these avoid accessing TLS variables.
1789 */
1790
1791 static void *
1792 a0alloc(size_t size, bool zero)
1793 {
1794
1795 if (malloc_init())
1796 return (NULL);
1797
1798 if (size == 0)
1799 size = 1;
1800
1801 if (size <= arena_maxclass)
1802 return (arena_malloc(arenas[0], size, zero, false));
1803 else
1804 return (huge_malloc(size, zero));
1805 }
1806
1807 void *
1808 a0malloc(size_t size)
1809 {
1810
1811 return (a0alloc(size, false));
1812 }
1813
1814 void *
1815 a0calloc(size_t num, size_t size)
1816 {
1817
1818 return (a0alloc(num * size, true));
1819 }
1820
1821 void
1822 a0free(void *ptr)
1823 {
1824 arena_chunk_t *chunk;
1825
1826 if (ptr == NULL)
1827 return;
1828
1829 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1830 if (chunk != ptr)
1831 arena_dalloc(chunk->arena, chunk, ptr, false);
1832 else
1833 huge_dalloc(ptr, true);
1834 }
1835
1836 /******************************************************************************/