#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifdef JEMALLOC_STATS
# ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
# else
pthread_key_t thread_allocated_tsd;
# endif
#endif

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size);

/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
    if (result < 0)
        result = errno;
#endif
}

void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    /* Allocate enough space for trailing bins. */
    ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
        + (sizeof(arena_bin_t) * nbins));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    ARENA_SET(ret);

    return (ret);
}
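
/*
 * Illustrative walk-through (not part of the original source): with
 * narenas == 4, arenas 0..2 initialized with nthreads of {3, 0, 2}, and
 * arenas[3] still NULL, the scan above picks choose == 1; since
 * arenas[1]->nthreads == 0, the thread binds to arena 1 rather than
 * paying to initialize arena 3.
 */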

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
    char *b = strerror_r(errnum, buf, buflen);
    if (b != buf) {
        strncpy(buf, b, buflen);
        buf[buflen-1] = '\0';
    }
    return (0);
#else
    return (strerror_r(errnum, buf, buflen));
#endif
}
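
/*
 * Example usage (illustrative, not from the original source): format the
 * current errno before emitting a diagnostic.
 *
 *     char ebuf[128];
 *     buferror(errno, ebuf, sizeof(ebuf));
 *     malloc_write(ebuf);
 */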

static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
    unsigned i;

    /*
     * Merge stats from extant threads.  This is racy, since individual
     * threads do not lock when recording tcache stats events.  As a
     * consequence, the final stats may be slightly out of date by the time
     * they are reported, if other threads continue to allocate.
     */
    for (i = 0; i < narenas; i++) {
        arena_t *arena = arenas[i];
        if (arena != NULL) {
            tcache_t *tcache;

            /*
             * tcache_stats_merge() locks bins, so if any code is
             * introduced that acquires both arena and bin locks in
             * the opposite order, deadlocks may result.
             */
            malloc_mutex_lock(&arena->lock);
            ql_foreach(tcache, &arena->tcache_ql, link) {
                tcache_stats_merge(tcache, arena);
            }
            malloc_mutex_unlock(&arena->lock);
        }
    }
#endif
    JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
    thread_allocated_t *thread_allocated = (thread_allocated_t *)
        imalloc(sizeof(thread_allocated_t));
    if (thread_allocated == NULL) {
        static thread_allocated_t static_thread_allocated = {0, 0};
        malloc_write("<jemalloc>: Error allocating TSD;"
            " mallctl(\"thread.{de,}allocated[p]\", ...)"
            " will be inaccurate\n");
        if (opt_abort)
            abort();
        return (&static_thread_allocated);
    }
    pthread_setspecific(thread_allocated_tsd, thread_allocated);
    thread_allocated->allocated = 0;
    thread_allocated->deallocated = 0;
    return (thread_allocated);
}
#endif

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error; arbitrarily assume one CPU. */
        ret = 1;
    } else
        ret = (unsigned)result;

    return (ret);
}

static void
arenas_cleanup(void *arg)
{
    arena_t *arena = (arena_t *)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
    uint64_t *allocated = (uint64_t *)arg;

    if (allocated != NULL)
        idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E':
        case 'F': case 'G': case 'H': case 'I': case 'J':
        case 'K': case 'L': case 'M': case 'N': case 'O':
        case 'P': case 'Q': case 'R': case 'S': case 'T':
        case 'U': case 'V': case 'W': case 'X': case 'Y':
        case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e':
        case 'f': case 'g': case 'h': case 'i': case 'j':
        case 'k': case 'l': case 'm': case 'n': case 'o':
        case 'p': case 'q': case 'r': case 's': case 't':
        case 'u': case 'v': case 'w': case 'x': case 'y':
        case 'z':
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string "
                    "ends with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf "
                "string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the
             * next time this function is called, it will
             * assume that end of input has been cleanly
             * reached if no input remains, but we have
             * optimistically already consumed the comma if
             * one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string "
                    "ends with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
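
/*
 * Example (illustrative, not from the original source): for the conf
 * string "abort:true,narenas:4", successive malloc_conf_next() calls
 * yield
 *
 *     k = "abort",   klen = 5, v = "true", vlen = 4
 *     k = "narenas", klen = 7, v = "4",    vlen = 1
 *
 * after which *opts_p points at the terminating '\0' and the caller's
 * while loop exits.
 */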

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
    char buf[PATH_MAX + 1];

    malloc_write("<jemalloc>: ");
    malloc_write(msg);
    malloc_write(": ");
    memcpy(buf, k, klen);
    memcpy(&buf[klen], ":", 1);
    memcpy(&buf[klen+1], v, vlen);
    buf[klen+1+vlen] = '\0';
    malloc_write(buf);
    malloc_write("\n");
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (JEMALLOC_P(malloc_conf) != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = JEMALLOC_P(malloc_conf);
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
                "/etc/malloc.conf"
#endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        }
        case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        }
        default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL(n)                                             \
            if (sizeof(#n)-1 == klen && strncmp(#n, k,                  \
                klen) == 0) {                                           \
                if (strncmp("true", v, vlen) == 0 &&                    \
                    vlen == sizeof("true")-1)                           \
                    opt_##n = true;                                     \
                else if (strncmp("false", v, vlen) ==                   \
                    0 && vlen == sizeof("false")-1)                     \
                    opt_##n = false;                                    \
                else {                                                  \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                }                                                       \
                continue;                                               \
            }
#define CONF_HANDLE_SIZE_T(n, min, max)                                 \
            if (sizeof(#n)-1 == klen && strncmp(#n, k,                  \
                klen) == 0) {                                           \
                unsigned long ul;                                       \
                char *end;                                              \
                                                                        \
                errno = 0;                                              \
                ul = strtoul(v, &end, 0);                               \
                if (errno != 0 || (uintptr_t)end -                      \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (ul < min || ul > max) {                      \
                    malloc_conf_error(                                  \
                        "Out-of-range conf value",                      \
                        k, klen, v, vlen);                              \
                } else                                                  \
                    opt_##n = ul;                                       \
                continue;                                               \
            }
#define CONF_HANDLE_SSIZE_T(n, min, max)                                \
            if (sizeof(#n)-1 == klen && strncmp(#n, k,                  \
                klen) == 0) {                                           \
                long l;                                                 \
                char *end;                                              \
                                                                        \
                errno = 0;                                              \
                l = strtol(v, &end, 0);                                 \
                if (errno != 0 || (uintptr_t)end -                      \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (l < (ssize_t)min || l >                      \
                    (ssize_t)max) {                                     \
                    malloc_conf_error(                                  \
                        "Out-of-range conf value",                      \
                        k, klen, v, vlen);                              \
                } else                                                  \
                    opt_##n = l;                                        \
                continue;                                               \
            }
#define CONF_HANDLE_CHAR_P(n, d)                                        \
            if (sizeof(#n)-1 == klen && strncmp(#n, k,                  \
                klen) == 0) {                                           \
                size_t cpylen = (vlen <=                                \
                    sizeof(opt_##n)-1) ? vlen :                         \
                    sizeof(opt_##n)-1;                                  \
                strncpy(opt_##n, v, cpylen);                            \
                opt_##n[cpylen] = '\0';                                 \
                continue;                                               \
            }

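            /*
             * Example (illustrative, not from the original source): for
             * the pair k = "abort", v = "true", CONF_HANDLE_BOOL(abort)
             * below matches the 5-byte key, sets opt_abort = true, and
             * continues to the next key/value pair.
             */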
            CONF_HANDLE_BOOL(abort)
            CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
                PAGE_SHIFT-1)
            CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
                PAGE_SHIFT-1)
            /*
             * Chunks always require at least one header page, plus
             * one data page.
             */
            CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
            CONF_HANDLE_BOOL(junk)
            CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
            CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
            CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
            CONF_HANDLE_BOOL(tcache)
            CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
                (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
            CONF_HANDLE_BOOL(prof)
            CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
            CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
            CONF_HANDLE_BOOL(prof_active)
            CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
                (sizeof(uint64_t) << 3) - 1)
            CONF_HANDLE_BOOL(prof_accum)
            CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
                (sizeof(uint64_t) << 3) - 1)
            CONF_HANDLE_BOOL(prof_gdump)
            CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
            CONF_HANDLE_BOOL(overcommit)
#endif
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }

        /* Validate configuration of options that are inter-related. */
        if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
            malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
                "relationship; restoring defaults\n");
            opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
            opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
        }
    }
}
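
/*
 * Example (illustrative, not from the original source): equivalent ways to
 * supply options through the three sources probed above:
 *
 *     MALLOC_CONF="narenas:2,lg_chunk:24" ./app
 *     ln -s "narenas:2,lg_chunk:24" /etc/malloc.conf
 *     const char *malloc_conf = "narenas:2,lg_chunk:24";
 *
 * Pairs are applied in parse order, so the environment variable (parsed
 * last) overrides the symbolic link, which overrides the compiled-in
 * string.
 */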

static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || malloc_initializer == pthread_self()) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
    if (malloc_initializer != (unsigned long)0) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

#ifdef DYNAMIC_PAGE_SHIFT
    /* Get page size. */
    {
        long result;

        result = sysconf(_SC_PAGESIZE);
        assert(result != -1);
        pagesize = (size_t)result;

        /*
         * We assume that pagesize is a power of 2 when calculating
         * pagesize_mask and lg_pagesize.
         */
        assert(((result - 1) & result) == 0);
        pagesize_mask = result - 1;
        lg_pagesize = ffs((int)result) - 1;
    }
#endif

#ifdef JEMALLOC_PROF
    prof_boot0();
#endif

    malloc_conf_init();

    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
        jemalloc_postfork) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

#ifdef JEMALLOC_PROF
    prof_boot1();
#endif

    if (arena_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

#ifdef JEMALLOC_TCACHE
    if (tcache_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
#endif

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
    /* Initialize allocation counters before any allocations can occur. */
    if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
        != 0) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
#endif

    if (malloc_mutex_init(&arenas_lock)) {
        /* Unlock init_lock, as every other failure path here does. */
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Assign the initial arena to the initial thread, in order to avoid
     * spurious creation of an extra arena if the application switches to
     * threaded mode.
     */
    ARENA_SET(arenas[0]);
    arenas[0]->nthreads++;

#ifdef JEMALLOC_PROF
    if (prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
#endif

    /* Get number of CPUs. */
    malloc_initializer = pthread_self();
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        char buf[UMAX2S_BUFSIZE];

        narenas = chunksize / sizeof(arena_t *);
        malloc_write("<jemalloc>: Reducing narenas to limit (");
        malloc_write(u2s(narenas, 10, buf));
        malloc_write(")\n");
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
    /* Register the custom zone. */
    malloc_zone_register(create_zone());

    /*
     * Convert the default szone to an "overlay zone" that is capable of
     * deallocating szone-allocated objects, but allocating new objects
     * from jemalloc.
     */
    szone2ozone(malloc_default_zone());
#endif

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

    if (malloc_init_hard())
        abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
    void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init()) {
        ret = NULL;
        goto OOM;
    }

    if (size == 0) {
#ifdef JEMALLOC_SYSV
        if (opt_sysv == false)
#endif
            size = 1;
#ifdef JEMALLOC_SYSV
        else {
# ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in malloc(): "
                    "invalid size 0\n");
                abort();
            }
# endif
            ret = NULL;
            goto RETURN;
        }
#endif
    }

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto OOM;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            small_maxclass) {
            ret = imalloc(small_maxclass+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        usize = s2u(size);
#endif
        ret = imalloc(size);
    }

OOM:
    if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
#endif
        errno = ENOMEM;
    }

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, 0);
    }
#endif
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
    int ret;
    size_t usize
#ifdef JEMALLOC_CC_SILENCE
        = 0
#endif
        ;
    void *result;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0) {
#ifdef JEMALLOC_SYSV
            if (opt_sysv == false)
#endif
                size = 1;
#ifdef JEMALLOC_SYSV
            else {
# ifdef JEMALLOC_XMALLOC
                if (opt_xmalloc) {
                    malloc_write("<jemalloc>: Error in "
                        "posix_memalign(): invalid size "
                        "0\n");
                    abort();
                }
# endif
                result = NULL;
                *memptr = NULL;
                ret = 0;
                goto RETURN;
            }
#endif
        }

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in "
                    "posix_memalign(): invalid alignment\n");
                abort();
            }
#endif
            result = NULL;
            ret = EINVAL;
            goto RETURN;
        }

        usize = sa2u(size, alignment, NULL);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto RETURN;
        }

#ifdef JEMALLOC_PROF
        if (opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= small_maxclass) {
                    assert(sa2u(small_maxclass+1,
                        alignment, NULL) != 0);
                    result = ipalloc(sa2u(small_maxclass+1,
                        alignment, NULL), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
#endif
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in posix_memalign(): "
                "out of memory\n");
            abort();
        }
#endif
        ret = ENOMEM;
        goto RETURN;
    }

    *memptr = result;
    ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
    if (result != NULL) {
        assert(usize == isalloc(result));
        ALLOCATED_ADD(usize, 0);
    }
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
#endif
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

    return imemalign(memptr, alignment, size);
}
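
/*
 * Example usage (illustrative, not from the original source):
 * posix_memalign() reports failure through its return value rather than
 * errno.
 *
 *     void *p;
 *     if (posix_memalign(&p, 4096, 1024) == 0) {
 *         ... p is 4096-byte aligned ...
 *         free(p);
 *     }
 */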

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto RETURN;
    }

    num_size = num * size;
    if (num_size == 0) {
#ifdef JEMALLOC_SYSV
        if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
            num_size = 1;
#ifdef JEMALLOC_SYSV
        else {
            ret = NULL;
            goto RETURN;
        }
#endif
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
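    /*
     * Illustrative example (not from the original source): with a 64-bit
     * size_t, num = 2^33 + 1 and size = 2^33 make num_size wrap to 2^33,
     * and num_size / size == 1 != num, so the overflow is caught; when
     * both operands fit in 32 bits, the division is skipped entirely.
     */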
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto RETURN;
    }

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto RETURN;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= small_maxclass) {
            ret = icalloc(small_maxclass+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        usize = s2u(num_size);
#endif
        ret = icalloc(num_size);
    }

RETURN:
    if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
        if (opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
#endif
        errno = ENOMEM;
    }

#ifdef JEMALLOC_PROF
    if (opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, 0);
    }
#endif
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
    void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize
# ifdef JEMALLOC_CC_SILENCE
        = 0
# endif
        ;
    size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
    prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
        = NULL
# endif
        ;
#endif

    if (size == 0) {
#ifdef JEMALLOC_SYSV
        if (opt_sysv == false)
#endif
            size = 1;
#ifdef JEMALLOC_SYSV
        else {
            if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
                old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
                if (opt_prof) {
                    old_ctx = prof_ctx_get(ptr);
                    cnt = NULL;
                }
#endif
                idalloc(ptr);
            }
#ifdef JEMALLOC_PROF
            else if (opt_prof) {
                old_ctx = NULL;
                cnt = NULL;
            }
#endif
            ret = NULL;
            goto RETURN;
        }
#endif
    }

    if (ptr != NULL) {
        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
        old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
        if (opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto OOM;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= small_maxclass) {
                ret = iralloc(ptr, small_maxclass+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else
#endif
        {
#ifdef JEMALLOC_STATS
            usize = s2u(size);
#endif
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

#ifdef JEMALLOC_PROF
OOM:
#endif
        if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
#endif
            errno = ENOMEM;
        }
    } else {
#ifdef JEMALLOC_PROF
        if (opt_prof)
            old_ctx = NULL;
#endif
        if (malloc_init()) {
#ifdef JEMALLOC_PROF
            if (opt_prof)
                cnt = NULL;
#endif
            ret = NULL;
        } else {
#ifdef JEMALLOC_PROF
            if (opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        small_maxclass) {
                        ret = imalloc(small_maxclass+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else
#endif
            {
#ifdef JEMALLOC_STATS
                usize = s2u(size);
#endif
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
            if (opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
#endif
            errno = ENOMEM;
        }
    }

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
    if (ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, old_size);
    }
#endif
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

    if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
        size_t usize;
#endif

        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

#ifdef JEMALLOC_STATS
        usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
        if (opt_prof) {
# ifndef JEMALLOC_STATS
            usize = isalloc(ptr);
# endif
            prof_free(ptr, usize);
        }
#endif
#ifdef JEMALLOC_STATS
        ALLOCATED_ADD(0, usize);
#endif
        idalloc(ptr);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
    void *ret;
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        imemalign(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
    if (result != 0)
        return (NULL);
#endif
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
    void *ret;
#ifdef JEMALLOC_CC_SILENCE
    int result =
#endif
        imemalign(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
    if (result != 0)
        return (NULL);
#endif
    return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
    ret = ivsalloc(ptr);
#else
    assert(ptr != NULL);
    ret = isalloc(ptr);
#endif

    return (ret);
}
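
/*
 * Example usage (illustrative, not from the original source): the usable
 * size can exceed the requested size because of size-class rounding.
 *
 *     void *p = malloc(100);
 *     size_t n = malloc_usable_size(p);
 *
 * Here n >= 100; the exact value depends on the size class backing p.
 */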

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
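
/*
 * Example usage (illustrative, not from the original source): read the
 * total number of bytes currently allocated by the application (available
 * when statistics are enabled).
 *
 *     size_t allocated, sz = sizeof(allocated);
 *     if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *         ... use allocated ...
 *
 * mallctlnametomib()/mallctlbymib() let callers translate a name once and
 * then repeat the query without the cost of string lookup.
 */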

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
        NULL)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt;
#endif

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto OOM;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
    if (usize == 0)
        goto OOM;

#ifdef JEMALLOC_PROF
    if (opt_prof) {
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto OOM;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            small_maxclass) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(small_maxclass+1) : sa2u(small_maxclass+1,
                alignment, NULL);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto OOM;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto OOM;
        }
        prof_malloc(p, usize, cnt);
        if (rsize != NULL)
            *rsize = usize;
    } else
#endif
    {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto OOM;
#ifndef JEMALLOC_STATS
        if (rsize != NULL)
#endif
        {
#ifdef JEMALLOC_STATS
            if (rsize != NULL)
#endif
                *rsize = usize;
        }
    }

    *ptr = p;
#ifdef JEMALLOC_STATS
    assert(usize == isalloc(p));
    ALLOCATED_ADD(usize, 0);
#endif
    return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
    if (opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
#endif
    *ptr = NULL;
    return (ALLOCM_ERR_OOM);
}
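
/*
 * Example usage (illustrative; assumes the ALLOCM_LG_ALIGN() and
 * ALLOCM_ZERO macros from the public experimental API header): allocate
 * at least 1000 zeroed bytes aligned to a 64-byte boundary.
 *
 *     void *p;
 *     size_t rsize;
 *     if (allocm(&p, &rsize, 1000, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO)
 *         == ALLOCM_SUCCESS) {
 *         ... rsize >= 1000 bytes are usable ...
 *     }
 */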

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
    void *p, *q;
    size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t old_size;
#endif
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
    prof_thr_cnt_t *cnt;
#endif

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || malloc_initializer == pthread_self());

    p = *ptr;
#ifdef JEMALLOC_PROF
    if (opt_prof) {
        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment, NULL);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto OOM;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size,
            alignment, NULL)) <= small_maxclass) {
            q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
                size+extra) ? 0 : size+extra - (small_maxclass+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto ERR;
            if (max_usize < PAGE_SIZE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto ERR;
            usize = isalloc(q);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else
#endif
    {
#ifdef JEMALLOC_STATS
        old_size = isalloc(p);
#endif
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto ERR;
#ifndef JEMALLOC_STATS
        if (rsize != NULL)
#endif
        {
            usize = isalloc(q);
#ifdef JEMALLOC_STATS
            if (rsize != NULL)
#endif
                *rsize = usize;
        }
    }

    *ptr = q;
#ifdef JEMALLOC_STATS
    ALLOCATED_ADD(usize, old_size);
#endif
    return (ALLOCM_SUCCESS);
ERR:
    if (no_move)
        return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
    if (opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
#endif
    return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
    sz = ivsalloc(ptr);
#else
    assert(ptr != NULL);
    sz = isalloc(ptr);
#endif
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
    size_t usize;
#endif

    assert(ptr != NULL);
    assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
    usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
    if (opt_prof) {
# ifndef JEMALLOC_STATS
        usize = isalloc(ptr);
# endif
        prof_free(ptr, usize);
    }
#endif
#ifdef JEMALLOC_STATS
    ALLOCATED_ADD(0, usize);
#endif
    idalloc(ptr);

    return (ALLOCM_SUCCESS);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
    unsigned i;

    /* Acquire all mutexes in a safe order. */

    malloc_mutex_lock(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            malloc_mutex_lock(&arenas[i]->lock);
    }

    malloc_mutex_lock(&base_mtx);

    malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
    malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
    malloc_mutex_lock(&swap_mtx);
#endif
}

void
jemalloc_postfork(void)
{
    unsigned i;

    /* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
    malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
    malloc_mutex_unlock(&dss_mtx);
#endif

    malloc_mutex_unlock(&huge_mtx);

    malloc_mutex_unlock(&base_mtx);

    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            malloc_mutex_unlock(&arenas[i]->lock);
    }
    malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/