/*
 * Copyright (c) 1999, 2006-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <pthread_internals.h>
#include "magmallocProvider.h"
#include <mach-o/dyld.h>	/* for NSVersionOfLinkTimeLibrary() */

#import <malloc/malloc.h>
#import <crt_externs.h>
#import <pthread_internals.h>
#import <mach/mach_vm.h>
#import <mach/mach_init.h>
#import "scalable_malloc.h"
#import "stack_logging.h"
#import "malloc_printf.h"
#import "CrashReporterClient.h"
/*
 * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a
 * user-specified size_t, which can cause overflow (and subsequent crashes)
 * for values near SIZE_T_MAX.  Rather than add extra "if" checks everywhere
 * this occurs, it is easier to just set an absolute maximum request size,
 * and immediately return an error if the requested size exceeds this maximum.
 * Of course, values less than this absolute max can fail later if the value
 * is still too large for the available memory.  The largest value added
 * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set
 * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX.
 */
#define MALLOC_ABSOLUTE_MAX_SIZE	(SIZE_T_MAX - (2 * PAGE_SIZE))

#define USE_SLEEP_RATHER_THAN_ABORT	0
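/*
 * Illustrative sketch (not part of this file's build): callers below guard each
 * user-supplied size against this constant before doing any arithmetic on it,
 * mirroring the checks in malloc_zone_malloc() and friends further down:
 *
 *	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
 *		return NULL;	// refuse sizes that could overflow when rounded up to a page
 *	}
 *	ptr = zone->malloc(zone, size);
 */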
typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);

__private_extern__ pthread_lock_t _malloc_lock = 0; // initialized in __libc_init
/* The following variables are exported for the benefit of performance tools.
 *
 * It should always be safe to first read malloc_num_zones, then read
 * malloc_zones without taking the lock, if only iteration is required and
 * provided that when malloc_destroy_zone is called all prior operations on that
 * zone are complete and no further calls referencing that zone can be made.
 */
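/*
 * Illustrative sketch (not part of this file's build), relying on the ordering
 * guarantee described above: a performance tool may read malloc_num_zones first
 * and then walk malloc_zones without taking _malloc_lock, provided it only
 * iterates.  The helper name dump_zones() is hypothetical.
 *
 *	static void dump_zones(void) {
 *		unsigned n = malloc_num_zones;		// read the count first
 *		malloc_zone_t **zones = malloc_zones;	// then the array pointer
 *		for (unsigned i = 0; i < n; ++i)
 *			malloc_printf("zone %u at %p\n", i, zones[i]);
 *	}
 */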
unsigned malloc_num_zones = 0;
unsigned malloc_num_zones_allocated = 0;
malloc_zone_t **malloc_zones = 0;
malloc_logger_t *malloc_logger = NULL;

unsigned malloc_debug_flags = 0;

unsigned malloc_check_start = 0; // 0 means don't check
unsigned malloc_check_counter = 0;
unsigned malloc_check_each = 1000;

/* global flag to suppress ASL logging e.g. for syslogd */
int _malloc_no_asl_log = 0;

static int malloc_check_sleep = 100; // default 100 second sleep
static int malloc_check_abort = 0; // default is to sleep, not abort

static int malloc_debug_file = STDERR_FILENO;
/*
 * State indicated by malloc_def_zone_state
 * 0 - the default zone has not yet been created
 * 1 - a Malloc* environment variable has been set
 * 2 - the default zone has been created and an environment variable scan done
 * 3 - a new default zone has been created and another environment variable scan done
 */
__private_extern__ int malloc_def_zone_state = 0;
__private_extern__ malloc_zone_t *__zone0 = NULL;

static const char Malloc_Facility[] = "com.apple.Libsystem.malloc";

#define MALLOC_LOCK()		LOCK(_malloc_lock)
#define MALLOC_UNLOCK()		UNLOCK(_malloc_lock)
/*
 * Counters that coordinate zone destruction (in malloc_zone_unregister) with
 * find_registered_zone (here abbreviated as FRZ).
 */
static int counterAlice = 0, counterBob = 0;
static int *pFRZCounterLive = &counterAlice, *pFRZCounterDrain = &counterBob;

#define MALLOC_LOG_TYPE_ALLOCATE	stack_logging_type_alloc
#define MALLOC_LOG_TYPE_DEALLOCATE	stack_logging_type_dealloc
#define MALLOC_LOG_TYPE_HAS_ZONE	stack_logging_flag_zone
#define MALLOC_LOG_TYPE_CLEARED		stack_logging_flag_cleared
/*********	Utilities	************/

__private_extern__ uint64_t malloc_entropy[2] = {0, 0};

void __malloc_entropy_setup(const char *apple[]) __attribute__ ((visibility ("hidden")));
static int
__entropy_from_kernel(const char *str)
{
    unsigned long long val;
    char tmp[20], *p;
    int idx = 0;

    /* Skip over key to the first value */
    str = strchr(str, '=');
    if (str == NULL)
        return 0;
    str++;

    while (str && idx < sizeof(malloc_entropy)/sizeof(malloc_entropy[0])) {
        strlcpy(tmp, str, 20);
        p = strchr(tmp, ',');
        if (p) *p = '\0';
        val = strtoull(tmp, NULL, 0);
        malloc_entropy[idx] = (uint64_t)val;
        idx++;
        if ((str = strchr(str, ',')) != NULL)
            str++;
    }

    return idx;
}

void
__malloc_entropy_setup(const char *apple[])
{
    const char **p;

    for (p = apple; p && *p; p++) {
        if (strstr(*p, "malloc_entropy") == *p) {
            if (sizeof(malloc_entropy)/sizeof(malloc_entropy[0]) == __entropy_from_kernel(*p))
                return;
            break;
        }
    }

    malloc_entropy[0] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
    malloc_entropy[1] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
}
static inline malloc_zone_t *find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
    // Returns a zone which contains ptr, else NULL

    if (0 == malloc_num_zones) {
        if (returned_size) *returned_size = 0;
        return NULL;
    }

    // The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered.
    // So don't advance the FRZ counter yet.
    malloc_zone_t *zone = malloc_zones[0];
    size_t size = zone->size(zone, ptr);
    if (size) { // Claimed by this zone?
        if (returned_size) *returned_size = size;
        return zone;
    }

    int *pFRZCounter = pFRZCounterLive;   // Capture pointer to the counter of the moment
    __sync_fetch_and_add(pFRZCounter, 1); // Advance this counter -- our thread is in FRZ

    unsigned        index;
    unsigned        limit = malloc_num_zones;
    malloc_zone_t   **zones = &malloc_zones[1];

    for (index = 1; index < limit; ++index, ++zones) {
        zone = *zones;
        size = zone->size(zone, ptr);
        if (size) { // Claimed by this zone?
            if (returned_size) *returned_size = size;
            __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
            return zone;
        }
    }
    // Unclaimed by any zone.
    if (returned_size) *returned_size = 0;
    __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
    return NULL;
}
__private_extern__ __attribute__((noinline)) void
malloc_error_break(void) {
    // Provides a non-inlined place for various malloc error procedures to call
    // that will be called after an error message appears.  It does not make
    // sense for developers to call this function, so it is marked
    // __private_extern__ to prevent it from becoming API.
    MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe
}
__private_extern__ boolean_t __stack_logging_locked();

__private_extern__ __attribute__((noinline)) int
malloc_gdb_po_unsafe(void) {
    // In order to implement "po" and other data formatters in gdb, the debugger
    // calls functions that call malloc.  The debugger will only run one thread
    // of the program in this case, so if another thread is holding a zone lock,
    // gdb may deadlock in this case.
    //
    // Iterate over the zones in malloc_zones, and call "trylock" on the zone
    // lock.  If trylock succeeds, unlock it, otherwise return "locked".  Returns
    // 0 == safe, 1 == locked/unsafe.

    if (__stack_logging_locked())
        return 1;

    malloc_zone_t **zones = malloc_zones;
    unsigned i, e = malloc_num_zones;

    for (i = 0; i != e; ++i) {
        malloc_zone_t *zone = zones[i];

        // Version must be >= 5 to look at the new introspection field.
        if (zone->version < 5)
            continue;

        if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone))
            return 1;
    }
    return 0;
}
/*********	Creation and destruction	************/

static void set_flags_from_environment(void);
static void
malloc_zone_register_while_locked(malloc_zone_t *zone) {
    size_t protect_size;
    unsigned i;

    /* scan the list of zones, to see if this zone is already registered.  If
     * so, print an error message and return. */
    for (i = 0; i != malloc_num_zones; ++i)
        if (zone == malloc_zones[i]) {
            _malloc_printf(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
            return;
        }

    if (malloc_num_zones == malloc_num_zones_allocated) {
        size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
        size_t alloc_size = malloc_zones_size + vm_page_size;

        malloc_zone_t **new_zones = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);

        /* If there were previously allocated malloc zones, we need to copy them
         * out of the previous array and into the new zones array */
        if (malloc_zones)
            memcpy(new_zones, malloc_zones, malloc_zones_size);

        /* Update the malloc_zones pointer, which we leak if it was previously
         * allocated, and the number of zones allocated */
        protect_size = alloc_size;
        malloc_zones = new_zones;
        malloc_num_zones_allocated = alloc_size / sizeof(malloc_zone_t *);
    } else {
        /* If we don't need to reallocate zones, we need to briefly change the
         * page protection of the malloc zones to allow writes */
        protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
        mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
    }
    malloc_zones[malloc_num_zones++] = zone;

    /* Finally, now that the zone is registered, disallow write access to the
     * malloc_zones array */
    mprotect(malloc_zones, protect_size, PROT_READ);
    //_malloc_printf(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones, malloc_num_zones, protect_size);
}
static void
_malloc_initialize(void) {
    MALLOC_LOCK();
    if (malloc_def_zone_state < 2) {
        unsigned n;
        malloc_zone_t	*zone;

        malloc_def_zone_state += 2;
        set_flags_from_environment(); // will only set flags up to two times
        n = malloc_num_zones;
        zone = create_scalable_zone(0, malloc_debug_flags);
        malloc_zone_register_while_locked(zone);
        malloc_set_zone_name(zone, "DefaultMallocZone");
        if (n != 0) { // make the default first, for efficiency
            unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
            malloc_zone_t *hold = malloc_zones[0];

            if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
                malloc_set_zone_name(hold, NULL);
            }

            mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
            malloc_zones[0] = malloc_zones[n];
            malloc_zones[n] = hold;
            mprotect(malloc_zones, protect_size, PROT_READ);
        }
    }
    // _malloc_printf(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
    // _malloc_printf(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, (unsigned)&malloc_num_zones);
    MALLOC_UNLOCK();
}
static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_zone(void) {
    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
    return malloc_zones[0];
}

malloc_zone_t *
malloc_default_zone(void) {
    return inline_malloc_default_zone();
}
static inline malloc_zone_t *inline_malloc_default_scalable_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_scalable_zone(void) {
    unsigned index;

    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_scalable_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);

    MALLOC_LOCK();
    for (index = 0; index < malloc_num_zones; ++index) {
        malloc_zone_t *z = malloc_zones[index];

        if(z->zone_name && strcmp(z->zone_name, "DefaultMallocZone") == 0) {
            MALLOC_UNLOCK();
            return z;
        }
    }
    MALLOC_UNLOCK();

    malloc_printf("*** malloc_default_scalable_zone() failed to find 'DefaultMallocZone'\n");
    return NULL; // FIXME: abort() instead?
}
malloc_zone_t *
malloc_default_purgeable_zone(void) {
    static malloc_zone_t *dpz;

    if (!dpz) {
        //
        // PR_7288598: Must pass a *scalable* zone (szone) as the helper for create_purgeable_zone().
        // Take care that the zone so obtained is not subject to interposing.
        //
        malloc_zone_t *tmp = create_purgeable_zone(0, inline_malloc_default_scalable_zone(), malloc_debug_flags);
        malloc_zone_register(tmp);
        malloc_set_zone_name(tmp, "DefaultPurgeableMallocZone");
        if (!__sync_bool_compare_and_swap(&dpz, NULL, tmp))
            malloc_destroy_zone(tmp);
    }
    return dpz;
}
// For debugging, allow stack logging to both memory and disk to compare their results.
static void
stack_logging_log_stack_debug(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
    __disk_stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
    stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
}
static void
set_flags_from_environment(void) {
    const char	*flag;
    int		fd;
    char	**env = * _NSGetEnviron();
    char	**p;
    char	*c;

    if (malloc_debug_file != STDERR_FILENO) {
        close(malloc_debug_file);
        malloc_debug_file = STDERR_FILENO;
    }
#if __LP64__
    malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
#else
    int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
    if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126))
        malloc_debug_flags = 0;
    else
        malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
#endif
    stack_logging_enable_logging = 0;
    stack_logging_dontcompact = 0;
    malloc_logger = NULL;
    malloc_check_start = 0;
    malloc_check_each = 1000;
    malloc_check_abort = 0;
    malloc_check_sleep = 100;
    /*
     * Given that all environment variables start with "Malloc" we optimize by scanning quickly
     * first the environment, therefore avoiding repeated calls to getenv().
     * If we are setu/gid these flags are ignored to prevent a malicious invoker from changing
     * our behaviour.
     */
    for (p = env; (c = *p) != NULL; ++p) {
        if (!strncmp(c, "Malloc", 6)) {
            if (issetugid())
                return;
            break;
        }
    }
    if (c == NULL)
        return;
    flag = getenv("MallocLogFile");
    if (flag) {
        fd = open(flag, O_WRONLY|O_APPEND|O_CREAT, 0644);
        if (fd >= 0) {
            malloc_debug_file = fd;
            fcntl(fd, F_SETFD, 0); // clear close-on-exec flag  XXX why?
        } else {
            malloc_printf("Could not open %s, using stderr\n", flag);
        }
    }
    if (getenv("MallocGuardEdges")) {
        malloc_debug_flags |= SCALABLE_MALLOC_ADD_GUARD_PAGES;
        _malloc_printf(ASL_LEVEL_INFO, "protecting edges\n");
        if (getenv("MallocDoNotProtectPrelude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_PRELUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
        }
        if (getenv("MallocDoNotProtectPostlude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
        }
    }
    flag = getenv("MallocStackLogging");
    if (!flag) {
        flag = getenv("MallocStackLoggingNoCompact");
        stack_logging_dontcompact = 1;
    }
    if (flag) {
        // For debugging, the MallocStackLogging or MallocStackLoggingNoCompact environment variables can be set to
        // values of "memory", "disk", or "both" to control which stack logging mechanism to use.  Those strings appear
        // in the flag variable, and the strtoul() call below will return 0, so then we can do string comparison on the
        // value of flag.  The default stack logging now is disk stack logging, since memory stack logging is not 64-bit-aware.
        unsigned long val = strtoul(flag, NULL, 0);
        if (val == 1) val = 0;
        if (val == -1) val = 0;
        if (val) {
            malloc_logger = (void *)val;
            _malloc_printf(ASL_LEVEL_INFO, "recording stacks using recorder %p\n", malloc_logger);
        } else if (strcmp(flag,"memory") == 0) {
            malloc_logger = (malloc_logger_t *)stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks in memory using standard recorder\n");
        } else if (strcmp(flag,"both") == 0) {
            malloc_logger = stack_logging_log_stack_debug;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to both memory and disk for comparison debugging\n");
        } else {	// the default is to log to disk
            malloc_logger = __disk_stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to disk using standard recorder\n");
        }
        stack_logging_enable_logging = 1;
        if (stack_logging_dontcompact) {
            if (malloc_logger == __disk_stack_logging_log_stack) {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; size of log files on disk can increase rapidly\n");
            } else {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; VM can increase rapidly\n");
            }
        }
    }
    if (getenv("MallocScribble")) {
        malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
        _malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
    }
    if (getenv("MallocErrorAbort")) {
        malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
        _malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
    }
#if __LP64__
    /* initialization above forces SCALABLE_MALLOC_ABORT_ON_CORRUPTION of 64-bit processes */
#else
    flag = getenv("MallocCorruptionAbort");
    if (flag && (flag[0] == '0')) { // Set from an environment variable in 32-bit processes
        malloc_debug_flags &= ~SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    } else if (flag) {
        malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    }
#endif
    flag = getenv("MallocCheckHeapStart");
    if (flag) {
        malloc_check_start = strtoul(flag, NULL, 0);
        if (malloc_check_start == 0) malloc_check_start = 1;
        if (malloc_check_start == -1) malloc_check_start = 1;
        flag = getenv("MallocCheckHeapEach");
        if (flag) {
            malloc_check_each = strtoul(flag, NULL, 0);
            if (malloc_check_each == 0) malloc_check_each = 1;
            if (malloc_check_each == -1) malloc_check_each = 1;
        }
        _malloc_printf(ASL_LEVEL_INFO, "checks heap after %dth operation and each %d operations\n", malloc_check_start, malloc_check_each);
        flag = getenv("MallocCheckHeapAbort");
        if (flag)
            malloc_check_abort = strtol(flag, NULL, 0);
        if (malloc_check_abort)
            _malloc_printf(ASL_LEVEL_INFO, "will abort on heap corruption\n");
        else {
            flag = getenv("MallocCheckHeapSleep");
            if (flag)
                malloc_check_sleep = strtol(flag, NULL, 0);
            if (malloc_check_sleep > 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
            else if (malloc_check_sleep < 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
            else
                _malloc_printf(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
        }
    }
    if (getenv("MallocHelp")) {
        _malloc_printf(ASL_LEVEL_INFO,
            "environment variables that can be set for debug:\n"
            "- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
            "- MallocGuardEdges to add 2 guard pages for each large block\n"
            "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
            "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
            "- MallocStackLogging to record all stacks.  Tools like leaks can then be applied\n"
            "- MallocStackLoggingNoCompact to record all stacks.  Needed for malloc_history\n"
            "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
            "- MallocScribble to detect writing on free blocks and missing initializers:\n"
            "  0x55 is written upon free and 0xaa is written on allocation\n"
            "- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
            "- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
            "- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
            "- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
            "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
            "  MallocCorruptionAbort is always set on 64-bit processes\n"
            "- MallocErrorAbort to abort on any malloc error, including out of memory\n"
            "- MallocHelp - this help!\n");
    }
}
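/*
 * Illustrative usage (not part of this file's build): these variables are read
 * from the environment at initialization, so a typical debugging session looks
 * something like the following shell invocation (the program name is hypothetical):
 *
 *	MallocStackLoggingNoCompact=1 MallocScribble=1 \
 *	MallocCheckHeapStart=1000 MallocCheckHeapEach=100 ./myprog
 *
 * Because set_flags_from_environment() bails out for setuid/setgid images,
 * these flags have no effect on privileged binaries.
 */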
malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
    malloc_zone_t	*zone;

    /* start_size doesn't seem to actually be used, but we test anyways */
    if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
    malloc_zone_register(zone);
    return zone;
}
/*
 * For use by CheckFix: establish a new default zone whose behavior is, apart from
 * the use of death-row and per-CPU magazines, that of Leopard.
 */
void
malloc_create_legacy_default_zone(void)
{
    malloc_zone_t	*zone;
    int			i;

    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_legacy_scalable_zone(0, malloc_debug_flags);

    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);

    //
    // Establish the legacy scalable zone just created as the default zone.
    //
    malloc_zone_t *hold = malloc_zones[0];
    if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
        malloc_set_zone_name(hold, NULL);
    }
    malloc_set_zone_name(zone, "DefaultMallocZone");

    unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
    mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

    // assert(zone == malloc_zones[malloc_num_zones - 1]);
    for (i = malloc_num_zones - 1; i > 0; --i) {
        malloc_zones[i] = malloc_zones[i - 1];
    }
    malloc_zones[0] = zone;

    mprotect(malloc_zones, protect_size, PROT_READ);
    MALLOC_UNLOCK();
}
void
malloc_destroy_zone(malloc_zone_t *zone) {
    malloc_set_zone_name(zone, NULL); // Deallocate zone name wherever it may reside PR_7701095
    malloc_zone_unregister(zone);
    zone->destroy(zone);
}
/* called from the {put,set,unset}env routine */
__private_extern__ void
__malloc_check_env_name(const char *name)
{
    MALLOC_LOCK();
    /*
     * 2. malloc will no longer take notice of *programmatic* changes to the MALLOC_* environment variables
     *    (i.e. calls to putenv() or setenv() that manipulate these environment variables.)
     */
    if(malloc_def_zone_state == 2 && strncmp(name, "Malloc", 6) == 0)
        malloc_def_zone_state = 1;
    MALLOC_UNLOCK();
}
/*********	Block creation and manipulation	************/

static void
internal_check(void) {
    static vm_address_t	*frames = NULL;
    static unsigned	num_frames;
    if (malloc_zone_check(NULL)) {
        if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
        thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
    } else {
        _SIMPLE_STRING b = _simple_salloc();

        if (b)
            _simple_sprintf(b, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
        else
            _malloc_printf(MALLOC_PRINTF_NOLOG, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);

        malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);

        if (frames) {
            unsigned	index = 1;
            if (b) {
                _simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
                malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
            } else {
                /*
                 * Should only get here if vm_allocate() can't get a single page of
                 * memory, implying _simple_asl_log() would also fail.  So we just
                 * print to the file descriptor.
                 */
                _malloc_printf(MALLOC_PRINTF_NOLOG, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _malloc_printf(MALLOC_PRINTF_NOLOG, "%p ", frames[index++]);
                _malloc_printf(MALLOC_PRINTF_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
            }
        }
        if (malloc_check_each > 1) {
            unsigned	recomm_each = (malloc_check_each > 10) ? malloc_check_each/10 : 1;
            unsigned	recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
            malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
        }
        if (malloc_check_abort) {
            CRSetCrashLogMessage(b ? _simple_string(b) : "*** MallocCheckHeap: FAILED check");
            abort();
        } else if (b)
            _simple_sfree(b);
        if (malloc_check_sleep > 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
                malloc_check_sleep);
            sleep(malloc_check_sleep);
        } else if (malloc_check_sleep < 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping once for %d seconds to leave time to attach\n",
                -malloc_check_sleep);
            sleep(-malloc_check_sleep);
            malloc_check_sleep = 0;
        }
    }
    malloc_check_start += malloc_check_each;
}
void *
malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
    void	*ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->malloc(zone, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}
void *
malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) {
    void	*ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->calloc(zone, num_items, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, (uintptr_t)(num_items * size), 0,
            (uintptr_t)ptr, 0);
    return ptr;
}
void *
malloc_zone_valloc(malloc_zone_t *zone, size_t size) {
    void	*ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->valloc(zone, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}
void *
malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
    void	*new_ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    new_ptr = zone->realloc(zone, ptr, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, (uintptr_t)size,
            (uintptr_t)new_ptr, 0);
    return new_ptr;
}
void
malloc_zone_free(malloc_zone_t *zone, void *ptr) {
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    zone->free(zone, ptr);
}
void
malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    zone->free_definite_size(zone, ptr, size);
}
malloc_zone_t *
malloc_zone_from_ptr(const void *ptr) {
    if (!ptr)
        return NULL;
    else
        return find_registered_zone(ptr, NULL);
}
void *
malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
    void	*ptr;
    if (zone->version < 5) // Version must be >= 5 to look at the new memalign field.
        return NULL;
    if (!(zone->memalign))
        return NULL;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    if (alignment < sizeof( void *) ||		// excludes 0 == alignment
        0 != (alignment & (alignment - 1))) {	// relies on sizeof(void *) being a power of two.
        return NULL;
    }
    ptr = zone->memalign(zone, alignment, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}
/*********	Functions for zone implementors	************/

void
malloc_zone_register(malloc_zone_t *zone) {
    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);
    MALLOC_UNLOCK();
}
void
malloc_zone_unregister(malloc_zone_t *z) {
    unsigned	index;

    if (malloc_num_zones == 0)
        return;

    MALLOC_LOCK();
    for (index = 0; index < malloc_num_zones; ++index) {
        if (z != malloc_zones[index])
            continue;

        // Modify the page to allow write access, so that we can update the
        // malloc_zones array.
        size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
        mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

        // If we found a match, replace it with the entry at the end of the list, shrink the list,
        // and leave the end of the list intact to avoid racing with find_registered_zone().

        malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
        --malloc_num_zones;

        mprotect(malloc_zones, protect_size, PROT_READ);

        // Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently
        // executing *inside* find_registered_zone is swapped with the counter drained to zero last time through.
        // The former is then allowed to drain to zero while this thread yields.
        int *p = pFRZCounterLive;
        pFRZCounterLive = pFRZCounterDrain;
        pFRZCounterDrain = p;
        __sync_synchronize(); // Full memory barrier

        while (0 != *pFRZCounterDrain) { pthread_yield_np(); }

        MALLOC_UNLOCK();
        return;
    }
    MALLOC_UNLOCK();
    malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
}
void
malloc_set_zone_name(malloc_zone_t *z, const char *name) {
    char	*newName;

    mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);
    if (z->zone_name) {
        free((char *)z->zone_name);
        z->zone_name = NULL;
    }
    if (name) {
        size_t buflen = strlen(name) + 1;
        newName = malloc_zone_malloc(z, buflen);
        if (newName) {
            strlcpy(newName, name, buflen);
            z->zone_name = (const char *)newName;
        } else {
            z->zone_name = NULL;
        }
    }
    mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}

const char *
malloc_get_zone_name(malloc_zone_t *zone) {
    return zone->zone_name;
}
/*
 * XXX malloc_printf now uses _simple_*printf.  It only deals with a
 * subset of printf format specifiers, but it doesn't call malloc.
 */

__private_extern__ void
_malloc_vprintf(int flags, const char *format, va_list ap)
{
    _SIMPLE_STRING b;

    if (_malloc_no_asl_log || (flags & MALLOC_PRINTF_NOLOG) || (b = _simple_salloc()) == NULL) {
        if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
            if (__is_threaded) {
                /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
                _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
            } else {
                _simple_dprintf(malloc_debug_file, "%s(%d) malloc: ", getprogname(), getpid());
            }
        }
        _simple_vdprintf(malloc_debug_file, format, ap);
    } else {
        if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
            if (__is_threaded) {
                /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
                _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
            } else {
                _simple_sprintf(b, "%s(%d) malloc: ", getprogname(), getpid());
            }
        }
        _simple_vsprintf(b, format, ap);
        _simple_put(b, malloc_debug_file);
        _simple_asl_log(flags & MALLOC_PRINTF_LEVEL_MASK, Malloc_Facility, _simple_string(b));
        _simple_sfree(b);
    }
}
__private_extern__ void
_malloc_printf(int flags, const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(flags, format, ap);
    va_end(ap);
}

void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(ASL_LEVEL_ERR, format, ap);
    va_end(ap);
}
/*********	Generic ANSI callouts	************/

void *
malloc(size_t size) {
    void	*retval;
    retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

void *
calloc(size_t num_items, size_t size) {
    void	*retval;
    retval = malloc_zone_calloc(inline_malloc_default_zone(), num_items, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}
void
free(void *ptr) {
    malloc_zone_t	*zone;
    size_t		size;

    if (!ptr)
        return;
    zone = find_registered_zone(ptr, &size);
    if (!zone) {
        malloc_printf("*** error for object %p: pointer being freed was not allocated\n"
            "*** set a breakpoint in malloc_error_break to debug\n", ptr);
        malloc_error_break();
        if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
            _SIMPLE_STRING b = _simple_salloc();
            if (b) {
                _simple_sprintf(b, "*** error for object %p: pointer being freed was not allocated\n", ptr);
                CRSetCrashLogMessage(_simple_string(b));
            } else {
                CRSetCrashLogMessage("*** error: pointer being freed was not allocated\n");
            }
            abort();
        }
    } else if (zone->version >= 6 && zone->free_definite_size)
        malloc_zone_free_definite_size(zone, ptr, size);
    else
        malloc_zone_free(zone, ptr);
}
void *
realloc(void *in_ptr, size_t new_size) {
    void		*retval = NULL;
    void		*old_ptr;
    malloc_zone_t	*zone;
    size_t		old_size = 0;

    // SUSv3: "If size is 0 and ptr is not a null pointer, the object
    // pointed to is freed. If the space cannot be allocated, the object
    // shall remain unchanged."  Also "If size is 0, either a null pointer
    // or a unique pointer that can be successfully passed to free() shall
    // be returned."  We choose to allocate a minimum size object by calling
    // malloc_zone_malloc with zero size, which matches "If ptr is a null
    // pointer, realloc() shall be equivalent to malloc() for the specified
    // size."  So we only free the original memory if the allocation succeeds.
    old_ptr = (new_size == 0) ? NULL : in_ptr;
    if (!old_ptr) {
        retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
    } else {
        zone = find_registered_zone(old_ptr, &old_size);
        if (!zone) {
            malloc_printf("*** error for object %p: pointer being realloc'd was not allocated\n"
                "*** set a breakpoint in malloc_error_break to debug\n", old_ptr);
            malloc_error_break();
            if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
                _SIMPLE_STRING b = _simple_salloc();
                if (b) {
                    _simple_sprintf(b, "*** error for object %p: pointer being realloc'd was not allocated\n", old_ptr);
                    CRSetCrashLogMessage(_simple_string(b));
                } else {
                    CRSetCrashLogMessage("*** error: pointer being realloc'd was not allocated\n");
                }
                abort();
            }
        } else {
            retval = malloc_zone_realloc(zone, old_ptr, new_size);
        }
    }
    if (retval == NULL) {
        errno = ENOMEM;
    } else if (new_size == 0) {
        free(in_ptr);
    }
    return retval;
}
void *
valloc(size_t size) {
    void	*retval;
    malloc_zone_t *zone = inline_malloc_default_zone();
    retval = malloc_zone_valloc(zone, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}
size_t
malloc_size(const void *ptr) {
    size_t	size = 0;

    if (!ptr)
        return size;

    (void)find_registered_zone(ptr, &size);
    return size;
}

size_t
malloc_good_size (size_t size) {
    malloc_zone_t	*zone = inline_malloc_default_zone();
    return zone->introspect->good_size(zone, size);
}
/*
 * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
 * and shall return a pointer to the allocated memory in memptr.
 * The value of alignment shall be a multiple of sizeof( void *), that is also a power of two.
 * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
 *
 * Upon successful completion, posix_memalign() shall return zero; otherwise,
 * an error number shall be returned to indicate the error.
 *
 * The posix_memalign() function shall fail if:
 * [EINVAL]
 *     The value of the alignment parameter is not a power of two multiple of sizeof( void *).
 * [ENOMEM]
 *     There is insufficient memory available with the requested alignment.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    void	*retval;

    /* POSIX is silent on NULL == memptr !?! */

    retval = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
    if (retval == NULL) {
        // To avoid testing the alignment constraints redundantly, we'll rely on the
        // test made in malloc_zone_memalign to vet each request. Only if that test fails
        // and returns NULL, do we arrive here to detect the bogus alignment and give the
        // required EINVAL return.
        if (alignment < sizeof( void *) ||		// excludes 0 == alignment
            0 != (alignment & (alignment - 1))) {	// relies on sizeof(void *) being a power of two.
            return EINVAL;
        }
        return ENOMEM;
    } else {
        *memptr = retval; // Set iff allocation succeeded
        return 0;
    }
}
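/*
 * Illustrative usage (not part of this file's build), matching the POSIX
 * contract described above; the variable names are hypothetical:
 *
 *	void *buf = NULL;
 *	int err = posix_memalign(&buf, 64, 4096);	// 64 is a power-of-two multiple of sizeof(void *)
 *	if (err == 0) {
 *		// buf is 64-byte aligned
 *		free(buf);
 *	} else {
 *		// err is EINVAL (bad alignment) or ENOMEM (no memory at that alignment)
 *	}
 */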
static malloc_zone_t *
find_registered_purgeable_zone(void *ptr) {
    if (!ptr)
        return NULL;

    /*
     * Look for a zone which contains ptr.  If that zone does not have the purgeable malloc flag
     * set, or the allocation is too small, do nothing.  Otherwise, set the allocation volatile.
     * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones
     * and only search those.
     */
    size_t size = 0;
    malloc_zone_t *zone = find_registered_zone(ptr, &size);

    /* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined
     * binary compatibility impact of changing the introspect struct yet. */
    if (!zone)
        return NULL;

    /* Check to make sure pointer is page aligned and size is multiple of page size */
    if ((size < vm_page_size) || ((size % vm_page_size) != 0))
        return NULL;

    return zone;
}
void
malloc_make_purgeable(void *ptr) {
    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
    if (!zone)
        return;

    int state = VM_PURGABLE_VOLATILE;
    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
    return;
}

/* Returns true if ptr is valid.  Ignore the return value from vm_purgable_control and only report
 * the resulting state. */
int
malloc_make_nonpurgeable(void *ptr) {
    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
    if (!zone)
        return 0;

    int state = VM_PURGABLE_NONVOLATILE;
    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);

    if (state == VM_PURGABLE_EMPTY)
        return EFAULT;

    return 0;
}
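/*
 * Illustrative usage (not part of this file's build): allocations from the
 * default purgeable zone can be marked volatile while cached data is not in
 * use, then pinned again before reuse.  The buffer name is hypothetical.
 *
 *	malloc_zone_t *pz = malloc_default_purgeable_zone();
 *	void *cache = malloc_zone_malloc(pz, 4 * vm_page_size);
 *	malloc_make_purgeable(cache);			// kernel may now reclaim these pages
 *	if (malloc_make_nonpurgeable(cache) == 0) {
 *		// contents survived; safe to reuse
 *	} else {
 *		// pages were discarded (EFAULT); regenerate the data
 *	}
 */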
size_t
malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
    if (!zone) {
        unsigned index = 0;
        size_t total = 0;

        // Take lock to defend against malloc_destroy_zone()
        MALLOC_LOCK();
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (zone->version < 8)
                continue;
            if (NULL == zone->pressure_relief)
                continue;
            if (0 == goal) /* Greedy */
                total += zone->pressure_relief(zone, 0);
            else if (goal > total)
                total += zone->pressure_relief(zone, goal - total);
            else /* total >= goal */
                break;
        }
        MALLOC_UNLOCK();
        return total;
    } else {
        // Assumes zone is not destroyed for the duration of this call
        if (zone->version < 8)
            return 0;
        if (NULL == zone->pressure_relief)
            return 0;
        return zone->pressure_relief(zone, goal);
    }
}
/*********	Batch methods	************/

unsigned
malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
    unsigned	(*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone->batch_malloc;
    if (!batch_malloc)
        return 0;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    unsigned	batched = batch_malloc(zone, size, results, num_requested);
    if (malloc_logger) {
        unsigned	index = 0;
        while (index < batched) {
            malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[index], 0);
            index++;
        }
    }
    return batched;
}
void
malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (malloc_logger) {
        unsigned	index = 0;
        while (index < num) {
            malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
            index++;
        }
    }
    void	(*batch_free)(malloc_zone_t *, void **, unsigned) = zone->batch_free;
    if (batch_free) {
        batch_free(zone, to_be_freed, num);
    } else {
        void	(*free_fun)(malloc_zone_t *, void *) = zone->free;
        while (num--) {
            void *ptr = *to_be_freed++;
            free_fun(zone, ptr);
        }
    }
}
/*********	Functions for performance tools	************/

static kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}

kern_return_t
malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) {
    // Note that the 2 following addresses are not correct if the address of the target is different from your own.  This notably occurs if the address of System.framework is slid (e.g. different than at B & I )
    vm_address_t	remote_malloc_zones = (vm_address_t)&malloc_zones;
    vm_address_t	remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
    kern_return_t	err;
    vm_address_t	zones_address;
    vm_address_t	*zones_address_ref;
    unsigned		num_zones;
    unsigned		*num_zones_ref;
    if (!reader) reader = _malloc_default_reader;
    // printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones);
    err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
    // printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones_address at %p\n", (unsigned)remote_malloc_zones);
        return err;
    }
    zones_address = *zones_address_ref;
    // printf("Reading num_zones at address %p\n", remote_malloc_num_zones);
    err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading num_zones at %p\n", (unsigned)remote_malloc_num_zones);
        return err;
    }
    num_zones = *num_zones_ref;
    // printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones);
    *count = num_zones;
    // printf("malloc_get_all_zones succesfully found %d zones\n", num_zones);
    err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", &zones_address);
        return err;
    }
    // printf("malloc_get_all_zones succesfully read %d zones\n", num_zones);
    return err;
}
/*********	Debug helpers	************/

void
malloc_zone_print_ptr_info(void *ptr) {
    malloc_zone_t	*zone;
    if (!ptr) return;
    zone = malloc_zone_from_ptr(ptr);
    if (zone) {
        printf("ptr %p in registered zone %p\n", ptr, zone);
    } else {
        printf("ptr %p not in heap\n", ptr);
    }
}
boolean_t
malloc_zone_check(malloc_zone_t *zone) {
    boolean_t	ok = 1;
    if (!zone) {
        unsigned	index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (!zone->introspect->check(zone)) ok = 0;
        }
    } else {
        ok = zone->introspect->check(zone);
    }
    return ok;
}
void
malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) {
    if (!zone) {
        unsigned	index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->print(zone, verbose);
        }
    } else {
        zone->introspect->print(zone, verbose);
    }
}
void
malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
    if (!zone) {
        memset(stats, 0, sizeof(*stats));
        unsigned	index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            malloc_statistics_t this_stats;
            zone->introspect->statistics(zone, &this_stats);
            stats->blocks_in_use += this_stats.blocks_in_use;
            stats->size_in_use += this_stats.size_in_use;
            stats->max_size_in_use += this_stats.max_size_in_use;
            stats->size_allocated += this_stats.size_allocated;
        }
    } else {
        zone->introspect->statistics(zone, stats);
    }
}
void
malloc_zone_log(malloc_zone_t *zone, void *address) {
    if (!zone) {
        unsigned	index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->log(zone, address);
        }
    } else {
        zone->introspect->log(zone, address);
    }
}
/*********	Misc other entry points	************/

static void
DefaultMallocError(int x) {
#if USE_SLEEP_RATHER_THAN_ABORT
    malloc_printf("*** error %d\n", x);
    sleep(3600);
#else
    _SIMPLE_STRING b = _simple_salloc();
    if (b) {
        _simple_sprintf(b, "*** error %d", x);
        malloc_printf("%s\n", _simple_string(b));
        CRSetCrashLogMessage(_simple_string(b));
    } else {
        _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error %d", x);
        CRSetCrashLogMessage("*** DefaultMallocError called");
    }
    abort();
#endif
}

void (*
malloc_error(void (*func)(int)))(int) {
    return DefaultMallocError;
}
/* Stack logging fork-handling prototypes */
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();

void
_malloc_fork_prepare() {
    /* Prepare the malloc module for a fork by ensuring that no thread is in a malloc critical section */
    unsigned	index = 0;
    MALLOC_LOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t	*zone = malloc_zones[index++];
        zone->introspect->force_lock(zone);
    }
    __stack_logging_fork_prepare();
}

void
_malloc_fork_parent() {
    /* Called in the parent process after a fork() to resume normal operation. */
    unsigned	index = 0;
    __stack_logging_fork_parent();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t	*zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}

void
_malloc_fork_child() {
    /* Called in the child process after a fork() to resume normal operation.  In the MTASK case we also have to change memory inheritance so that the child does not share memory with the parent. */
    unsigned	index = 0;
    __stack_logging_fork_child();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t	*zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}
/*
 * A Glibc-like mstats() interface.
 *
 * Note that this interface really isn't very good, as it doesn't understand
 * that we may have multiple allocators running at once.  We just massage
 * the result from malloc_zone_statistics in any case.
 */
struct mstats
mstats(void)
{
    malloc_statistics_t s;
    struct mstats m;

    malloc_zone_statistics(NULL, &s);
    m.bytes_total = s.size_allocated;
    m.chunks_used = s.blocks_in_use;
    m.bytes_used = s.size_in_use;
    m.chunks_free = 0;
    m.bytes_free = m.bytes_total - m.bytes_used;	/* isn't this somewhat obvious? */

    return m;
}
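/*
 * Illustrative usage (not part of this file's build): a caller of this
 * Glibc-like interface reads the aggregated counters directly.
 *
 *	struct mstats ms = mstats();
 *	malloc_printf("in use: %lu of %lu bytes in %lu chunks\n",
 *	    (unsigned long)ms.bytes_used, (unsigned long)ms.bytes_total, (unsigned long)ms.chunks_used);
 */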
boolean_t
malloc_zone_enable_discharge_checking(malloc_zone_t *zone)
{
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return FALSE;
    if (NULL == zone->introspect->enable_discharge_checking)
        return FALSE;
    return zone->introspect->enable_discharge_checking(zone);
}

void
malloc_zone_disable_discharge_checking(malloc_zone_t *zone)
{
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return;
    if (NULL == zone->introspect->disable_discharge_checking)
        return;
    zone->introspect->disable_discharge_checking(zone);
}
void
malloc_zone_discharge(malloc_zone_t *zone, void *memory)
{
    if (NULL == zone)
        zone = malloc_zone_from_ptr(memory);
    if (NULL == zone)
        return;
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return;
    if (NULL == zone->introspect->discharge)
        return;
    zone->introspect->discharge(zone, memory);
}
void
malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info))
{
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (zone->version < 7)
                continue;
            if (NULL == zone->introspect->enumerate_discharged_pointers)
                continue;
            zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
        }
    } else {
        if (zone->version < 7)
            return;
        if (NULL == zone->introspect->enumerate_discharged_pointers)
            return;
        zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
    }
}
/*****************	OBSOLETE ENTRY POINTS	********************/

#if PHASE_OUT_OLD_MALLOC
#error PHASE OUT THE FOLLOWING FUNCTIONS
#else
#warning PHASE OUT THE FOLLOWING FUNCTIONS
#endif

void
set_malloc_singlethreaded(boolean_t single) {
    static boolean_t warned = 0;
    if (!warned) {
#if PHASE_OUT_OLD_MALLOC
        malloc_printf("*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
        warned = 1;
    }
}

void
malloc_singlethreaded() {
    static boolean_t warned = 0;
    if (!warned) {
        malloc_printf("*** OBSOLETE: malloc_singlethreaded()\n");
        warned = 1;
    }
}

int
malloc_debug(int level) {
    malloc_printf("*** OBSOLETE: malloc_debug()\n");
    return 0;
}