/*
 * Copyright (c) 1999, 2006-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <pthread_internals.h>
#include "magmallocProvider.h"
#include <mach-o/dyld.h> /* for NSVersionOfLinkTimeLibrary() */

#import <stdlib.h>
#import <stdio.h>
#import <string.h>
#import <unistd.h>
#import <malloc/malloc.h>
#import <fcntl.h>
#import <crt_externs.h>
#import <errno.h>
#import <pthread_internals.h>
#import <limits.h>
#import <dlfcn.h>
#import <mach/mach_vm.h>
#import <mach/mach_init.h>
#import <sys/mman.h>

#import "scalable_malloc.h"
#import "stack_logging.h"
#import "malloc_printf.h"
#import "_simple.h"
#import "CrashReporterClient.h"

/*
 * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a
 * user-specified size_t, which can cause overflow (and subsequent crashes)
 * for values near SIZE_T_MAX. Rather than add extra "if" checks everywhere
 * this occurs, it is easier to just set an absolute maximum request size,
 * and immediately return an error if the requested size exceeds this maximum.
 * Of course, values less than this absolute max can fail later if the value
 * is still too large for the available memory. The largest value added
 * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set
 * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX.
 */
#define MALLOC_ABSOLUTE_MAX_SIZE (SIZE_T_MAX - (2 * PAGE_SIZE))
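
/*
 * Illustrative sketch (not part of the original source) of the overflow this
 * bound prevents: rounding a near-SIZE_T_MAX request can wrap, e.g.
 *
 *     size_t request = SIZE_T_MAX - 1;
 *     size_t rounded = round_page(request);    // wraps to a small value
 *
 * so a single request > MALLOC_ABSOLUTE_MAX_SIZE check on entry is enough to
 * keep the later "size plus a page or two" arithmetic from overflowing.
 */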

#define USE_SLEEP_RATHER_THAN_ABORT 0

typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);

__private_extern__ pthread_lock_t _malloc_lock = 0; // initialized in __libc_init

/* The following variables are exported for the benefit of performance tools
 *
 * It should always be safe to first read malloc_num_zones, then read
 * malloc_zones without taking the lock, if only iteration is required and
 * provided that when malloc_destroy_zone is called all prior operations on that
 * zone are complete and no further calls referencing that zone can be made.
 */
unsigned malloc_num_zones = 0;
unsigned malloc_num_zones_allocated = 0;
malloc_zone_t **malloc_zones = 0;
malloc_logger_t *malloc_logger = NULL;

unsigned malloc_debug_flags = 0;

unsigned malloc_check_start = 0; // 0 means don't check
unsigned malloc_check_counter = 0;
unsigned malloc_check_each = 1000;

/* global flag to suppress ASL logging e.g. for syslogd */
int _malloc_no_asl_log = 0;

static int malloc_check_sleep = 100; // default 100 second sleep
static int malloc_check_abort = 0; // default is to sleep, not abort

static int malloc_debug_file = STDERR_FILENO;
/*
 * State indicated by malloc_def_zone_state
 * 0 - the default zone has not yet been created
 * 1 - a Malloc* environment variable has been set
 * 2 - the default zone has been created and an environment variable scan done
 * 3 - a new default zone has been created and another environment variable scan done
 */
__private_extern__ int malloc_def_zone_state = 0;

static const char Malloc_Facility[] = "com.apple.Libsystem.malloc";

#define MALLOC_LOCK()   LOCK(_malloc_lock)
#define MALLOC_UNLOCK() UNLOCK(_malloc_lock)

/*
 * Counters that coordinate zone destruction (in malloc_zone_unregister) with
 * find_registered_zone (here abbreviated as FRZ).
 */
static int counterAlice = 0, counterBob = 0;
static int *pFRZCounterLive = &counterAlice, *pFRZCounterDrain = &counterBob;

#define MALLOC_LOG_TYPE_ALLOCATE    stack_logging_type_alloc
#define MALLOC_LOG_TYPE_DEALLOCATE  stack_logging_type_dealloc
#define MALLOC_LOG_TYPE_HAS_ZONE    stack_logging_flag_zone
#define MALLOC_LOG_TYPE_CLEARED     stack_logging_flag_cleared

/********* Utilities ************/
__private_extern__ uint64_t malloc_entropy[2] = {0, 0};

void __malloc_entropy_setup(const char *apple[]) __attribute__ ((visibility ("hidden")));

static int
__entropy_from_kernel(const char *str)
{
    unsigned long long val;
    char tmp[20], *p;
    int idx = 0;

    /* Skip over key to the first value */
    str = strchr(str, '=');
    if (str == NULL)
        return 0;
    str++;

    while (str && idx < sizeof(malloc_entropy)/sizeof(malloc_entropy[0])) {
        strlcpy(tmp, str, 20);
        p = strchr(tmp, ',');
        if (p) *p = '\0';
        val = strtoull(tmp, NULL, 0);
        malloc_entropy[idx] = (uint64_t)val;
        idx++;
        if ((str = strchr(str, ',')) != NULL)
            str++;
    }
    return idx;
}

void
__malloc_entropy_setup(const char *apple[])
{
    const char **p;
    for (p = apple; p && *p; p++) {
        if (strstr(*p, "malloc_entropy") == *p) {
            if (sizeof(malloc_entropy)/sizeof(malloc_entropy[0]) == __entropy_from_kernel(*p))
                return;
            else
                break;
        }
    }

    malloc_entropy[0] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
    malloc_entropy[1] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
    return;
}

static inline malloc_zone_t *find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
    // Returns a zone which contains ptr, else NULL

    if (0 == malloc_num_zones) {
        if (returned_size) *returned_size = 0;
        return NULL;
    }

    // The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered.
    // So don't advance the FRZ counter yet.
    malloc_zone_t *zone = malloc_zones[0];
    size_t size = zone->size(zone, ptr);
    if (size) { // Claimed by this zone?
        if (returned_size) *returned_size = size;
        return zone;
    }

    int *pFRZCounter = pFRZCounterLive;   // Capture pointer to the counter of the moment
    __sync_fetch_and_add(pFRZCounter, 1); // Advance this counter -- our thread is in FRZ

    unsigned index;
    unsigned limit = malloc_num_zones;
    malloc_zone_t **zones = &malloc_zones[1];

    for (index = 1; index < limit; ++index, ++zones) {
        zone = *zones;
        size = zone->size(zone, ptr);
        if (size) { // Claimed by this zone?
            if (returned_size) *returned_size = size;
            __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
            return zone;
        }
    }
    // Unclaimed by any zone.
    if (returned_size) *returned_size = 0;
    __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
    return NULL;
}

__private_extern__ __attribute__((noinline)) void
malloc_error_break(void) {
    // Provides a non-inlined place for various malloc error procedures to call
    // that will be called after an error message appears. It does not make
    // sense for developers to call this function, so it is marked
    // __private_extern__ to prevent it from becoming API.
    MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe
}

__private_extern__ boolean_t __stack_logging_locked();

__private_extern__ __attribute__((noinline)) __attribute__((used)) int
malloc_gdb_po_unsafe(void) {
    // In order to implement "po" and other data formatters in gdb, the debugger
    // calls functions that call malloc. The debugger will only run one thread
    // of the program in this case, so if another thread is holding a zone lock,
    // gdb may deadlock.
    //
    // Iterate over the zones in malloc_zones, and call "trylock" on the zone
    // lock. If trylock succeeds, unlock it, otherwise return "locked". Returns
    // 0 == safe, 1 == locked/unsafe.

    if (__stack_logging_locked())
        return 1;

    malloc_zone_t **zones = malloc_zones;
    unsigned i, e = malloc_num_zones;

    for (i = 0; i != e; ++i) {
        malloc_zone_t *zone = zones[i];

        // Version must be >= 5 to look at the new introspection field.
        if (zone->version < 5)
            continue;

        if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone))
            return 1;
    }
    return 0;
}

/********* Creation and destruction ************/

static void set_flags_from_environment(void);

static void
malloc_zone_register_while_locked(malloc_zone_t *zone) {
    size_t protect_size;
    unsigned i;

    /* scan the list of zones, to see if this zone is already registered. If
     * so, print an error message and return. */
    for (i = 0; i != malloc_num_zones; ++i)
        if (zone == malloc_zones[i]) {
            _malloc_printf(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
            return;
        }

    if (malloc_num_zones == malloc_num_zones_allocated) {
        size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
        size_t alloc_size = malloc_zones_size + vm_page_size;

        malloc_zone_t **new_zones = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);

        /* If there were previously allocated malloc zones, we need to copy them
         * out of the previous array and into the new zones array */
        if (malloc_zones)
            memcpy(new_zones, malloc_zones, malloc_zones_size);

        /* Update the malloc_zones pointer, which we leak if it was previously
         * allocated, and the number of zones allocated */
        protect_size = alloc_size;
        malloc_zones = new_zones;
        malloc_num_zones_allocated = alloc_size / sizeof(malloc_zone_t *);
    } else {
        /* If we don't need to reallocate zones, we need to briefly change the
         * page protection of the malloc zones to allow writes */
        protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
        mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
    }
    malloc_zones[malloc_num_zones++] = zone;

    /* Finally, now that the zone is registered, disallow write access to the
     * malloc_zones array */
    mprotect(malloc_zones, protect_size, PROT_READ);
    //_malloc_printf(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones, malloc_num_zones, protect_size);
}

static void
_malloc_initialize(void) {
    MALLOC_LOCK();
    if (malloc_def_zone_state < 2) {
        unsigned n;
        malloc_zone_t *zone;

        malloc_def_zone_state += 2;
        set_flags_from_environment(); // will only set flags up to two times
        n = malloc_num_zones;
        zone = create_scalable_zone(0, malloc_debug_flags);
        malloc_zone_register_while_locked(zone);
        malloc_set_zone_name(zone, "DefaultMallocZone");
        if (n != 0) { // make the default first, for efficiency
            unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
            malloc_zone_t *hold = malloc_zones[0];

            if (hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
                malloc_set_zone_name(hold, NULL);
            }

            mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
            malloc_zones[0] = malloc_zones[n];
            malloc_zones[n] = hold;
            mprotect(malloc_zones, protect_size, PROT_READ);
        }
        // _malloc_printf(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
        // _malloc_printf(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, (unsigned)&malloc_num_zones);
    }
    MALLOC_UNLOCK();
}

static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_zone(void) {
    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
    return malloc_zones[0];
}

malloc_zone_t *
malloc_default_zone(void) {
    return inline_malloc_default_zone();
}

static inline malloc_zone_t *inline_malloc_default_scalable_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_scalable_zone(void) {
    unsigned index;

    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_scalable_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);

    MALLOC_LOCK();
    for (index = 0; index < malloc_num_zones; ++index) {
        malloc_zone_t *z = malloc_zones[index];

        if (z->zone_name && strcmp(z->zone_name, "DefaultMallocZone") == 0) {
            MALLOC_UNLOCK();
            return z;
        }
    }
    MALLOC_UNLOCK();

    malloc_printf("*** malloc_default_scalable_zone() failed to find 'DefaultMallocZone'\n");
    return NULL; // FIXME: abort() instead?
}

malloc_zone_t *
malloc_default_purgeable_zone(void) {
    static malloc_zone_t *dpz;

    if (!dpz) {
        //
        // PR_7288598: Must pass a *scalable* zone (szone) as the helper for create_purgeable_zone().
        // Take care that the zone so obtained is not subject to interposing.
        //
        malloc_zone_t *tmp = create_purgeable_zone(0, inline_malloc_default_scalable_zone(), malloc_debug_flags);
        malloc_zone_register(tmp);
        malloc_set_zone_name(tmp, "DefaultPurgeableMallocZone");
        if (!__sync_bool_compare_and_swap(&dpz, NULL, tmp))
            malloc_destroy_zone(tmp);
    }
    return dpz;
}

// For debugging, allow stack logging to both memory and disk to compare their results.
static void
stack_logging_log_stack_debug(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
    __disk_stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
    stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
}

static void
set_flags_from_environment(void) {
    const char *flag;
    int fd;
    char **env = *_NSGetEnviron();
    char **p;
    char *c;

    if (malloc_debug_file != STDERR_FILENO) {
        close(malloc_debug_file);
        malloc_debug_file = STDERR_FILENO;
    }
#if __LP64__
    malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
#else
    int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
    if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126) /* Lion or greater */)
        malloc_debug_flags = 0;
    else
        malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
#endif
    stack_logging_enable_logging = 0;
    stack_logging_dontcompact = 0;
    malloc_logger = NULL;
    malloc_check_start = 0;
    malloc_check_each = 1000;
    malloc_check_abort = 0;
    malloc_check_sleep = 100;
    /*
     * Given that all of our environment variables start with "Malloc", we can
     * optimize by scanning the environment once ourselves, thereby avoiding
     * repeated calls to getenv().
     * If we are setu/gid these flags are ignored to prevent a malicious
     * invoker from changing our behaviour.
     */
    for (p = env; (c = *p) != NULL; ++p) {
        if (!strncmp(c, "Malloc", 6)) {
            if (issetugid())
                return;
            break;
        }
    }
    if (c == NULL)
        return;
    flag = getenv("MallocLogFile");
    if (flag) {
        fd = open(flag, O_WRONLY|O_APPEND|O_CREAT, 0644);
        if (fd >= 0) {
            malloc_debug_file = fd;
            fcntl(fd, F_SETFD, 0); // clear close-on-exec flag XXX why?
        } else {
            malloc_printf("Could not open %s, using stderr\n", flag);
        }
    }
    if (getenv("MallocGuardEdges")) {
        malloc_debug_flags |= SCALABLE_MALLOC_ADD_GUARD_PAGES;
        _malloc_printf(ASL_LEVEL_INFO, "protecting edges\n");
        if (getenv("MallocDoNotProtectPrelude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_PRELUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
        }
        if (getenv("MallocDoNotProtectPostlude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
        }
    }
    flag = getenv("MallocStackLogging");
    if (!flag) {
        flag = getenv("MallocStackLoggingNoCompact");
        stack_logging_dontcompact = 1;
    }
    // For debugging, the MallocStackLogging or MallocStackLoggingNoCompact environment variables can be set to
    // values of "memory", "disk", or "both" to control which stack logging mechanism to use. Those strings appear
    // in the flag variable, and the strtoul() call below will return 0, so then we can do string comparison on the
    // value of flag. The default stack logging now is disk stack logging, since memory stack logging is not 64-bit-aware.
    if (flag) {
        unsigned long val = strtoul(flag, NULL, 0);
        if (val == 1) val = 0;
        if (val == -1) val = 0;
        if (val) {
            malloc_logger = (void *)val;
            _malloc_printf(ASL_LEVEL_INFO, "recording stacks using recorder %p\n", malloc_logger);
        } else if (strcmp(flag, "memory") == 0) {
            malloc_logger = (malloc_logger_t *)stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks in memory using standard recorder\n");
        } else if (strcmp(flag, "both") == 0) {
            malloc_logger = stack_logging_log_stack_debug;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to both memory and disk for comparison debugging\n");
        } else { // the default is to log to disk
            malloc_logger = __disk_stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to disk using standard recorder\n");
        }
        stack_logging_enable_logging = 1;
        if (stack_logging_dontcompact) {
            if (malloc_logger == __disk_stack_logging_log_stack) {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; size of log files on disk can increase rapidly\n");
            } else {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; VM can increase rapidly\n");
            }
        }
    }
    if (getenv("MallocScribble")) {
        malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
        _malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
    }
    if (getenv("MallocErrorAbort")) {
        malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
        _malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
    }
#if __LP64__
    /* initialization above forces SCALABLE_MALLOC_ABORT_ON_CORRUPTION of 64-bit processes */
#else
    flag = getenv("MallocCorruptionAbort");
    if (flag && (flag[0] == '0')) { // Set from an environment variable in 32-bit processes
        malloc_debug_flags &= ~SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    } else if (flag) {
        malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    }
#endif
    flag = getenv("MallocCheckHeapStart");
    if (flag) {
        malloc_check_start = strtoul(flag, NULL, 0);
        if (malloc_check_start == 0) malloc_check_start = 1;
        if (malloc_check_start == -1) malloc_check_start = 1;
        flag = getenv("MallocCheckHeapEach");
        if (flag) {
            malloc_check_each = strtoul(flag, NULL, 0);
            if (malloc_check_each == 0) malloc_check_each = 1;
            if (malloc_check_each == -1) malloc_check_each = 1;
        }
        _malloc_printf(ASL_LEVEL_INFO, "checks heap after %dth operation and each %d operations\n", malloc_check_start, malloc_check_each);
        flag = getenv("MallocCheckHeapAbort");
        if (flag)
            malloc_check_abort = strtol(flag, NULL, 0);
        if (malloc_check_abort)
            _malloc_printf(ASL_LEVEL_INFO, "will abort on heap corruption\n");
        else {
            flag = getenv("MallocCheckHeapSleep");
            if (flag)
                malloc_check_sleep = strtol(flag, NULL, 0);
            if (malloc_check_sleep > 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
            else if (malloc_check_sleep < 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
            else
                _malloc_printf(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
        }
    }
    if (getenv("MallocHelp")) {
        _malloc_printf(ASL_LEVEL_INFO,
            "environment variables that can be set for debug:\n"
            "- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
            "- MallocGuardEdges to add 2 guard pages for each large block\n"
            "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
            "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
            "- MallocStackLogging to record all stacks. Tools like leaks can then be applied\n"
            "- MallocStackLoggingNoCompact to record all stacks. Needed for malloc_history\n"
            "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
            "- MallocScribble to detect writing on free blocks and missing initializers:\n"
            "  0x55 is written upon free and 0xaa is written on allocation\n"
            "- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
            "- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
            "- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
            "- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
            "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
            "  MallocCorruptionAbort is always set on 64-bit processes\n"
            "- MallocErrorAbort to abort on any malloc error, including out of memory\n"
            "- MallocHelp - this help!\n");
    }
}
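
/*
 * Usage sketch (illustrative, not part of the original source): the flags
 * above are read from the environment once, at default-zone creation, so
 * they are set when launching the target program, e.g. from a shell:
 *
 *     MallocScribble=1 MallocGuardEdges=1 ./myprog
 *     MallocStackLoggingNoCompact=1 ./myprog      # then inspect with malloc_history
 *     MallocCheckHeapStart=1000 MallocCheckHeapEach=100 ./myprog
 *
 * The variable names are those documented in the MallocHelp text above.
 */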

malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
    malloc_zone_t *zone;

    /* start_size doesn't seem to actually be used, but we check it anyway */
    if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
    malloc_zone_register(zone);
    return zone;
}
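
/*
 * Usage sketch (illustrative, not part of the original source): a private
 * zone groups related allocations so they can be torn down in one step;
 * malloc_destroy_zone() reclaims anything still allocated from it. The
 * function name here is hypothetical.
 *
 *     static void example_use_private_zone(void) {
 *         malloc_zone_t *z = malloc_create_zone(0, 0);
 *         malloc_set_zone_name(z, "ExampleZone");
 *         char *buf = malloc_zone_malloc(z, 128);
 *         snprintf(buf, 128, "scratch data");
 *         malloc_zone_free(z, buf);   // optional; destroy reclaims the rest
 *         malloc_destroy_zone(z);
 *     }
 */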

/*
 * For use by CheckFix: establish a new default zone whose behavior is, apart from
 * the use of death-row and per-CPU magazines, that of Leopard.
 */
void
malloc_create_legacy_default_zone(void)
{
    malloc_zone_t *zone;
    int i;

    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_legacy_scalable_zone(0, malloc_debug_flags);

    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);

    //
    // Establish the legacy scalable zone just created as the default zone.
    //
    malloc_zone_t *hold = malloc_zones[0];
    if (hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
        malloc_set_zone_name(hold, NULL);
    }
    malloc_set_zone_name(zone, "DefaultMallocZone");

    unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
    mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

    // assert(zone == malloc_zones[malloc_num_zones - 1]);
    for (i = malloc_num_zones - 1; i > 0; --i) {
        malloc_zones[i] = malloc_zones[i - 1];
    }
    malloc_zones[0] = zone;

    mprotect(malloc_zones, protect_size, PROT_READ);
    MALLOC_UNLOCK();
}

void
malloc_destroy_zone(malloc_zone_t *zone) {
    malloc_set_zone_name(zone, NULL); // Deallocate zone name wherever it may reside PR_7701095
    malloc_zone_unregister(zone);
    zone->destroy(zone);
}

/********* Block creation and manipulation ************/

static void
internal_check(void) {
    static vm_address_t *frames = NULL;
    static unsigned num_frames;
    if (malloc_zone_check(NULL)) {
        if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
        thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
    } else {
        _SIMPLE_STRING b = _simple_salloc();
        if (b)
            _simple_sprintf(b, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
        else
            _malloc_printf(MALLOC_PRINTF_NOLOG, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
        malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
        if (frames) {
            unsigned index = 1;
            if (b) {
                _simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
                malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
            } else {
                /*
                 * Should only get here if vm_allocate() can't get a single page of
                 * memory, implying _simple_asl_log() would also fail. So we just
                 * print to the file descriptor.
                 */
                _malloc_printf(MALLOC_PRINTF_NOLOG, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _malloc_printf(MALLOC_PRINTF_NOLOG, "%p ", frames[index++]);
                _malloc_printf(MALLOC_PRINTF_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
            }
        }
        if (malloc_check_each > 1) {
            unsigned recomm_each = (malloc_check_each > 10) ? malloc_check_each/10 : 1;
            unsigned recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
            malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
        }
        if (malloc_check_abort) {
            CRSetCrashLogMessage(b ? _simple_string(b) : "*** MallocCheckHeap: FAILED check");
            abort();
        } else if (b)
            _simple_sfree(b);
        if (malloc_check_sleep > 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
                malloc_check_sleep);
            sleep(malloc_check_sleep);
        } else if (malloc_check_sleep < 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping once for %d seconds to leave time to attach\n",
                -malloc_check_sleep);
            sleep(-malloc_check_sleep);
            malloc_check_sleep = 0;
        }
    }
    malloc_check_start += malloc_check_each;
}

void *
malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->malloc(zone, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->calloc(zone, num_items, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, (uintptr_t)(num_items * size), 0,
            (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_valloc(malloc_zone_t *zone, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->valloc(zone, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
    void *new_ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    new_ptr = zone->realloc(zone, ptr, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, (uintptr_t)size,
            (uintptr_t)new_ptr, 0);
    return new_ptr;
}

void
malloc_zone_free(malloc_zone_t *zone, void *ptr) {
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    zone->free(zone, ptr);
}

static void
malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    zone->free_definite_size(zone, ptr, size);
}

malloc_zone_t *
malloc_zone_from_ptr(const void *ptr) {
    if (!ptr)
        return NULL;
    else
        return find_registered_zone(ptr, NULL);
}

void *
malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
    void *ptr;
    if (zone->version < 5) // Version must be >= 5 to look at the new memalign field.
        return NULL;
    if (!(zone->memalign))
        return NULL;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    if (alignment < sizeof(void *) ||          // excludes 0 == alignment
        0 != (alignment & (alignment - 1))) {  // relies on sizeof(void *) being a power of two.
        return NULL;
    }
    ptr = zone->memalign(zone, alignment, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}

/********* Functions for zone implementors ************/

void
malloc_zone_register(malloc_zone_t *zone) {
    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);
    MALLOC_UNLOCK();
}

void
malloc_zone_unregister(malloc_zone_t *z) {
    unsigned index;

    if (malloc_num_zones == 0)
        return;

    MALLOC_LOCK();
    for (index = 0; index < malloc_num_zones; ++index) {
        if (z != malloc_zones[index])
            continue;

        // Modify the page to allow write access, so that we can update the
        // malloc_zones array.
        size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
        mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

        // If we found a match, replace it with the entry at the end of the list, shrink the list,
        // and leave the end of the list intact to avoid racing with find_registered_zone().

        malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
        --malloc_num_zones;

        mprotect(malloc_zones, protect_size, PROT_READ);

        // Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently
        // executing *inside* find_registered_zone is swapped with the counter drained to zero last time through.
        // The former is then allowed to drain to zero while this thread yields.
        int *p = pFRZCounterLive;
        pFRZCounterLive = pFRZCounterDrain;
        pFRZCounterDrain = p;
        __sync_synchronize(); // Full memory barrier

        while (0 != *pFRZCounterDrain) { pthread_yield_np(); }

        MALLOC_UNLOCK();

        return;
    }
    MALLOC_UNLOCK();
    malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
}

void
malloc_set_zone_name(malloc_zone_t *z, const char *name) {
    char *newName;

    mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);
    if (z->zone_name) {
        free((char *)z->zone_name);
        z->zone_name = NULL;
    }
    if (name) {
        size_t buflen = strlen(name) + 1;
        newName = malloc_zone_malloc(z, buflen);
        if (newName) {
            strlcpy(newName, name, buflen);
            z->zone_name = (const char *)newName;
        } else {
            z->zone_name = NULL;
        }
    }
    mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}

const char *
malloc_get_zone_name(malloc_zone_t *zone) {
    return zone->zone_name;
}

/*
 * XXX malloc_printf now uses _simple_*printf. It only deals with a
 * subset of printf format specifiers, but it doesn't call malloc.
 */

__private_extern__ void
_malloc_vprintf(int flags, const char *format, va_list ap)
{
    _SIMPLE_STRING b;

    if (_malloc_no_asl_log || (flags & MALLOC_PRINTF_NOLOG) || (b = _simple_salloc()) == NULL) {
        if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
            if (__is_threaded) {
                /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
                _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
            } else {
                _simple_dprintf(malloc_debug_file, "%s(%d) malloc: ", getprogname(), getpid());
            }
        }
        _simple_vdprintf(malloc_debug_file, format, ap);
        return;
    }
    if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
        if (__is_threaded) {
            /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
            _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
        } else {
            _simple_sprintf(b, "%s(%d) malloc: ", getprogname(), getpid());
        }
    }
    _simple_vsprintf(b, format, ap);
    _simple_put(b, malloc_debug_file);
    _simple_asl_log(flags & MALLOC_PRINTF_LEVEL_MASK, Malloc_Facility, _simple_string(b));
    _simple_sfree(b);
}

__private_extern__ void
_malloc_printf(int flags, const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(flags, format, ap);
    va_end(ap);
}

void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(ASL_LEVEL_ERR, format, ap);
    va_end(ap);
}

/********* Generic ANSI callouts ************/

void *
malloc(size_t size) {
    void *retval;
    retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

void *
calloc(size_t num_items, size_t size) {
    void *retval;
    retval = malloc_zone_calloc(inline_malloc_default_zone(), num_items, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

void
free(void *ptr) {
    malloc_zone_t *zone;
    size_t size;
    if (!ptr)
        return;
    zone = find_registered_zone(ptr, &size);
    if (!zone) {
        malloc_printf("*** error for object %p: pointer being freed was not allocated\n"
            "*** set a breakpoint in malloc_error_break to debug\n", ptr);
        malloc_error_break();
        if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
            _SIMPLE_STRING b = _simple_salloc();
            if (b) {
                _simple_sprintf(b, "*** error for object %p: pointer being freed was not allocated\n", ptr);
                CRSetCrashLogMessage(_simple_string(b));
            } else {
                CRSetCrashLogMessage("*** error: pointer being freed was not allocated\n");
            }
            abort();
        }
    } else if (zone->version >= 6 && zone->free_definite_size)
        malloc_zone_free_definite_size(zone, ptr, size);
    else
        malloc_zone_free(zone, ptr);
}

void *
realloc(void *in_ptr, size_t new_size) {
    void *retval = NULL;
    void *old_ptr;
    malloc_zone_t *zone;
    size_t old_size = 0;

    // SUSv3: "If size is 0 and ptr is not a null pointer, the object
    // pointed to is freed. If the space cannot be allocated, the object
    // shall remain unchanged." Also "If size is 0, either a null pointer
    // or a unique pointer that can be successfully passed to free() shall
    // be returned." We choose to allocate a minimum size object by calling
    // malloc_zone_malloc with zero size, which matches "If ptr is a null
    // pointer, realloc() shall be equivalent to malloc() for the specified
    // size." So we only free the original memory if the allocation succeeds.
    old_ptr = (new_size == 0) ? NULL : in_ptr;
    if (!old_ptr) {
        retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
    } else {
        zone = find_registered_zone(old_ptr, &old_size);
        if (!zone) {
            malloc_printf("*** error for object %p: pointer being realloc'd was not allocated\n"
                "*** set a breakpoint in malloc_error_break to debug\n", old_ptr);
            malloc_error_break();
            if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
                _SIMPLE_STRING b = _simple_salloc();
                if (b) {
                    _simple_sprintf(b, "*** error for object %p: pointer being realloc'd was not allocated\n", old_ptr);
                    CRSetCrashLogMessage(_simple_string(b));
                } else {
                    CRSetCrashLogMessage("*** error: pointer being realloc'd was not allocated\n");
                }
                abort();
            }
        } else {
            retval = malloc_zone_realloc(zone, old_ptr, new_size);
        }
    }
    if (retval == NULL) {
        errno = ENOMEM;
    } else if (new_size == 0) {
        free(in_ptr);
    }
    return retval;
}
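
/*
 * Worked example of the SUSv3 behavior implemented above (illustrative, not
 * part of the original source): realloc(p, 0) frees p only after the
 * minimum-size replacement object has been obtained, so on failure p is left
 * untouched.
 *
 *     void *p = malloc(32);
 *     void *q = realloc(p, 0);   // unique minimum-size object, or NULL
 *     if (q != NULL)
 *         free(q);               // p itself was already freed by realloc
 *     else
 *         free(p);               // allocation failed; p remains valid
 */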

void *
valloc(size_t size) {
    void *retval;
    malloc_zone_t *zone = inline_malloc_default_zone();
    retval = malloc_zone_valloc(zone, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

extern void
vfree(void *ptr) {
    free(ptr);
}

size_t
malloc_size(const void *ptr) {
    size_t size = 0;

    if (!ptr)
        return size;

    (void)find_registered_zone(ptr, &size);
    return size;
}

size_t
malloc_good_size(size_t size) {
    malloc_zone_t *zone = inline_malloc_default_zone();
    return zone->introspect->good_size(zone, size);
}
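
/*
 * Usage sketch (illustrative, not part of the original source): since the
 * zone rounds requests up to its allocation quantum anyway, a caller can ask
 * for the rounded size and use the extra bytes for free.
 *
 *     size_t want = 1000;
 *     size_t usable = malloc_good_size(want);   // >= want, quantum-rounded
 *     char *buf = malloc(usable);
 */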

/*
 * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
 * and shall return a pointer to the allocated memory in memptr.
 * The value of alignment shall be a multiple of sizeof(void *), that is also a power of two.
 * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
 *
 * Upon successful completion, posix_memalign() shall return zero; otherwise,
 * an error number shall be returned to indicate the error.
 *
 * The posix_memalign() function shall fail if:
 * EINVAL
 *     The value of the alignment parameter is not a power of two multiple of sizeof(void *).
 * ENOMEM
 *     There is insufficient memory available with the requested alignment.
 */

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    void *retval;

    /* POSIX is silent on NULL == memptr !?! */

    retval = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
    if (retval == NULL) {
        // To avoid testing the alignment constraints redundantly, we'll rely on the
        // test made in malloc_zone_memalign to vet each request. Only if that test fails
        // and returns NULL, do we arrive here to detect the bogus alignment and give the
        // required EINVAL return.
        if (alignment < sizeof(void *) ||          // excludes 0 == alignment
            0 != (alignment & (alignment - 1))) {  // relies on sizeof(void *) being a power of two.
            return EINVAL;
        }
        return ENOMEM;
    } else {
        *memptr = retval; // Set iff allocation succeeded
        return 0;
    }
}
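
/*
 * Usage sketch (illustrative, not part of the original source) of the
 * contract above: alignment must be a power of two and at least
 * sizeof(void *).
 *
 *     void *p = NULL;
 *     int err = posix_memalign(&p, 64, 1024);   // 64 = 2^6 >= sizeof(void *)
 *     if (err == 0) {
 *         // p is 64-byte aligned
 *         free(p);
 *     } else if (err == EINVAL) {
 *         // bogus alignment argument
 *     } else {
 *         // ENOMEM
 *     }
 */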

static malloc_zone_t *
find_registered_purgeable_zone(void *ptr) {
    if (!ptr)
        return NULL;

    /*
     * Look for a zone which contains ptr. If that zone does not have the purgeable malloc flag
     * set, or the allocation is too small, do nothing. Otherwise, set the allocation volatile.
     * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones
     * and only search those.
     */
    size_t size = 0;
    malloc_zone_t *zone = find_registered_zone(ptr, &size);

    /* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined
     * binary compatibility impact of changing the introspect struct yet. */
    if (!zone)
        return NULL;

    /* Check to make sure pointer is page aligned and size is multiple of page size */
    if ((size < vm_page_size) || ((size % vm_page_size) != 0))
        return NULL;

    return zone;
}

void
malloc_make_purgeable(void *ptr) {
    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
    if (!zone)
        return;

    int state = VM_PURGABLE_VOLATILE;
    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
    return;
}

/* Returns 0 if ptr is valid and its contents survived, EFAULT if the object
 * was purged while volatile. Ignores the return value from vm_purgable_control()
 * and reports only the resulting state. */
int
malloc_make_nonpurgeable(void *ptr) {
    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
    if (!zone)
        return 0;

    int state = VM_PURGABLE_NONVOLATILE;
    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);

    if (state == VM_PURGABLE_EMPTY)
        return EFAULT;

    return 0;
}
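
/*
 * Usage sketch (illustrative, not part of the original source): purgeable
 * allocations come from the default purgeable zone, are page-sized or
 * larger, and can be marked volatile while a cache entry is cold.
 *
 *     malloc_zone_t *pz = malloc_default_purgeable_zone();
 *     void *cache = malloc_zone_malloc(pz, 4 * vm_page_size);
 *     // ... fill cache ...
 *     malloc_make_purgeable(cache);              // kernel may now reclaim it
 *     if (malloc_make_nonpurgeable(cache) == 0) {
 *         // contents survived; safe to reuse
 *     } else {
 *         // EFAULT: pages were purged; regenerate the contents
 *     }
 */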

size_t
malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
    if (!zone) {
        unsigned index = 0;
        size_t total = 0;

        // Take lock to defend against malloc_destroy_zone()
        MALLOC_LOCK();
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (zone->version < 8)
                continue;
            if (NULL == zone->pressure_relief)
                continue;
            if (0 == goal) /* Greedy */
                total += zone->pressure_relief(zone, 0);
            else if (goal > total)
                total += zone->pressure_relief(zone, goal - total);
            else /* total >= goal */
                break;
        }
        MALLOC_UNLOCK();
        return total;
    } else {
        // Assumes zone is not destroyed for the duration of this call
        if (zone->version < 8)
            return 0;
        if (NULL == zone->pressure_relief)
            return 0;
        return zone->pressure_relief(zone, goal);
    }
}

/********* Batch methods ************/

unsigned
malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
    unsigned (*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone->batch_malloc;
    if (!batch_malloc)
        return 0;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    unsigned batched = batch_malloc(zone, size, results, num_requested);
    if (malloc_logger) {
        unsigned index = 0;
        while (index < batched) {
            malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[index], 0);
            index++;
        }
    }
    return batched;
}

void
malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (malloc_logger) {
        unsigned index = 0;
        while (index < num) {
            malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
            index++;
        }
    }
    void (*batch_free)(malloc_zone_t *, void **, unsigned) = zone->batch_free;
    if (batch_free) {
        batch_free(zone, to_be_freed, num);
    } else {
        void (*free_fun)(malloc_zone_t *, void *) = zone->free;
        while (num--) {
            void *ptr = *to_be_freed++;
            free_fun(zone, ptr);
        }
    }
}
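
/*
 * Usage sketch (illustrative, not part of the original source): the batch
 * calls amortize per-call overhead for many same-sized blocks. A zone's
 * batch_malloc may return fewer blocks than requested, so use the count it
 * returns.
 *
 *     void *blocks[16];
 *     malloc_zone_t *z = malloc_default_zone();
 *     unsigned got = malloc_zone_batch_malloc(z, 48, blocks, 16);
 *     // ... use blocks[0 .. got-1] ...
 *     malloc_zone_batch_free(z, blocks, got);
 */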

/********* Functions for performance tools ************/

static kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}

kern_return_t
malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) {
    // Note that the following two addresses are not correct if the address space of the target differs from your own.
    // This notably occurs if the address of System.framework is slid (e.g. different than at B & I)
    vm_address_t remote_malloc_zones = (vm_address_t)&malloc_zones;
    vm_address_t remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
    kern_return_t err;
    vm_address_t zones_address;
    vm_address_t *zones_address_ref;
    unsigned num_zones;
    unsigned *num_zones_ref;
    if (!reader) reader = _malloc_default_reader;
    // printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones);
    err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
    // printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones_address at %p\n", (unsigned)remote_malloc_zones);
        return err;
    }
    zones_address = *zones_address_ref;
    // printf("Reading num_zones at address %p\n", remote_malloc_num_zones);
    err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading num_zones at %p\n", (unsigned)remote_malloc_num_zones);
        return err;
    }
    num_zones = *num_zones_ref;
    // printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones);
    *count = num_zones;
    // printf("malloc_get_all_zones successfully found %d zones\n", num_zones);
    err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", &zones_address);
        return err;
    }
    // printf("malloc_get_all_zones successfully read %d zones\n", num_zones);
    return err;
}
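
/*
 * Usage sketch (illustrative, not part of the original source): an
 * in-process caller can pass a NULL reader to get the default identity
 * reader; an out-of-process tool supplies a memory_reader_t that maps pages
 * from the target task instead.
 *
 *     vm_address_t *zones = NULL;
 *     unsigned count = 0, i;
 *     if (malloc_get_all_zones(mach_task_self(), NULL, &zones, &count) == KERN_SUCCESS) {
 *         for (i = 0; i < count; i++) {
 *             malloc_zone_t *z = (malloc_zone_t *)zones[i];
 *             malloc_printf("zone %u: %s\n", i, z->zone_name ? z->zone_name : "(unnamed)");
 *         }
 *     }
 */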

/********* Debug helpers ************/

void
malloc_zone_print_ptr_info(void *ptr) {
    malloc_zone_t *zone;
    if (!ptr) return;
    zone = malloc_zone_from_ptr(ptr);
    if (zone) {
        printf("ptr %p in registered zone %p\n", ptr, zone);
    } else {
        printf("ptr %p not in heap\n", ptr);
    }
}

boolean_t
malloc_zone_check(malloc_zone_t *zone) {
    boolean_t ok = 1;
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (!zone->introspect->check(zone)) ok = 0;
        }
    } else {
        ok = zone->introspect->check(zone);
    }
    return ok;
}

void
malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) {
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->print(zone, verbose);
        }
    } else {
        zone->introspect->print(zone, verbose);
    }
}

void
malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
    if (!zone) {
        memset(stats, 0, sizeof(*stats));
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            malloc_statistics_t this_stats;
            zone->introspect->statistics(zone, &this_stats);
            stats->blocks_in_use += this_stats.blocks_in_use;
            stats->size_in_use += this_stats.size_in_use;
            stats->max_size_in_use += this_stats.max_size_in_use;
            stats->size_allocated += this_stats.size_allocated;
        }
    } else {
        zone->introspect->statistics(zone, stats);
    }
}

void
malloc_zone_log(malloc_zone_t *zone, void *address) {
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->log(zone, address);
        }
    } else {
        zone->introspect->log(zone, address);
    }
}

/********* Misc other entry points ************/

static void
DefaultMallocError(int x) {
#if USE_SLEEP_RATHER_THAN_ABORT
    malloc_printf("*** error %d\n", x);
    sleep(3600);
#else
    _SIMPLE_STRING b = _simple_salloc();
    if (b) {
        _simple_sprintf(b, "*** error %d", x);
        malloc_printf("%s\n", _simple_string(b));
        CRSetCrashLogMessage(_simple_string(b));
    } else {
        _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error %d", x);
        CRSetCrashLogMessage("*** DefaultMallocError called");
    }
    abort();
#endif
}

void (*
malloc_error(void (*func)(int)))(int) {
    return DefaultMallocError;
}

/* Stack logging fork-handling prototypes */
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();
extern void __stack_logging_early_finished();

void
_malloc_fork_prepare() {
    /* Prepare the malloc module for a fork by ensuring that no thread is in a malloc critical section */
    unsigned index = 0;
    MALLOC_LOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_lock(zone);
    }
    __stack_logging_fork_prepare();
}

void
_malloc_fork_parent() {
    /* Called in the parent process after a fork() to resume normal operation. */
    unsigned index = 0;
    __stack_logging_fork_parent();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}

void
_malloc_fork_child() {
    /* Called in the child process after a fork() to resume normal operation. In the MTASK case we also have to change memory inheritance so that the child does not share memory with the parent. */
    unsigned index = 0;
    __stack_logging_fork_child();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}

/*
 * A Glibc-like mstats() interface.
 *
 * Note that this interface really isn't very good, as it doesn't understand
 * that we may have multiple allocators running at once. We just massage
 * the result from malloc_zone_statistics in any case.
 */
struct mstats
mstats(void)
{
    malloc_statistics_t s;
    struct mstats m;

    malloc_zone_statistics(NULL, &s);
    m.bytes_total = s.size_allocated;
    m.chunks_used = s.blocks_in_use;
    m.bytes_used = s.size_in_use;
    m.chunks_free = 0;
    m.bytes_free = m.bytes_total - m.bytes_used; /* isn't this somewhat obvious? */

    return(m);
}
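
/*
 * Usage sketch (illustrative, not part of the original source): a quick
 * aggregate snapshot across all registered zones.
 *
 *     struct mstats m = mstats();
 *     printf("%lu bytes used of %lu total in %lu blocks\n",
 *            (unsigned long)m.bytes_used, (unsigned long)m.bytes_total,
 *            (unsigned long)m.chunks_used);
 */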

boolean_t
malloc_zone_enable_discharge_checking(malloc_zone_t *zone)
{
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return FALSE;
    if (NULL == zone->introspect->enable_discharge_checking)
        return FALSE;
    return zone->introspect->enable_discharge_checking(zone);
}

void
malloc_zone_disable_discharge_checking(malloc_zone_t *zone)
{
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return;
    if (NULL == zone->introspect->disable_discharge_checking)
        return;
    zone->introspect->disable_discharge_checking(zone);
}

void
malloc_zone_discharge(malloc_zone_t *zone, void *memory)
{
    if (NULL == zone)
        zone = malloc_zone_from_ptr(memory);
    if (NULL == zone)
        return;
    if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
        return;
    if (NULL == zone->introspect->discharge)
        return;
    zone->introspect->discharge(zone, memory);
}

void
malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info))
{
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (zone->version < 7)
                continue;
            if (NULL == zone->introspect->enumerate_discharged_pointers)
                continue;
            zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
        }
    } else {
        if (zone->version < 7)
            return;
        if (NULL == zone->introspect->enumerate_discharged_pointers)
            return;
        zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
    }
}

/***************** OBSOLETE ENTRY POINTS ********************/

#if PHASE_OUT_OLD_MALLOC
#error PHASE OUT THE FOLLOWING FUNCTIONS
#else
#warning PHASE OUT THE FOLLOWING FUNCTIONS
#endif

void
set_malloc_singlethreaded(boolean_t single) {
    static boolean_t warned = 0;
    if (!warned) {
#if PHASE_OUT_OLD_MALLOC
        malloc_printf("*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
        warned = 1;
    }
}

void
malloc_singlethreaded() {
    static boolean_t warned = 0;
    if (!warned) {
        malloc_printf("*** OBSOLETE: malloc_singlethreaded()\n");
        warned = 1;
    }
}

int
malloc_debug(int level) {
    malloc_printf("*** OBSOLETE: malloc_debug()\n");
    return 0;
}