]> git.saurik.com Git - apple/libc.git/blob - gen/malloc.c
497a9b5a515a8a6fddefa8f62a32ef63e019a485
[apple/libc.git] / gen / malloc.c
1 /*
2 * Copyright (c) 1999, 2006-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <pthread_internals.h>
25 #include "magmallocProvider.h"
26 #include <mach-o/dyld.h> /* for NSVersionOfLinkTimeLibrary() */
27
28 #import <stdlib.h>
29 #import <stdio.h>
30 #import <string.h>
31 #import <unistd.h>
32 #import <malloc/malloc.h>
33 #import <fcntl.h>
34 #import <crt_externs.h>
35 #import <errno.h>
36 #import <pthread_internals.h>
37 #import <limits.h>
38 #import <dlfcn.h>
39 #import <mach/mach_vm.h>
40 #import <mach/mach_init.h>
41 #import <sys/mman.h>
42
43 #import "scalable_malloc.h"
44 #import "stack_logging.h"
45 #import "malloc_printf.h"
46 #import "_simple.h"
47 #import "CrashReporterClient.h"
48
49 /*
50 * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a
51 * user-specified size_t, which can cause overflow (and subsequent crashes)
52 * for values near SIZE_T_MAX. Rather than add extra "if" checks everywhere
53 * this occurs, it is easier to just set an absolute maximum request size,
54 * and immediately return an error if the requested size exceeds this maximum.
55 * Of course, values less than this absolute max can fail later if the value
56 * is still too large for the available memory. The largest value added
57 * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set
58 * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX.
59 */
60 #define MALLOC_ABSOLUTE_MAX_SIZE (SIZE_T_MAX - (2 * PAGE_SIZE))
61
62 #define USE_SLEEP_RATHER_THAN_ABORT 0
63
64 typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
65
66 __private_extern__ pthread_lock_t _malloc_lock = 0; // initialized in __libc_init
67
68 /* The following variables are exported for the benefit of performance tools
69 *
70 * It should always be safe to first read malloc_num_zones, then read
71 * malloc_zones without taking the lock, if only iteration is required and
72 * provided that when malloc_destroy_zone is called all prior operations on that
73 * zone are complete and no further calls referencing that zone can be made.
74 */
75 unsigned malloc_num_zones = 0;
76 unsigned malloc_num_zones_allocated = 0;
77 malloc_zone_t **malloc_zones = 0;
78 malloc_logger_t *malloc_logger = NULL;
79
80 unsigned malloc_debug_flags = 0;
81
82 unsigned malloc_check_start = 0; // 0 means don't check
83 unsigned malloc_check_counter = 0;
84 unsigned malloc_check_each = 1000;
85
86 /* global flag to suppress ASL logging e.g. for syslogd */
87 int _malloc_no_asl_log = 0;
88
89 static int malloc_check_sleep = 100; // default 100 second sleep
90 static int malloc_check_abort = 0; // default is to sleep, not abort
91
92 static int malloc_debug_file = STDERR_FILENO;
93 /*
94 * State indicated by malloc_def_zone_state
95 * 0 - the default zone has not yet been created
96 * 1 - a Malloc* environment variable has been set
97 * 2 - the default zone has been created and an environment variable scan done
98 * 3 - a new default zone has been created and another environment variable scan
99 */
100 __private_extern__ int malloc_def_zone_state = 0;
101 __private_extern__ malloc_zone_t *__zone0 = NULL;
102
103 static const char Malloc_Facility[] = "com.apple.Libsystem.malloc";
104
105 #define MALLOC_LOCK() LOCK(_malloc_lock)
106 #define MALLOC_UNLOCK() UNLOCK(_malloc_lock)
107
108 /*
109 * Counters that coordinate zone destruction (in malloc_zone_unregister) with
110 * find_registered_zone (here abbreviated as FRZ).
111 */
112 static int counterAlice = 0, counterBob = 0;
113 static int *pFRZCounterLive= &counterAlice, *pFRZCounterDrain = &counterBob;
114
115 #define MALLOC_LOG_TYPE_ALLOCATE stack_logging_type_alloc
116 #define MALLOC_LOG_TYPE_DEALLOCATE stack_logging_type_dealloc
117 #define MALLOC_LOG_TYPE_HAS_ZONE stack_logging_flag_zone
118 #define MALLOC_LOG_TYPE_CLEARED stack_logging_flag_cleared
119
120 /********* Utilities ************/
121 __private_extern__ uint64_t malloc_entropy[2] = {0, 0};
122
123 void __malloc_entropy_setup(const char *apple[]) __attribute__ ((visibility ("hidden")));
124
125 static int
126 __entropy_from_kernel(const char *str)
127 {
128 unsigned long long val;
129 char tmp[20], *p;
130 int idx = 0;
131
132 /* Skip over key to the first value */
133 str = strchr(str, '=');
134 if (str == NULL)
135 return 0;
136 str++;
137
138 while (str && idx < sizeof(malloc_entropy)/sizeof(malloc_entropy[0])) {
139 strlcpy(tmp, str, 20);
140 p = strchr(tmp, ',');
141 if (p) *p = '\0';
142 val = strtoull(tmp, NULL, 0);
143 malloc_entropy[idx] = (uint64_t)val;
144 idx++;
145 if ((str = strchr(str, ',')) != NULL)
146 str++;
147 }
148 return idx;
149 }
150
151 void
152 __malloc_entropy_setup(const char *apple[])
153 {
154 const char **p;
155 for (p = apple; p && *p; p++) {
156 if (strstr(*p, "malloc_entropy") == *p) {
157 if (sizeof(malloc_entropy)/sizeof(malloc_entropy[0]) == __entropy_from_kernel(*p))
158 return;
159 else
160 break;
161 }
162 }
163
164 malloc_entropy[0] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
165 malloc_entropy[1] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
166 return;
167 }
168
/*
 * Returns the registered zone that claims ownership of ptr, else NULL.
 * Optionally reports the allocation size through returned_size.
 *
 * Lock-free with respect to malloc_zone_unregister(): while scanning the
 * non-default zones this thread advertises itself via an FRZ counter, which
 * malloc_zone_unregister() drains before completing.  The statement order
 * here (capture counter pointer, increment, scan, decrement) is integral to
 * that protocol — do not reorder.
 */
static inline malloc_zone_t * find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
    // Returns a zone which contains ptr, else NULL

    if (0 == malloc_num_zones) {
	if (returned_size) *returned_size = 0;
	return NULL;
    }

    // The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered.
    // So don't advance the FRZ counter yet.
    malloc_zone_t *zone = malloc_zones[0];
    size_t size = zone->size(zone, ptr);
    if (size) { // Claimed by this zone?
	if (returned_size) *returned_size = size;
	return zone;
    }

    int *pFRZCounter = pFRZCounterLive; // Capture pointer to the counter of the moment
    __sync_fetch_and_add(pFRZCounter, 1); // Advance this counter -- our thread is in FRZ

    unsigned index;
    unsigned limit = malloc_num_zones;
    // Start at index 1; the default zone was already checked above.
    malloc_zone_t **zones = &malloc_zones[1];

    for (index = 1; index < limit; ++index, ++zones) {
	zone = *zones;
	size = zone->size(zone, ptr);
	if (size) { // Claimed by this zone?
	    if (returned_size) *returned_size = size;
	    __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
	    return zone;
	}
    }
    // Unclaimed by any zone.
    if (returned_size) *returned_size = 0;
    __sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
    return NULL;
}
209
__private_extern__ __attribute__((noinline)) void
malloc_error_break(void) {
	// Provides a non-inlined place for various malloc error procedures to call
	// that will be called after an error message appears. It does not make
	// sense for developers to call this function, so it is marked
	// __private_extern__ to prevent it from becoming API.
	// noinline keeps this symbol stable so debuggers can set a breakpoint here.
	MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe
}
218
219 __private_extern__ boolean_t __stack_logging_locked();
220
221 __private_extern__ __attribute__((noinline)) int
222 malloc_gdb_po_unsafe(void) {
223 // In order to implement "po" other data formatters in gdb, the debugger
224 // calls functions that call malloc. The debugger will only run one thread
225 // of the program in this case, so if another thread is holding a zone lock,
226 // gdb may deadlock in this case.
227 //
228 // Iterate over the zones in malloc_zones, and call "trylock" on the zone
229 // lock. If trylock succeeds, unlock it, otherwise return "locked". Returns
230 // 0 == safe, 1 == locked/unsafe.
231
232 if (__stack_logging_locked())
233 return 1;
234
235 malloc_zone_t **zones = malloc_zones;
236 unsigned i, e = malloc_num_zones;
237
238 for (i = 0; i != e; ++i) {
239 malloc_zone_t *zone = zones[i];
240
241 // Version must be >= 5 to look at the new introspection field.
242 if (zone->version < 5)
243 continue;
244
245 if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone))
246 return 1;
247 }
248 return 0;
249 }
250
251 /********* Creation and destruction ************/
252
253 static void set_flags_from_environment(void);
254
255 static void
256 malloc_zone_register_while_locked(malloc_zone_t *zone) {
257 size_t protect_size;
258 unsigned i;
259
260 /* scan the list of zones, to see if this zone is already registered. If
261 * so, print an error message and return. */
262 for (i = 0; i != malloc_num_zones; ++i)
263 if (zone == malloc_zones[i]) {
264 _malloc_printf(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
265 return;
266 }
267
268 if (malloc_num_zones == malloc_num_zones_allocated) {
269 size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
270 size_t alloc_size = malloc_zones_size + vm_page_size;
271
272 malloc_zone_t **new_zones = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);
273
274 /* If there were previously allocated malloc zones, we need to copy them
275 * out of the previous array and into the new zones array */
276 if (malloc_zones)
277 memcpy(new_zones, malloc_zones, malloc_zones_size);
278
279 /* Update the malloc_zones pointer, which we leak if it was previously
280 * allocated, and the number of zones allocated */
281 protect_size = alloc_size;
282 malloc_zones = new_zones;
283 malloc_num_zones_allocated = alloc_size / sizeof(malloc_zone_t *);
284 } else {
285 /* If we don't need to reallocate zones, we need to briefly change the
286 * page protection the malloc zones to allow writes */
287 protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
288 mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
289 }
290 malloc_zones[malloc_num_zones++] = zone;
291
292 /* Finally, now that the zone is registered, disallow write access to the
293 * malloc_zones array */
294 mprotect(malloc_zones, protect_size, PROT_READ);
295 //_malloc_printf(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones, malloc_num_zones, protect_size);
296 }
297
/*
 * One-time (per malloc_def_zone_state cycle) creation of the default
 * scalable zone.  Reads the Malloc* environment variables, creates the
 * zone, and moves it to malloc_zones[0] so lookups hit it first.
 * States: <2 means not yet created; the +=2 transition below makes this
 * idempotent until the state is reset elsewhere.
 */
static void
_malloc_initialize(void) {
    MALLOC_LOCK();
    if (malloc_def_zone_state < 2) {
	unsigned n;
	malloc_zone_t *zone;

	malloc_def_zone_state += 2;
	set_flags_from_environment(); // will only set flags up to two times
	n = malloc_num_zones;
	zone = create_scalable_zone(0, malloc_debug_flags);
	malloc_zone_register_while_locked(zone);
	malloc_set_zone_name(zone, "DefaultMallocZone");
	if (n != 0) { // make the default first, for efficiency
	    unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
	    malloc_zone_t *hold = malloc_zones[0];

	    // Strip the name off any previous default so the name stays unique.
	    if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
		malloc_set_zone_name(hold, NULL);
	    }

	    // The zones array is normally read-only; open it for the swap.
	    mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
	    malloc_zones[0] = malloc_zones[n];
	    malloc_zones[n] = hold;
	    mprotect(malloc_zones, protect_size, PROT_READ);
	}
    }
    MALLOC_UNLOCK();
}
329
static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_zone(void) {
    // Lazily create the default zone on first use (states >= 2 mean done).
    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_initialize() guarantees the default zone sits at index 0.
    return malloc_zones[0];
}
337
/* Public accessor for the process default zone (malloc_zones[0]). */
malloc_zone_t *
malloc_default_zone(void) {
    return inline_malloc_default_zone();
}
342
343 static inline malloc_zone_t *inline_malloc_default_scalable_zone(void) __attribute__((always_inline));
344 static inline malloc_zone_t *
345 inline_malloc_default_scalable_zone(void) {
346 unsigned index;
347
348 if (malloc_def_zone_state < 2) _malloc_initialize();
349 // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_scalable_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
350
351 MALLOC_LOCK();
352 for (index = 0; index < malloc_num_zones; ++index) {
353 malloc_zone_t *z = malloc_zones[index];
354
355 if(z->zone_name && strcmp(z->zone_name, "DefaultMallocZone") == 0) {
356 MALLOC_UNLOCK();
357 return z;
358 }
359 }
360 MALLOC_UNLOCK();
361
362 malloc_printf("*** malloc_default_scalable_zone() failed to find 'DefaultMallocZone'\n");
363 return NULL; // FIXME: abort() instead?
364 }
365
/*
 * Returns the singleton purgeable zone, creating it on first call.
 * Concurrent first callers may each build a zone; the compare-and-swap
 * below elects one winner and the losers destroy their extra zone, so
 * callers always see a single shared instance.
 */
malloc_zone_t *
malloc_default_purgeable_zone(void) {
    static malloc_zone_t *dpz;

    if (!dpz) {
	//
	// PR_7288598: Must pass a *scalable* zone (szone) as the helper for create_purgeable_zone().
	// Take care that the zone so obtained is not subject to interposing.
	//
	malloc_zone_t *tmp = create_purgeable_zone(0, inline_malloc_default_scalable_zone(), malloc_debug_flags);
	malloc_zone_register(tmp);
	malloc_set_zone_name(tmp, "DefaultPurgeableMallocZone");
	// Another thread won the race: discard our redundant zone.
	if (!__sync_bool_compare_and_swap(&dpz, NULL, tmp))
	    malloc_destroy_zone(tmp);
    }
    return dpz;
}
383
// For debugging, allow stack logging to both memory and disk to compare their results.
// Installed as malloc_logger when MallocStackLogging=both; simply forwards
// each event to the disk recorder and then the in-memory recorder.
static void
stack_logging_log_stack_debug(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
	__disk_stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
	stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
}
391
/*
 * Reads the Malloc* environment variables and configures debugging state:
 * log destination, guard pages, stack logging, scribbling, heap checking,
 * and abort-on-corruption policy.  Called from _malloc_initialize(); may
 * run more than once, so it first resets everything it controls to the
 * defaults.  All settings are ignored for set[ug]id processes.
 */
static void
set_flags_from_environment(void) {
    const char *flag;
    int fd;
    char **env = * _NSGetEnviron();
    char **p;
    char *c;

    /* Reset to defaults in case this is a re-scan. */
    if (malloc_debug_file != STDERR_FILENO) {
	close(malloc_debug_file);
	malloc_debug_file = STDERR_FILENO;
    }
#if __LP64__
    malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
#else
    /* 32-bit: default abort-on-corruption only when linked against a
     * sufficiently recent Libsystem (link-time version check). */
    int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
    if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126) /* CFSystemVersionBarolo */)
	malloc_debug_flags = 0;
    else
	malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
#endif
    stack_logging_enable_logging = 0;
    stack_logging_dontcompact = 0;
    malloc_logger = NULL;
    malloc_check_start = 0;
    malloc_check_each = 1000;
    malloc_check_abort = 0;
    malloc_check_sleep = 100;
    /*
     * Given that all environment variables start with "Malloc" we optimize by scanning quickly
     * first the environment, therefore avoiding repeated calls to getenv().
     * If we are setu/gid these flags are ignored to prevent a malicious invoker from changing
     * our behaviour.
     */
    for (p = env; (c = *p) != NULL; ++p) {
	if (!strncmp(c, "Malloc", 6)) {
	    if (issetugid())
		return;
	    break;
	}
    }
    /* c is NULL here iff no "Malloc*" variable exists at all. */
    if (c == NULL)
	return;
    flag = getenv("MallocLogFile");
    if (flag) {
	fd = open(flag, O_WRONLY|O_APPEND|O_CREAT, 0644);
	if (fd >= 0) {
	    malloc_debug_file = fd;
	    fcntl(fd, F_SETFD, 0); // clear close-on-exec flag XXX why?
	} else {
	    malloc_printf("Could not open %s, using stderr\n", flag);
	}
    }
    if (getenv("MallocGuardEdges")) {
	malloc_debug_flags |= SCALABLE_MALLOC_ADD_GUARD_PAGES;
	_malloc_printf(ASL_LEVEL_INFO, "protecting edges\n");
	if (getenv("MallocDoNotProtectPrelude")) {
	    malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_PRELUDE;
	    _malloc_printf(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
	}
	if (getenv("MallocDoNotProtectPostlude")) {
	    malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE;
	    _malloc_printf(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
	}
    }
    flag = getenv("MallocStackLogging");
    if (!flag) {
	/* NOTE(review): dontcompact is set even if NoCompact is also unset;
	 * harmless because logging stays disabled when flag ends up NULL. */
	flag = getenv("MallocStackLoggingNoCompact");
	stack_logging_dontcompact = 1;
    }
    // For debugging, the MallocStackLogging or MallocStackLoggingNoCompact environment variables can be set to
    // values of "memory", "disk", or "both" to control which stack logging mechanism to use. Those strings appear
    // in the flag variable, and the strtoul() call below will return 0, so then we can do string comparison on the
    // value of flag. The default stack logging now is disk stack logging, since memory stack logging is not 64-bit-aware.
    if (flag) {
	unsigned long val = strtoul(flag, NULL, 0);
	if (val == 1) val = 0;
	if (val == -1) val = 0;	/* -1 converts to ULONG_MAX for this compare */
	if (val) {
	    /* Any other numeric value is treated as the address of a custom recorder. */
	    malloc_logger = (void *)val;
	    _malloc_printf(ASL_LEVEL_INFO, "recording stacks using recorder %p\n", malloc_logger);
	} else if (strcmp(flag,"memory") == 0) {
	    malloc_logger = (malloc_logger_t *)stack_logging_log_stack;
	    _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks in memory using standard recorder\n");
	} else if (strcmp(flag,"both") == 0) {
	    malloc_logger = stack_logging_log_stack_debug;
	    _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to both memory and disk for comparison debugging\n");
	} else {	// the default is to log to disk
	    malloc_logger = __disk_stack_logging_log_stack;
	    _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to disk using standard recorder\n");
	}
	stack_logging_enable_logging = 1;
	if (stack_logging_dontcompact) {
	    if (malloc_logger == __disk_stack_logging_log_stack) {
		_malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; size of log files on disk can increase rapidly\n");
	    } else {
		_malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; VM can increase rapidly\n");
	    }
	}
    }
    if (getenv("MallocScribble")) {
	malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
	_malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
    }
    if (getenv("MallocErrorAbort")) {
	malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
	_malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
    }
#if __LP64__
    /* initialization above forces SCALABLE_MALLOC_ABORT_ON_CORRUPTION of 64-bit processes */
#else
    flag = getenv("MallocCorruptionAbort");
    if (flag && (flag[0] == '0')) { // Set from an environment variable in 32-bit processes
	malloc_debug_flags &= ~SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    } else if (flag) {
	malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
    }
#endif
    flag = getenv("MallocCheckHeapStart");
    if (flag) {
	malloc_check_start = strtoul(flag, NULL, 0);
	/* Clamp 0 and "-1" (== UINT_MAX after conversion) to 1. */
	if (malloc_check_start == 0) malloc_check_start = 1;
	if (malloc_check_start == -1) malloc_check_start = 1;
	flag = getenv("MallocCheckHeapEach");
	if (flag) {
	    malloc_check_each = strtoul(flag, NULL, 0);
	    if (malloc_check_each == 0) malloc_check_each = 1;
	    if (malloc_check_each == -1) malloc_check_each = 1;
	}
	_malloc_printf(ASL_LEVEL_INFO, "checks heap after %dth operation and each %d operations\n", malloc_check_start, malloc_check_each);
	flag = getenv("MallocCheckHeapAbort");
	if (flag)
	    malloc_check_abort = strtol(flag, NULL, 0);
	if (malloc_check_abort)
	    _malloc_printf(ASL_LEVEL_INFO, "will abort on heap corruption\n");
	else {
	    flag = getenv("MallocCheckHeapSleep");
	    if (flag)
		malloc_check_sleep = strtol(flag, NULL, 0);
	    if (malloc_check_sleep > 0)
		_malloc_printf(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
	    else if (malloc_check_sleep < 0)
		_malloc_printf(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
	    else
		_malloc_printf(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
	}
    }
    if (getenv("MallocHelp")) {
	_malloc_printf(ASL_LEVEL_INFO,
	    "environment variables that can be set for debug:\n"
	    "- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
	    "- MallocGuardEdges to add 2 guard pages for each large block\n"
	    "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
	    "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
	    "- MallocStackLogging to record all stacks. Tools like leaks can then be applied\n"
	    "- MallocStackLoggingNoCompact to record all stacks. Needed for malloc_history\n"
	    "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
	    "- MallocScribble to detect writing on free blocks and missing initializers:\n"
	    "  0x55 is written upon free and 0xaa is written on allocation\n"
	    "- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
	    "- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
	    "- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
	    "- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
	    "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
	    "  MallocCorruptionAbort is always set on 64-bit processes\n"
	    "- MallocErrorAbort to abort on any malloc error, including out of memory\n"
	    "- MallocHelp - this help!\n");
    }
}
561
562 malloc_zone_t *
563 malloc_create_zone(vm_size_t start_size, unsigned flags)
564 {
565 malloc_zone_t *zone;
566
567 /* start_size doesn't seemed to actually be used, but we test anyways */
568 if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
569 return NULL;
570 }
571 if (malloc_def_zone_state < 2) _malloc_initialize();
572 zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
573 malloc_zone_register(zone);
574 return zone;
575 }
576
/*
 * For use by CheckFix: establish a new default zone whose behavior is, apart from
 * the use of death-row and per-CPU magazines, that of Leopard.
 *
 * Creates a legacy scalable zone, registers it, takes over the
 * "DefaultMallocZone" name, and rotates the zones array so the new zone
 * occupies index 0 (the slot lookups consult first).
 */
void
malloc_create_legacy_default_zone(void)
{
    malloc_zone_t *zone;
    int i;

    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_legacy_scalable_zone(0, malloc_debug_flags);

    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);

    //
    // Establish the legacy scalable zone just created as the default zone.
    //
    malloc_zone_t *hold = malloc_zones[0];
    // Strip the canonical name off the old default so names stay unique.
    if(hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
	malloc_set_zone_name(hold, NULL);
    }
    malloc_set_zone_name(zone, "DefaultMallocZone");

    unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
    // The zones array is kept read-only; open it for the rotation below.
    mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

    // assert(zone == malloc_zones[malloc_num_zones - 1];
    // Shift every existing zone up one slot, then install the new default at 0.
    for (i = malloc_num_zones - 1; i > 0; --i) {
	malloc_zones[i] = malloc_zones[i - 1];
    }
    malloc_zones[0] = zone;

    mprotect(malloc_zones, protect_size, PROT_READ);
    MALLOC_UNLOCK();
}
614
/*
 * Tears down a zone: release its name, remove it from the registry (which
 * also waits out concurrent find_registered_zone scans), then destroy it.
 * The order matters — the zone must be unreachable before destroy runs.
 */
void
malloc_destroy_zone(malloc_zone_t *zone) {
    malloc_set_zone_name(zone, NULL); // Deallocate zone name wherever it may reside PR_7701095
    malloc_zone_unregister(zone);
    zone->destroy(zone);
}
621
/* called from the {put,set,unset}env routine */
/*
 * Historically re-armed the environment scan when a Malloc* variable was
 * changed programmatically; that behavior was disabled (see rdar below),
 * so this is now effectively a no-op that briefly takes the malloc lock.
 */
__private_extern__ void
__malloc_check_env_name(const char *name)
{
    MALLOC_LOCK();
    /*
     * <rdar://problem/8686255>
     *
     * 2. malloc will no longer take notice of *programmatic* changes to the MALLOC_* environment variables
     * (i.e. calls to putenv() or setenv() that manipulate these environment variables.)
     *
     */
#if 0
    if(malloc_def_zone_state == 2 && strncmp(name, "Malloc", 6) == 0)
	malloc_def_zone_state = 1;
#endif
    MALLOC_UNLOCK();
}
640
641 /********* Block creation and manipulation ************/
642
/*
 * Runs the MallocCheckHeap consistency check.  On success, snapshots the
 * current thread's stack so a later failure can report the last-known-good
 * operation; on failure, prints diagnostics and then aborts or sleeps
 * according to MallocCheckHeapAbort / MallocCheckHeapSleep.
 */
static void
internal_check(void) {
    static vm_address_t *frames = NULL;
    static unsigned num_frames;
    if (malloc_zone_check(NULL)) {
	// Heap consistent: record up to a page worth of PCs for later reporting.
	if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
	thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
    } else {
	_SIMPLE_STRING b = _simple_salloc();
	if (b)
	    _simple_sprintf(b, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
	else
	    _malloc_printf(MALLOC_PRINTF_NOLOG, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
	malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
	if (frames) {
	    // Skip frame 0 (this function) when printing the saved stack.
	    unsigned index = 1;
	    if (b) {
		_simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
		while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
		malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
	    } else {
		/*
		 * Should only get here if vm_allocate() can't get a single page of
		 * memory, implying _simple_asl_log() would also fail. So we just
		 * print to the file descriptor.
		 */
		_malloc_printf(MALLOC_PRINTF_NOLOG, "Stack for last operation where the malloc check succeeded: ");
		while (index < num_frames) _malloc_printf(MALLOC_PRINTF_NOLOG, "%p ", frames[index++]);
		_malloc_printf(MALLOC_PRINTF_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
	    }
	}
	if (malloc_check_each > 1) {
	    // Suggest a tighter start/each window to help bisect the failure.
	    unsigned recomm_each = (malloc_check_each > 10) ? malloc_check_each/10 : 1;
	    unsigned recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
	    malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
	}
	if (malloc_check_abort) {
	    CRSetCrashLogMessage(b ? _simple_string(b) : "*** MallocCheckHeap: FAILED check");
	    abort();
	} else if (b)
	    _simple_sfree(b);
	if (malloc_check_sleep > 0) {
	    _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
		malloc_check_sleep);
	    sleep(malloc_check_sleep);
	} else if (malloc_check_sleep < 0) {
	    // Negative sleep means: sleep only on the first failure.
	    _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping once for %d seconds to leave time to attach\n",
		-malloc_check_sleep);
	    sleep(-malloc_check_sleep);
	    malloc_check_sleep = 0;
	}
    }
    // Schedule the next check.
    malloc_check_start += malloc_check_each;
}
697
698 void *
699 malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
700 void *ptr;
701 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
702 internal_check();
703 }
704 if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
705 return NULL;
706 }
707 ptr = zone->malloc(zone, size);
708 if (malloc_logger)
709 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
710 return ptr;
711 }
712
713 void *
714 malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) {
715 void *ptr;
716 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
717 internal_check();
718 }
719 if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
720 return NULL;
721 }
722 ptr = zone->calloc(zone, num_items, size);
723 if (malloc_logger)
724 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, (uintptr_t)(num_items * size), 0,
725 (uintptr_t)ptr, 0);
726 return ptr;
727 }
728
729 void *
730 malloc_zone_valloc(malloc_zone_t *zone, size_t size) {
731 void *ptr;
732 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
733 internal_check();
734 }
735 if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
736 return NULL;
737 }
738 ptr = zone->valloc(zone, size);
739 if (malloc_logger)
740 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
741 return ptr;
742 }
743
744 void *
745 malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
746 void *new_ptr;
747 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
748 internal_check();
749 }
750 if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
751 return NULL;
752 }
753 new_ptr = zone->realloc(zone, ptr, size);
754 if (malloc_logger)
755 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, (uintptr_t)size,
756 (uintptr_t)new_ptr, 0);
757 return new_ptr;
758 }
759
/*
 * Frees ptr back to the given zone.  The logger runs BEFORE the heap check
 * and the actual free so the record captures ptr while it is still live —
 * do not reorder these statements.
 */
void
malloc_zone_free(malloc_zone_t *zone, void *ptr) {
    if (malloc_logger)
	malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
	internal_check();
    }
    zone->free(zone, ptr);
}
769
/*
 * Size-known variant of malloc_zone_free, used when the caller already
 * knows the block's size so the zone can skip its lookup.  Same ordering
 * constraint: log before checking/freeing.
 */
static void
malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
    if (malloc_logger)
	malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
	internal_check();
    }
    zone->free_definite_size(zone, ptr, size);
}
779
780 malloc_zone_t *
781 malloc_zone_from_ptr(const void *ptr) {
782 if (!ptr)
783 return NULL;
784 else
785 return find_registered_zone(ptr, NULL);
786 }
787
788 void *
789 malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
790 void *ptr;
791 if (zone->version < 5) // Version must be >= 5 to look at the new memalign field.
792 return NULL;
793 if (!(zone->memalign))
794 return NULL;
795 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
796 internal_check();
797 }
798 if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
799 return NULL;
800 }
801 if (alignment < sizeof( void *) || // excludes 0 == alignment
802 0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two.
803 return NULL;
804 }
805 ptr = zone->memalign(zone, alignment, size);
806 if (malloc_logger)
807 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
808 return ptr;
809 }
810
811 /********* Functions for zone implementors ************/
812
/* Public registration entry point: takes the lock and delegates to the
 * locked worker, which appends the zone to malloc_zones. */
void
malloc_zone_register(malloc_zone_t *zone) {
    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);
    MALLOC_UNLOCK();
}
819
/*
 * Removes z from the malloc_zones registry.  Coordinates with the lock-free
 * readers in find_registered_zone() via the paired FRZ counters: after the
 * array is updated, the counter roles are swapped and this thread spins
 * until every reader that might still hold a stale zone pointer has left.
 */
void
malloc_zone_unregister(malloc_zone_t *z) {
    unsigned index;

    if (malloc_num_zones == 0)
	return;

    MALLOC_LOCK();
    for (index = 0; index < malloc_num_zones; ++index) {
	if (z != malloc_zones[index])
	    continue;

	// Modify the page to be allow write access, so that we can update the
	// malloc_zones array.
	size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
	mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

	// If we found a match, replace it with the entry at the end of the list, shrink the list,
	// and leave the end of the list intact to avoid racing with find_registered_zone().

	malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
	--malloc_num_zones;

	mprotect(malloc_zones, protect_size, PROT_READ);

	// Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently
	// executing *inside* find_registered_zone is swapped with the counter drained to zero last time through.
	// The former is then allowed to drain to zero while this thread yields.
	int *p = pFRZCounterLive;
	pFRZCounterLive = pFRZCounterDrain;
	pFRZCounterDrain = p;
	__sync_synchronize(); // Full memory barrier

	while (0 != *pFRZCounterDrain) { pthread_yield_np(); }

	MALLOC_UNLOCK();

	return;
    }
    MALLOC_UNLOCK();
    malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
}
862
/*
 * Set (or clear, when 'name' is NULL) the human-readable name of zone 'z'.
 * The zone structure is normally kept read-only; it is made writable only
 * for the duration of the update.  The name is copied into storage
 * allocated from the zone itself, and any previous name is released.
 */
void
malloc_set_zone_name(malloc_zone_t *z, const char *name) {
    char *newName;

    mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);
    if (z->zone_name) {
        // The old name was allocated from a registered zone, so plain free()
        // can route it back to its owner.
        free((char *)z->zone_name);
        z->zone_name = NULL;
    }
    if (name) {
        size_t buflen = strlen(name) + 1;
        // Allocate the copy from the zone being named.
        newName = malloc_zone_malloc(z, buflen);
        if (newName) {
            strlcpy(newName, name, buflen);
            z->zone_name = (const char *)newName;
        } else {
            // Allocation failed: leave the zone nameless rather than dangling.
            z->zone_name = NULL;
        }
    }
    mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}
884
885 const char *
886 malloc_get_zone_name(malloc_zone_t *zone) {
887 return zone->zone_name;
888 }
889
890 /*
891 * XXX malloc_printf now uses _simple_*printf. It only deals with a
892 * subset of printf format specifiers, but it doesn't call malloc.
893 */
894
/*
 * Core diagnostic vprintf for the malloc subsystem.  Uses the _simple_*
 * printf family so that printing never calls malloc (we may be reporting a
 * corrupted heap).  Output goes to malloc_debug_file and, unless suppressed
 * by _malloc_no_asl_log or MALLOC_PRINTF_NOLOG, is also sent to ASL.
 * Unless MALLOC_PRINTF_NOPREFIX is set, each message is prefixed with
 * "progname(pid[,thread]) malloc: ".
 */
__private_extern__ void
_malloc_vprintf(int flags, const char *format, va_list ap)
{
    _SIMPLE_STRING b;

    // Fall back to direct fd output when ASL logging is disabled or when no
    // buffer can be obtained (e.g. under memory pressure).
    if (_malloc_no_asl_log || (flags & MALLOC_PRINTF_NOLOG) || (b = _simple_salloc()) == NULL) {
        if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
            if (__is_threaded) {
                /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
                _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
            } else {
                _simple_dprintf(malloc_debug_file, "%s(%d) malloc: ", getprogname(), getpid());
            }
        }
        _simple_vdprintf(malloc_debug_file, format, ap);
        return;
    }
    // Buffered path: build the message once, then emit it to both the debug
    // file and ASL.
    if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
        if (__is_threaded) {
            /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
            _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
        } else {
            _simple_sprintf(b, "%s(%d) malloc: ", getprogname(), getpid());
        }
    }
    _simple_vsprintf(b, format, ap);
    _simple_put(b, malloc_debug_file);
    _simple_asl_log(flags & MALLOC_PRINTF_LEVEL_MASK, Malloc_Facility, _simple_string(b));
    _simple_sfree(b);
}
925
/*
 * printf-style wrapper over _malloc_vprintf with caller-supplied flags
 * (MALLOC_PRINTF_* plus an ASL level in MALLOC_PRINTF_LEVEL_MASK).
 */
__private_extern__ void
_malloc_printf(int flags, const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(flags, format, ap);
    va_end(ap);
}
935
/*
 * Public diagnostic printf: logs at ASL error level with the standard
 * "progname(pid) malloc:" prefix.  Safe to call while the heap is corrupt,
 * since the implementation never allocates.
 */
void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(ASL_LEVEL_ERR, format, ap);
    va_end(ap);
}
945
946 /********* Generic ANSI callouts ************/
947
948 void *
949 malloc(size_t size) {
950 void *retval;
951 retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
952 if (retval == NULL) {
953 errno = ENOMEM;
954 }
955 return retval;
956 }
957
958 void *
959 calloc(size_t num_items, size_t size) {
960 void *retval;
961 retval = malloc_zone_calloc(inline_malloc_default_zone(), num_items, size);
962 if (retval == NULL) {
963 errno = ENOMEM;
964 }
965 return retval;
966 }
967
/*
 * ANSI free: locate the registered zone owning 'ptr' and release the block
 * through it.  Freeing NULL is a no-op.  An unrecognized pointer is
 * reported via malloc_printf()/malloc_error_break(), and aborts with a
 * crash-report message when the abort-on-error debug flags are set.  When
 * the owning zone is version >= 6 with a free_definite_size entry point,
 * the size already obtained from the lookup is forwarded to skip a second
 * size computation inside the zone.
 */
void
free(void *ptr) {
    malloc_zone_t *zone;
    size_t size;
    if (!ptr)
        return;
    zone = find_registered_zone(ptr, &size);
    if (!zone) {
        malloc_printf("*** error for object %p: pointer being freed was not allocated\n"
        "*** set a breakpoint in malloc_error_break to debug\n", ptr);
        malloc_error_break();
        if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
            // Leave a descriptive message for the crash reporter before aborting.
            _SIMPLE_STRING b = _simple_salloc();
            if (b) {
                _simple_sprintf(b, "*** error for object %p: pointer being freed was not allocated\n", ptr);
                CRSetCrashLogMessage(_simple_string(b));
            } else {
                // _simple_salloc itself failed; fall back to a static message.
                CRSetCrashLogMessage("*** error: pointer being freed was not allocated\n");
            }
            abort();
        }
    } else if (zone->version >= 6 && zone->free_definite_size)
        malloc_zone_free_definite_size(zone, ptr, size);
    else
        malloc_zone_free(zone, ptr);
}
994
/*
 * ANSI realloc.  NULL input behaves as malloc(new_size); a new_size of 0
 * allocates a minimum-size object and frees the original only after the
 * new allocation succeeds (see SUSv3 note below).  A pointer that belongs
 * to no registered zone is reported and may abort, mirroring free().
 * Sets errno to ENOMEM when the (re)allocation fails.
 */
void *
realloc(void *in_ptr, size_t new_size) {
    void *retval = NULL;
    void *old_ptr;
    malloc_zone_t *zone;
    size_t old_size = 0;

    // SUSv3: "If size is 0 and ptr is not a null pointer, the object
    // pointed to is freed. If the space cannot be allocated, the object
    // shall remain unchanged." Also "If size is 0, either a null pointer
    // or a unique pointer that can be successfully passed to free() shall
    // be returned." We choose to allocate a minimum size object by calling
    // malloc_zone_malloc with zero size, which matches "If ptr is a null
    // pointer, realloc() shall be equivalent to malloc() for the specified
    // size." So we only free the original memory if the allocation succeeds.
    old_ptr = (new_size == 0) ? NULL : in_ptr;
    if (!old_ptr) {
        retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
    } else {
        zone = find_registered_zone(old_ptr, &old_size);
        if (!zone) {
            malloc_printf("*** error for object %p: pointer being realloc'd was not allocated\n"
            "*** set a breakpoint in malloc_error_break to debug\n", old_ptr);
            malloc_error_break();
            if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
                // Leave a descriptive message for the crash reporter before aborting.
                _SIMPLE_STRING b = _simple_salloc();
                if (b) {
                    _simple_sprintf(b, "*** error for object %p: pointer being realloc'd was not allocated\n", old_ptr);
                    CRSetCrashLogMessage(_simple_string(b));
                } else {
                    CRSetCrashLogMessage("*** error: pointer being realloc'd was not allocated\n");
                }
                abort();
            }
        } else {
            retval = malloc_zone_realloc(zone, old_ptr, new_size);
        }
    }
    if (retval == NULL) {
        errno = ENOMEM;
    } else if (new_size == 0) {
        // Zero-size success: the minimum object replaces in_ptr, so free it now.
        free(in_ptr);
    }
    return retval;
}
1040
1041 void *
1042 valloc(size_t size) {
1043 void *retval;
1044 malloc_zone_t *zone = inline_malloc_default_zone();
1045 retval = malloc_zone_valloc(zone, size);
1046 if (retval == NULL) {
1047 errno = ENOMEM;
1048 }
1049 return retval;
1050 }
1051
/*
 * Historical alias for free(); retained for binary compatibility.
 */
extern void
vfree(void *ptr) {
    free(ptr);
}
1056
1057 size_t
1058 malloc_size(const void *ptr) {
1059 size_t size = 0;
1060
1061 if (!ptr)
1062 return size;
1063
1064 (void)find_registered_zone(ptr, &size);
1065 return size;
1066 }
1067
1068 size_t
1069 malloc_good_size (size_t size) {
1070 malloc_zone_t *zone = inline_malloc_default_zone();
1071 return zone->introspect->good_size(zone, size);
1072 }
1073
1074 /*
1075 * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
1076 * and shall return a pointer to the allocated memory in memptr.
1077 * The value of alignment shall be a multiple of sizeof( void *), that is also a power of two.
1078 * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
1079 *
1080 * Upon successful completion, posix_memalign() shall return zero; otherwise,
1081 * an error number shall be returned to indicate the error.
1082 *
1083 * The posix_memalign() function shall fail if:
1084 * EINVAL
1085 * The value of the alignment parameter is not a power of two multiple of sizeof( void *).
1086 * ENOMEM
1087 * There is insufficient memory available with the requested alignment.
1088 */
1089
/*
 * POSIX aligned allocation.  Returns 0 and stores the pointer in *memptr on
 * success; EINVAL when alignment is not a power-of-two multiple of
 * sizeof(void *); ENOMEM when memory is unavailable.  *memptr is written
 * only on success.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    void *retval;

    /* POSIX is silent on NULL == memptr !?! */

    retval = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
    if (retval == NULL) {
        // To avoid testing the alignment constraints redundantly, we'll rely on the
        // test made in malloc_zone_memalign to vet each request. Only if that test fails
        // and returns NULL, do we arrive here to detect the bogus alignment and give the
        // required EINVAL return.
        if (alignment < sizeof( void *) || // excludes 0 == alignment
            0 != (alignment & (alignment - 1))) { // relies on sizeof(void *) being a power of two.
            return EINVAL;
        }
        return ENOMEM;
    } else {
        *memptr = retval; // Set iff allocation succeeded
        return 0;
    }
}
1113
/*
 * Return the registered zone owning 'ptr' if the allocation is eligible for
 * purgeability changes (page-aligned size, at least one page); otherwise
 * NULL.  Used by malloc_make_purgeable()/malloc_make_nonpurgeable().
 */
static malloc_zone_t *
find_registered_purgeable_zone(void *ptr) {
    if (!ptr)
        return NULL;

    /*
     * Look for a zone which contains ptr. If that zone does not have the purgeable malloc flag
     * set, or the allocation is too small, do nothing. Otherwise, set the allocation volatile.
     * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones
     * and only search those.
     */
    size_t size = 0;
    malloc_zone_t *zone = find_registered_zone(ptr, &size);

    /* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined
     * binary compatibility impact of changing the introspect struct yet. */
    if (!zone)
        return NULL;

    /* Check to make sure pointer is page aligned and size is multiple of page size */
    if ((size < vm_page_size) || ((size % vm_page_size) != 0))
        return NULL;

    return zone;
}
1139
1140 void
1141 malloc_make_purgeable(void *ptr) {
1142 malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
1143 if (!zone)
1144 return;
1145
1146 int state = VM_PURGABLE_VOLATILE;
1147 vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
1148 return;
1149 }
1150
1151 /* Returns true if ptr is valid. Ignore the return value from vm_purgeable_control and only report
1152 * state. */
1153 int
1154 malloc_make_nonpurgeable(void *ptr) {
1155 malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
1156 if (!zone)
1157 return 0;
1158
1159 int state = VM_PURGABLE_NONVOLATILE;
1160 vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
1161
1162 if (state == VM_PURGABLE_EMPTY)
1163 return EFAULT;
1164
1165 return 0;
1166 }
1167
/*
 * Ask a zone (or, when 'zone' is NULL, every registered zone) to return up
 * to 'goal' bytes to the system; goal == 0 means "as much as possible".
 * Returns the total number of bytes reported relieved.  Only zones of
 * version >= 8 that implement pressure_relief participate.
 */
size_t malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
    if (!zone) {
        unsigned index = 0;
        size_t total = 0;

        // Take lock to defend against malloc_destroy_zone()
        MALLOC_LOCK();
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (zone->version < 8)
                continue;
            if (NULL == zone->pressure_relief)
                continue;
            if (0 == goal) /* Greedy */
                total += zone->pressure_relief(zone, 0);
            else if (goal > total)
                total += zone->pressure_relief(zone, goal - total);
            else /* total >= goal */
                break;
        }
        MALLOC_UNLOCK();
        return total;
    } else {
        // Assumes zone is not destroyed for the duration of this call
        if (zone->version < 8)
            return 0;
        if (NULL == zone->pressure_relief)
            return 0;
        return zone->pressure_relief(zone, goal);
    }
}
1200
1201 /********* Batch methods ************/
1202
1203 unsigned
1204 malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
1205 unsigned (*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone-> batch_malloc;
1206 if (! batch_malloc)
1207 return 0;
1208 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
1209 internal_check();
1210 }
1211 unsigned batched = batch_malloc(zone, size, results, num_requested);
1212 if (malloc_logger) {
1213 unsigned index = 0;
1214 while (index < batched) {
1215 malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[index], 0);
1216 index++;
1217 }
1218 }
1219 return batched;
1220 }
1221
1222 void
1223 malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
1224 if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
1225 internal_check();
1226 }
1227 if (malloc_logger) {
1228 unsigned index = 0;
1229 while (index < num) {
1230 malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
1231 index++;
1232 }
1233 }
1234 void (*batch_free)(malloc_zone_t *, void **, unsigned) = zone-> batch_free;
1235 if (batch_free) {
1236 batch_free(zone, to_be_freed, num);
1237 } else {
1238 void (*free_fun)(malloc_zone_t *, void *) = zone->free;
1239 while (num--) {
1240 void *ptr = *to_be_freed++;
1241 free_fun(zone, ptr);
1242 }
1243 }
1244 }
1245
1246 /********* Functions for performance tools ************/
1247
/*
 * Default memory_reader_t used by malloc_get_all_zones() when inspecting
 * our own task: the "remote" address is directly dereferenceable, so just
 * hand it back.  'task' and 'size' are unused in the in-process case.
 */
static kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}
1253
1254 kern_return_t
1255 malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) {
1256 // Note that the 2 following addresses are not correct if the address of the target is different from your own. This notably occurs if the address of System.framework is slid (e.g. different than at B & I )
1257 vm_address_t remote_malloc_zones = (vm_address_t)&malloc_zones;
1258 vm_address_t remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
1259 kern_return_t err;
1260 vm_address_t zones_address;
1261 vm_address_t *zones_address_ref;
1262 unsigned num_zones;
1263 unsigned *num_zones_ref;
1264 if (!reader) reader = _malloc_default_reader;
1265 // printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones);
1266 err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
1267 // printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref);
1268 if (err) {
1269 malloc_printf("*** malloc_get_all_zones: error reading zones_address at %p\n", (unsigned)remote_malloc_zones);
1270 return err;
1271 }
1272 zones_address = *zones_address_ref;
1273 // printf("Reading num_zones at address %p\n", remote_malloc_num_zones);
1274 err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
1275 if (err) {
1276 malloc_printf("*** malloc_get_all_zones: error reading num_zones at %p\n", (unsigned)remote_malloc_num_zones);
1277 return err;
1278 }
1279 num_zones = *num_zones_ref;
1280 // printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones);
1281 *count = num_zones;
1282 // printf("malloc_get_all_zones succesfully found %d zones\n", num_zones);
1283 err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
1284 if (err) {
1285 malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", &zones_address);
1286 return err;
1287 }
1288 // printf("malloc_get_all_zones succesfully read %d zones\n", num_zones);
1289 return err;
1290 }
1291
1292 /********* Debug helpers ************/
1293
1294 void
1295 malloc_zone_print_ptr_info(void *ptr) {
1296 malloc_zone_t *zone;
1297 if (!ptr) return;
1298 zone = malloc_zone_from_ptr(ptr);
1299 if (zone) {
1300 printf("ptr %p in registered zone %p\n", ptr, zone);
1301 } else {
1302 printf("ptr %p not in heap\n", ptr);
1303 }
1304 }
1305
1306 boolean_t
1307 malloc_zone_check(malloc_zone_t *zone) {
1308 boolean_t ok = 1;
1309 if (!zone) {
1310 unsigned index = 0;
1311 while (index < malloc_num_zones) {
1312 zone = malloc_zones[index++];
1313 if (!zone->introspect->check(zone)) ok = 0;
1314 }
1315 } else {
1316 ok = zone->introspect->check(zone);
1317 }
1318 return ok;
1319 }
1320
1321 void
1322 malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) {
1323 if (!zone) {
1324 unsigned index = 0;
1325 while (index < malloc_num_zones) {
1326 zone = malloc_zones[index++];
1327 zone->introspect->print(zone, verbose);
1328 }
1329 } else {
1330 zone->introspect->print(zone, verbose);
1331 }
1332 }
1333
1334 void
1335 malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
1336 if (!zone) {
1337 memset(stats, 0, sizeof(*stats));
1338 unsigned index = 0;
1339 while (index < malloc_num_zones) {
1340 zone = malloc_zones[index++];
1341 malloc_statistics_t this_stats;
1342 zone->introspect->statistics(zone, &this_stats);
1343 stats->blocks_in_use += this_stats.blocks_in_use;
1344 stats->size_in_use += this_stats.size_in_use;
1345 stats->max_size_in_use += this_stats.max_size_in_use;
1346 stats->size_allocated += this_stats.size_allocated;
1347 }
1348 } else {
1349 zone->introspect->statistics(zone, stats);
1350 }
1351 }
1352
1353 void
1354 malloc_zone_log(malloc_zone_t *zone, void *address) {
1355 if (!zone) {
1356 unsigned index = 0;
1357 while (index < malloc_num_zones) {
1358 zone = malloc_zones[index++];
1359 zone->introspect->log(zone, address);
1360 }
1361 } else {
1362 zone->introspect->log(zone, address);
1363 }
1364 }
1365
1366 /********* Misc other entry points ************/
1367
/*
 * Built-in malloc error handler (returned by malloc_error()).  Either
 * sleeps to allow a debugger to attach (USE_SLEEP_RATHER_THAN_ABORT) or
 * records the error in the crash-report message and aborts.
 */
static void
DefaultMallocError(int x) {
#if USE_SLEEP_RATHER_THAN_ABORT
    malloc_printf("*** error %d\n", x);
    sleep(3600);
#else
    // Build the message without malloc; _simple_salloc may itself fail
    // under memory pressure, in which case a static message is used.
    _SIMPLE_STRING b = _simple_salloc();
    if (b) {
        _simple_sprintf(b, "*** error %d", x);
        malloc_printf("%s\n", _simple_string(b));
        CRSetCrashLogMessage(_simple_string(b));
    } else {
        _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error %d", x);
        CRSetCrashLogMessage("*** DefaultMallocError called");
    }
    abort();
#endif
}
1386
/*
 * Obsolete interface that historically installed a custom error handler.
 * This implementation deliberately ignores 'func' and always returns the
 * built-in DefaultMallocError handler.
 */
void (*
malloc_error(void (*func)(int)))(int) {
    return DefaultMallocError;
}
1391
1392 /* Stack logging fork-handling prototypes */
1393 extern void __stack_logging_fork_prepare();
1394 extern void __stack_logging_fork_parent();
1395 extern void __stack_logging_fork_child();
1396
/*
 * fork() prepare handler: take the registry lock and force-lock every
 * registered zone so no thread is inside a malloc critical section across
 * the fork, then prepare the stack-logging machinery.
 */
void
_malloc_fork_prepare() {
    /* Prepare the malloc module for a fork by insuring that no thread is in a malloc critical section */
    unsigned index = 0;
    MALLOC_LOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_lock(zone);
    }
    __stack_logging_fork_prepare();
}
1408
/*
 * fork() parent handler: undo _malloc_fork_prepare() in the parent —
 * resume stack logging, release the registry lock, and force-unlock each
 * registered zone taken in the prepare handler.
 */
void
_malloc_fork_parent() {
    /* Called in the parent process after a fork() to resume normal operation. */
    unsigned index = 0;
    __stack_logging_fork_parent();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}
1420
/*
 * fork() child handler: same unwinding as the parent handler, but run in
 * the child, where the locks were inherited in the held state.
 */
void
_malloc_fork_child() {
    /* Called in the child process after a fork() to resume normal operation. In the MTASK case we also have to change memory inheritance so that the child does not share memory with the parent. */
    unsigned index = 0;
    __stack_logging_fork_child();
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}
1432
1433 /*
1434 * A Glibc-like mstats() interface.
1435 *
1436 * Note that this interface really isn't very good, as it doesn't understand
1437 * that we may have multiple allocators running at once. We just massage
1438 * the result from malloc_zone_statistics in any case.
1439 */
1440 struct mstats
1441 mstats(void)
1442 {
1443 malloc_statistics_t s;
1444 struct mstats m;
1445
1446 malloc_zone_statistics(NULL, &s);
1447 m.bytes_total = s.size_allocated;
1448 m.chunks_used = s.blocks_in_use;
1449 m.bytes_used = s.size_in_use;
1450 m.chunks_free = 0;
1451 m.bytes_free = m.bytes_total - m.bytes_used; /* isn't this somewhat obvious? */
1452
1453 return(m);
1454 }
1455
1456 boolean_t
1457 malloc_zone_enable_discharge_checking(malloc_zone_t *zone)
1458 {
1459 if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
1460 return FALSE;
1461 if (NULL == zone->introspect->enable_discharge_checking)
1462 return FALSE;
1463 return zone->introspect->enable_discharge_checking(zone);
1464 }
1465
1466 void
1467 malloc_zone_disable_discharge_checking(malloc_zone_t *zone)
1468 {
1469 if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
1470 return;
1471 if (NULL == zone->introspect->disable_discharge_checking)
1472 return;
1473 zone->introspect->disable_discharge_checking(zone);
1474 }
1475
1476 void
1477 malloc_zone_discharge(malloc_zone_t *zone, void *memory)
1478 {
1479 if (NULL == zone)
1480 zone = malloc_zone_from_ptr(memory);
1481 if (NULL == zone)
1482 return;
1483 if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
1484 return;
1485 if (NULL == zone->introspect->discharge)
1486 return;
1487 zone->introspect->discharge(zone, memory);
1488 }
1489
1490 void
1491 malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info))
1492 {
1493 if (!zone) {
1494 unsigned index = 0;
1495 while (index < malloc_num_zones) {
1496 zone = malloc_zones[index++];
1497 if (zone->version < 7)
1498 continue;
1499 if (NULL == zone->introspect->enumerate_discharged_pointers)
1500 continue;
1501 zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
1502 }
1503 } else {
1504 if (zone->version < 7)
1505 return;
1506 if (NULL == zone->introspect->enumerate_discharged_pointers)
1507 return;
1508 zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
1509 }
1510 }
1511
1512 /***************** OBSOLETE ENTRY POINTS ********************/
1513
1514 #if PHASE_OUT_OLD_MALLOC
1515 #error PHASE OUT THE FOLLOWING FUNCTIONS
1516 #else
1517 #warning PHASE OUT THE FOLLOWING FUNCTIONS
1518 #endif
1519
/*
 * Obsolete entry point retained for binary compatibility; has no effect.
 * Warns once (only when built with PHASE_OUT_OLD_MALLOC) and ignores the
 * argument.
 */
void
set_malloc_singlethreaded(boolean_t single) {
    static boolean_t warned = 0;
    if (!warned) {
#if PHASE_OUT_OLD_MALLOC
        malloc_printf("*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
        warned = 1;
    }
}
1530
1531 void
1532 malloc_singlethreaded() {
1533 static boolean_t warned = 0;
1534 if (!warned) {
1535 malloc_printf("*** OBSOLETE: malloc_singlethreaded()\n");
1536 warned = 1;
1537 }
1538 }
1539
/*
 * Obsolete entry point retained for binary compatibility; ignores 'level',
 * warns on every call, and always returns 0.
 */
int
malloc_debug(int level) {
    malloc_printf("*** OBSOLETE: malloc_debug()\n");
    return 0;
}