/*
 * Copyright (c) 1999, 2006, 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <pthread_internals.h>

#import <stdlib.h>
#import <stdio.h>
#import <string.h>
#import <unistd.h>
#import <objc/zone.h>
#import <malloc/malloc.h>
#import <fcntl.h>
#import <crt_externs.h>
#import <errno.h>
#import <pthread_internals.h>
#import <limits.h>
#import <dlfcn.h>

#import "scalable_malloc.h"
#import "stack_logging.h"
#import "malloc_printf.h"
#import "_simple.h"

/*
 * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a
 * user-specified size_t, which can cause overflow (and subsequent crashes)
 * for values near SIZE_T_MAX.  Rather than add extra "if" checks everywhere
 * this occurs, it is easier to just set an absolute maximum request size,
 * and immediately return an error if the requested size exceeds this maximum.
 * Of course, values less than this absolute max can fail later if the value
 * is still too large for the available memory.  The largest value added
 * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set
 * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX.
 */
#define MALLOC_ABSOLUTE_MAX_SIZE	(SIZE_T_MAX - (2 * PAGE_SIZE))
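/*
 * Illustrative sketch (not from the original source): why the cap above matters.
 * A request just below SIZE_T_MAX wraps around as soon as the allocator rounds
 * it up to a page boundary, e.g.:
 *
 *     size_t req = SIZE_T_MAX - 1;
 *     size_t rounded = round_page(req);   // addition of PAGE_SIZE - 1 overflows
 *
 * Rejecting anything above MALLOC_ABSOLUTE_MAX_SIZE up front keeps round_page()
 * and similar additions from overflowing.
 */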

#define USE_SLEEP_RATHER_THAN_ABORT	0

#define INITIAL_ZONES	8	// After this number, we reallocate for new zones

typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);
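/*
 * Illustrative sketch (not from the original source): a custom recorder matching
 * the malloc_logger_t signature above; set_flags_from_environment() below installs
 * the stack-logging recorders through this same pointer.
 *
 *     static void my_logger(uint32_t type, uintptr_t zone, uintptr_t size,
 *                           uintptr_t ptr, uintptr_t result, uint32_t skip) {
 *         // inspect allocation/deallocation events here
 *     }
 *     // ... then: malloc_logger = my_logger;   (my_logger is a hypothetical name)
 */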

__private_extern__ pthread_lock_t _malloc_lock = 0;	// initialized in __libc_init
static malloc_zone_t *initial_malloc_zones[INITIAL_ZONES] = {0};

/* The following variables are exported for the benefit of performance tools */
unsigned malloc_num_zones = 0;
malloc_zone_t **malloc_zones = initial_malloc_zones;
malloc_logger_t *malloc_logger = NULL;

unsigned malloc_debug_flags = 0;

unsigned malloc_check_start = 0;	// 0 means don't check
unsigned malloc_check_counter = 0;
unsigned malloc_check_each = 1000;

/* global flag to suppress ASL logging e.g. for syslogd */
int _malloc_no_asl_log = 0;

static int malloc_check_sleep = 100;	// default 100 second sleep
static int malloc_check_abort = 0;	// default is to sleep, not abort

static int malloc_debug_file = STDERR_FILENO;
/*
 * State indicated by malloc_def_zone_state
 * 0 - the default zone has not yet been created
 * 1 - a Malloc* environment variable has been set
 * 2 - the default zone has been created and an environment variable scan done
 * 3 - a new default zone has been created and another environment variable scan
 */
__private_extern__ int malloc_def_zone_state = 0;
__private_extern__ malloc_zone_t *__zone0 = NULL;

static const char Malloc_Facility[] = "com.apple.Libsystem.malloc";

#define MALLOC_LOCK()		LOCK(_malloc_lock)
#define MALLOC_UNLOCK()		UNLOCK(_malloc_lock)

#define MALLOC_LOG_TYPE_ALLOCATE	stack_logging_type_alloc
#define MALLOC_LOG_TYPE_DEALLOCATE	stack_logging_type_dealloc
#define MALLOC_LOG_TYPE_HAS_ZONE	stack_logging_flag_zone
#define MALLOC_LOG_TYPE_CLEARED		stack_logging_flag_cleared

/*********	Utilities	************/

static inline malloc_zone_t * find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
    // Returns a zone which may contain ptr, or NULL.
    // Speed is critical for this function, so it is not guaranteed to return
    // the zone which contains ptr.  For N zones, zones 1 through N - 1 are
    // checked to see if they contain ptr.  If so, the zone containing ptr is
    // returned.  Otherwise the last zone is returned, since it is the last zone
    // in which ptr may reside.  Clients should call zone->size(ptr) on the
    // return value to determine whether or not ptr is an allocated object.
    // This behavior optimizes for the case where ptr is an allocated object,
    // and there is only one zone.
    unsigned index, limit = malloc_num_zones;
    if (limit == 0)
        return NULL;

    malloc_zone_t **zones = malloc_zones;
    for (index = 0; index < limit - 1; ++index, ++zones) {
        malloc_zone_t *zone = *zones;
        size_t size = zone->size(zone, ptr);
        if (size) {
            if (returned_size) *returned_size = size;
            return zone;
        }
    }
    return malloc_zones[index];
}

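/*
 * Illustrative sketch (not from the original source): because the helper above may
 * return the last zone as a guess, callers that need certainty confirm ownership
 * with the zone's size() callback, as malloc_size() and malloc_zone_from_ptr() do:
 *
 *     size_t sz = 0;
 *     malloc_zone_t *z = find_registered_zone(p, &sz);
 *     if (z && (sz || z->size(z, p)))
 *         ...  // p really is an allocated block in z
 */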
__private_extern__ __attribute__((noinline)) void
malloc_error_break(void) {
    // Provides a non-inlined place for various malloc error procedures to call
    // that will be called after an error message appears.  It does not make
    // sense for developers to call this function, so it is marked
    // __private_extern__ to prevent it from becoming API.
}

/*********	Creation and destruction	************/

static void set_flags_from_environment(void);

// malloc_zone_register_while_locked may drop the lock temporarily
static void
malloc_zone_register_while_locked(malloc_zone_t *zone) {
    /* Note that given the sequencing it is always safe to first get the number of zones, then get malloc_zones without taking the lock, if all you need is to iterate through the list */
    if (malloc_num_zones >= INITIAL_ZONES) {
        malloc_zone_t **zones = malloc_zones;
        malloc_zone_t *pzone = malloc_zones[0];
        boolean_t copy = malloc_num_zones == INITIAL_ZONES;
        if (copy) zones = NULL;	// to avoid realloc on something not allocated
        MALLOC_UNLOCK();
        zones = pzone->realloc(pzone, zones, (malloc_num_zones + 1) * sizeof(malloc_zone_t *));	// we leak initial_malloc_zones, not worth tracking it
        MALLOC_LOCK();
        if (copy) memcpy(zones, malloc_zones, malloc_num_zones * sizeof(malloc_zone_t *));
        malloc_zones = zones;
    }
    malloc_zones[malloc_num_zones] = zone;
    malloc_num_zones++;	// note that we increment the count only after the zone pointer is stored, so enumerations without taking the lock are safe
    // _malloc_printf(ASL_LEVEL_INFO, "Registered %p malloc_zones at address %p is %p [%d zones]\n", zone, &malloc_zones, malloc_zones, malloc_num_zones);
}
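/*
 * Illustrative sketch (not from the original source): the ordering above is what
 * lets tools walk the zone list without the lock — read the count first, then the
 * array pointer:
 *
 *     unsigned n = malloc_num_zones;
 *     malloc_zone_t **zs = malloc_zones;
 *     for (unsigned i = 0; i < n; i++)
 *         ...  // zs[i] is a fully registered zone
 */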

static void
_malloc_initialize(void) {
    MALLOC_LOCK();
    if (malloc_def_zone_state < 2) {
        unsigned n;
        malloc_zone_t *zone;

        malloc_def_zone_state += 2;
        set_flags_from_environment();	// will only set flags up to two times
        n = malloc_num_zones;
        zone = create_scalable_zone(0, malloc_debug_flags);
        // malloc_zone_register_while_locked may drop the lock temporarily
        malloc_zone_register_while_locked(zone);
        malloc_set_zone_name(zone, "DefaultMallocZone");
        if (n != 0) {	// make the default first, for efficiency
            malloc_zone_t *hold = malloc_zones[0];
            if (hold->zone_name && strcmp(hold->zone_name, "DefaultMallocZone") == 0) {
                free((void *)hold->zone_name);
                hold->zone_name = NULL;
            }
            malloc_zones[0] = malloc_zones[n];
            malloc_zones[n] = hold;
        }
        // _malloc_printf(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
        // _malloc_printf(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, (unsigned)&malloc_num_zones);
    }
    MALLOC_UNLOCK();
}

static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_zone(void) {
    if (malloc_def_zone_state < 2) _malloc_initialize();
    // _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
    return malloc_zones[0];
}

malloc_zone_t *
malloc_default_zone(void) {
    return inline_malloc_default_zone();
}

// For debugging, allow stack logging to both memory and disk to compare their results.
static void
stack_logging_log_stack_debug(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t size, uintptr_t ptr_arg, uintptr_t return_val, uint32_t num_hot_to_skip)
{
    __disk_stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
    stack_logging_log_stack(type_flags, zone_ptr, size, ptr_arg, return_val, num_hot_to_skip);
}

static void
set_flags_from_environment(void) {
    const char *flag;
    int fd;
    char **env = * _NSGetEnviron();
    char **p;
    char *c;

    if (malloc_debug_file != STDERR_FILENO) {
        close(malloc_debug_file);
        malloc_debug_file = STDERR_FILENO;
    }
    malloc_debug_flags = 0;
    stack_logging_enable_logging = 0;
    stack_logging_dontcompact = 0;
    malloc_logger = NULL;
    malloc_check_start = 0;
    malloc_check_each = 1000;
    malloc_check_abort = 0;
    malloc_check_sleep = 100;
    /*
     * Given that all of our environment variable names start with "Malloc", we
     * optimize by quickly scanning the environment directly first, thereby avoiding
     * repeated calls to getenv().  If we are setuid or setgid these flags are
     * ignored, to prevent a malicious invoker from changing our behaviour.
     */
    for (p = env; (c = *p) != NULL; ++p) {
        if (!strncmp(c, "Malloc", 6)) {
            if (issetugid())
                return;
            break;
        }
    }
    if (c == NULL)
        return;
    flag = getenv("MallocLogFile");
    if (flag) {
        fd = open(flag, O_WRONLY|O_APPEND|O_CREAT, 0644);
        if (fd >= 0) {
            malloc_debug_file = fd;
            fcntl(fd, F_SETFD, 0);	// clear close-on-exec flag  XXX why?
        } else {
            malloc_printf("Could not open %s, using stderr\n", flag);
        }
    }
    if (getenv("MallocGuardEdges")) {
        malloc_debug_flags = SCALABLE_MALLOC_ADD_GUARD_PAGES;
        _malloc_printf(ASL_LEVEL_INFO, "protecting edges\n");
        if (getenv("MallocDoNotProtectPrelude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_PRELUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
        }
        if (getenv("MallocDoNotProtectPostlude")) {
            malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE;
            _malloc_printf(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
        }
    }
    flag = getenv("MallocStackLogging");
    if (!flag) {
        flag = getenv("MallocStackLoggingNoCompact");
        stack_logging_dontcompact = 1;
    }
    // For debugging, the MallocStackLogging or MallocStackLoggingNoCompact environment variables can be set to
    // values of "memory", "disk", or "both" to control which stack logging mechanism to use.  Those strings appear
    // in the flag variable, and the strtoul() call below will return 0, so then we can do string comparison on the
    // value of flag.  The default stack logging now is disk stack logging, since memory stack logging is not 64-bit-aware.
    if (flag) {
        unsigned long val = strtoul(flag, NULL, 0);
        if (val == 1) val = 0;
        if (val == -1) val = 0;
        if (val) {
            malloc_logger = (void *)val;
            _malloc_printf(ASL_LEVEL_INFO, "recording stacks using recorder %p\n", malloc_logger);
        } else if (strcmp(flag,"memory") == 0) {
            malloc_logger = stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks in memory using standard recorder\n");
        } else if (strcmp(flag,"both") == 0) {
            malloc_logger = stack_logging_log_stack_debug;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to both memory and disk for comparison debugging\n");
        } else {	// the default is to log to disk
            malloc_logger = __disk_stack_logging_log_stack;
            _malloc_printf(ASL_LEVEL_INFO, "recording malloc stacks to disk using standard recorder\n");
        }
        stack_logging_enable_logging = 1;
        if (stack_logging_dontcompact) {
            if (malloc_logger == __disk_stack_logging_log_stack) {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; size of log files on disk can increase rapidly\n");
            } else {
                _malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; VM can increase rapidly\n");
            }
        }
    }
    if (getenv("MallocScribble")) {
        malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
        _malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
    }
    if (getenv("MallocErrorAbort")) {
        malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
        _malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
    }
    flag = getenv("MallocCheckHeapStart");
    if (flag) {
        malloc_check_start = strtoul(flag, NULL, 0);
        if (malloc_check_start == 0) malloc_check_start = 1;
        if (malloc_check_start == -1) malloc_check_start = 1;
        flag = getenv("MallocCheckHeapEach");
        if (flag) {
            malloc_check_each = strtoul(flag, NULL, 0);
            if (malloc_check_each == 0) malloc_check_each = 1;
            if (malloc_check_each == -1) malloc_check_each = 1;
        }
        _malloc_printf(ASL_LEVEL_INFO, "checks heap after %dth operation and each %d operations\n", malloc_check_start, malloc_check_each);
        flag = getenv("MallocCheckHeapAbort");
        if (flag)
            malloc_check_abort = strtol(flag, NULL, 0);
        if (malloc_check_abort)
            _malloc_printf(ASL_LEVEL_INFO, "will abort on heap corruption\n");
        else {
            flag = getenv("MallocCheckHeapSleep");
            if (flag)
                malloc_check_sleep = strtol(flag, NULL, 0);
            if (malloc_check_sleep > 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
            else if (malloc_check_sleep < 0)
                _malloc_printf(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
            else
                _malloc_printf(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
        }
    }
    if (getenv("MallocHelp")) {
        _malloc_printf(ASL_LEVEL_INFO,
            "environment variables that can be set for debug:\n"
            "- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
            "- MallocGuardEdges to add 2 guard pages for each large block\n"
            "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
            "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
            "- MallocStackLogging to record all stacks.  Tools like leaks can then be applied\n"
            "- MallocStackLoggingNoCompact to record all stacks.  Needed for malloc_history\n"
            "- MallocScribble to detect writing on free blocks and missing initializers:\n"
            "  0x55 is written upon free and 0xaa is written on allocation\n"
            "- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
            "- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
            "- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
            "- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
            "- MallocErrorAbort to abort on a bad malloc or free\n"
            "- MallocHelp - this help!\n");
    }
}
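/*
 * Illustrative sketch (not from the original source): these flags are read once per
 * environment scan, so they are normally set before the program starts, e.g. from a
 * shell, and then inspected with the tools mentioned in the help text above:
 *
 *     MallocStackLoggingNoCompact=1 MallocGuardEdges=1 ./a.out
 *     leaks <pid>                       # or: malloc_history <pid> <address>
 */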

malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
    malloc_zone_t *zone;

    /* start_size doesn't seem to actually be used, but we test it anyway */
    if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    if (malloc_def_zone_state < 2) _malloc_initialize();
    zone = create_scalable_zone(start_size, malloc_debug_flags);
    malloc_zone_register(zone);
    return zone;
}

void
malloc_destroy_zone(malloc_zone_t *zone) {
    malloc_zone_unregister(zone);
    zone->destroy(zone);
}
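/*
 * Illustrative sketch (not from the original source): typical client use of the
 * pair above — allocations made in a private zone can be torn down all at once by
 * destroying the zone:
 *
 *     malloc_zone_t *z = malloc_create_zone(0, 0);
 *     void *p = malloc_zone_malloc(z, 128);
 *     ...
 *     malloc_destroy_zone(z);   // releases everything still allocated in z
 */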

/* called from the {put,set,unset}env routine */
__private_extern__ void
__malloc_check_env_name(const char *name)
{
    MALLOC_LOCK();
    if (malloc_def_zone_state == 2 && strncmp(name, "Malloc", 6) == 0)
        malloc_def_zone_state = 1;
    MALLOC_UNLOCK();
}
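/*
 * Illustrative sketch (not from the original source): dropping the state back to 1
 * here makes the next entry into _malloc_initialize() rescan the environment, so a
 * change such as
 *
 *     setenv("MallocScribble", "1", 1);
 *
 * can still take effect after the default zone already exists (state 3 in the table
 * near the top of this file).
 */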

/*********	Block creation and manipulation	************/

static void
internal_check(void) {
    static vm_address_t *frames = NULL;
    static unsigned num_frames;
    if (malloc_zone_check(NULL)) {
        _malloc_printf(ASL_LEVEL_NOTICE, "MallocCheckHeap: PASSED check at %dth operation\n", malloc_check_counter-1);
        if (!frames) vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
        thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
    } else {
        malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
        if (frames) {
            unsigned index = 1;
            _SIMPLE_STRING b = _simple_salloc();
            if (b) {
                _simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
                malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
                _simple_sfree(b);
            } else {
                /*
                 * Should only get here if vm_allocate() can't get a single page of
                 * memory, implying _simple_asl_log() would also fail.  So we just
                 * print to the file descriptor.
                 */
                _malloc_printf(MALLOC_PRINTF_NOLOG, "Stack for last operation where the malloc check succeeded: ");
                while (index < num_frames) _malloc_printf(MALLOC_PRINTF_NOLOG, "%p ", frames[index++]);
                _malloc_printf(MALLOC_PRINTF_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
            }
        }
        if (malloc_check_each > 1) {
            unsigned recomm_each = (malloc_check_each > 10) ? malloc_check_each/10 : 1;
            unsigned recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
            malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
        }
        if (malloc_check_abort)
            abort();
        if (malloc_check_sleep > 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
                malloc_check_sleep);
            sleep(malloc_check_sleep);
        } else if (malloc_check_sleep < 0) {
            _malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping once for %d seconds to leave time to attach\n",
                -malloc_check_sleep);
            sleep(-malloc_check_sleep);
            malloc_check_sleep = 0;
        }
    }
    malloc_check_start += malloc_check_each;
}

void *
malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->malloc(zone, size);
    if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->calloc(zone, num_items, size);
    if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, (uintptr_t)(num_items * size), 0, (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_valloc(malloc_zone_t *zone, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->valloc(zone, size);
    if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}

void *
malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
    void *new_ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    new_ptr = zone->realloc(zone, ptr, size);
    if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, (uintptr_t)size, (uintptr_t)new_ptr, 0);
    return new_ptr;
}

void
malloc_zone_free(malloc_zone_t *zone, void *ptr) {
    if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    zone->free(zone, ptr);
}

malloc_zone_t *
malloc_zone_from_ptr(const void *ptr) {
    malloc_zone_t *zone;
    if (!ptr)
        return NULL;
    zone = find_registered_zone(ptr, NULL);
    if (zone && zone->size(zone, ptr))
        return zone;
    return NULL;
}

/*********	Functions for zone implementors	************/

void
malloc_zone_register(malloc_zone_t *zone) {
    MALLOC_LOCK();
    malloc_zone_register_while_locked(zone);
    MALLOC_UNLOCK();
}

void
malloc_zone_unregister(malloc_zone_t *z) {
    unsigned index;
    MALLOC_LOCK();
    index = malloc_num_zones;
    while (index--) {
        malloc_zone_t *zone = malloc_zones[index];
        if (zone == z) {
            malloc_zones[index] = malloc_zones[--malloc_num_zones];
            MALLOC_UNLOCK();
            return;
        }
    }
    MALLOC_UNLOCK();
    malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
}

void
malloc_set_zone_name(malloc_zone_t *z, const char *name) {
    char *newName;
    if (z->zone_name) {
        free((char *)z->zone_name);
        z->zone_name = NULL;
    }
    newName = malloc_zone_malloc(z, strlen(name) + 1);
    strcpy(newName, name);
    z->zone_name = (const char *)newName;
}

const char *
malloc_get_zone_name(malloc_zone_t *zone) {
    return zone->zone_name;
}

/*
 * XXX malloc_printf now uses _simple_*printf.  It only deals with a
 * subset of printf format specifiers, but it doesn't call malloc.
 */

__private_extern__ void
_malloc_vprintf(int flags, const char *format, va_list ap)
{
    _SIMPLE_STRING b;

    if (_malloc_no_asl_log || (flags & MALLOC_PRINTF_NOLOG) || (b = _simple_salloc()) == NULL) {
        if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
            if (__is_threaded) {
                /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
                _simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
            } else {
                _simple_dprintf(malloc_debug_file, "%s(%d) malloc: ", getprogname(), getpid());
            }
        }
        _simple_vdprintf(malloc_debug_file, format, ap);
        return;
    }
    if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
        if (__is_threaded) {
            /* XXX somewhat rude 'knowing' that pthread_t is a pointer */
            _simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), (void *)pthread_self());
        } else {
            _simple_sprintf(b, "%s(%d) malloc: ", getprogname(), getpid());
        }
    }
    _simple_vsprintf(b, format, ap);
    _simple_put(b, malloc_debug_file);
    _simple_asl_log(flags & MALLOC_PRINTF_LEVEL_MASK, Malloc_Facility, _simple_string(b));
    _simple_sfree(b);
}

__private_extern__ void
_malloc_printf(int flags, const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(flags, format, ap);
    va_end(ap);
}

void
malloc_printf(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    _malloc_vprintf(ASL_LEVEL_ERR, format, ap);
    va_end(ap);
}

/*********	Generic ANSI callouts	************/

void *
malloc(size_t size) {
    void *retval;
    retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

void *
calloc(size_t num_items, size_t size) {
    void *retval;
    retval = malloc_zone_calloc(inline_malloc_default_zone(), num_items, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

void
free(void *ptr) {
    malloc_zone_t *zone;
    if (!ptr) return;
    zone = find_registered_zone(ptr, NULL);
    if (zone)
        malloc_zone_free(zone, ptr);
}

void *
realloc(void *in_ptr, size_t new_size) {
    void *retval;
    void *old_ptr;
    malloc_zone_t *zone;
    size_t old_size = 0;

    // SUSv3: "If size is 0 and ptr is not a null pointer, the object
    // pointed to is freed.  If the space cannot be allocated, the object
    // shall remain unchanged."  Also "If size is 0, either a null pointer
    // or a unique pointer that can be successfully passed to free() shall
    // be returned."  We choose to allocate a minimum size object by calling
    // malloc_zone_malloc with zero size, which matches "If ptr is a null
    // pointer, realloc() shall be equivalent to malloc() for the specified
    // size."  So we only free the original memory if the allocation succeeds.
    old_ptr = (new_size == 0) ? NULL : in_ptr;
    if (!old_ptr) {
        retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
    } else {
        zone = find_registered_zone(old_ptr, &old_size);
        if (zone && (old_size == 0))
            old_size = zone->size(zone, old_ptr);
        if (zone && (old_size >= new_size))
            return old_ptr;
        /*
         * If old_size is still 0 here, it means that either zone was NULL or
         * the call to zone->size() returned 0, indicating the pointer is not
         * in that zone.  In this case, just use the default zone.
         */
        if (old_size == 0)
            zone = inline_malloc_default_zone();
        retval = malloc_zone_realloc(zone, old_ptr, new_size);
    }
    if (retval == NULL) {
        errno = ENOMEM;
    } else if (new_size == 0) {
        free(in_ptr);
    }
    return retval;
}
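/*
 * Illustrative sketch (not from the original source): the net effect of the SUSv3
 * handling above for a zero-size shrink —
 *
 *     void *p = malloc(64);
 *     void *q = realloc(p, 0);   // p is freed only after the minimum-size
 *                                // allocation succeeds; q can be passed to free()
 */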

void *
valloc(size_t size) {
    void *retval;
    malloc_zone_t *zone = inline_malloc_default_zone();
    retval = malloc_zone_valloc(zone, size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}

extern void
vfree(void *ptr) {
    free(ptr);
}

size_t
malloc_size(const void *ptr) {
    size_t size = 0;
    if (!ptr) return size;
    malloc_zone_t *zone = find_registered_zone(ptr, &size);
    /*
     * If we found a zone, and size is 0 then we need to check to see if that
     * zone contains ptr.  If size is nonzero, then we know zone contains ptr.
     */
    if (zone && (size == 0))
        size = zone->size(zone, ptr);
    return size;
}

size_t
malloc_good_size (size_t size) {
    malloc_zone_t *zone = inline_malloc_default_zone();
    return zone->introspect->good_size(zone, size);
}

/*********	Batch methods	************/

unsigned
malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
    unsigned (*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone->batch_malloc;
    if (!batch_malloc) return 0;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    unsigned batched = batch_malloc(zone, size, results, num_requested);
    if (malloc_logger) {
        unsigned index = 0;
        while (index < batched) {
            malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[index], 0);
            index++;
        }
    }
    return batched;
}

void
malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (malloc_logger) {
        unsigned index = 0;
        while (index < num) {
            malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
            index++;
        }
    }
    void (*batch_free)(malloc_zone_t *, void **, unsigned) = zone->batch_free;
    if (batch_free) {
        batch_free(zone, to_be_freed, num);
    } else {
        void (*free_fun)(malloc_zone_t *, void *) = zone->free;
        while (num--) {
            void *ptr = *to_be_freed++;
            free_fun(zone, ptr);
        }
    }
}

/*********	Functions for performance tools	************/

static kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}

kern_return_t
malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) {
    // Note that the 2 following addresses are not correct if the address of the target is different from your own.  This notably occurs if the address of System.framework is slid (e.g. different than at B & I )
    vm_address_t remote_malloc_zones = (vm_address_t)&malloc_zones;
    vm_address_t remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
    kern_return_t err;
    vm_address_t zones_address;
    vm_address_t *zones_address_ref;
    unsigned num_zones;
    unsigned *num_zones_ref;
    if (!reader) reader = _malloc_default_reader;
    // printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones);
    err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
    // printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones_address at %p\n", (unsigned)remote_malloc_zones);
        return err;
    }
    zones_address = *zones_address_ref;
    // printf("Reading num_zones at address %p\n", remote_malloc_num_zones);
    err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading num_zones at %p\n", (unsigned)remote_malloc_num_zones);
        return err;
    }
    num_zones = *num_zones_ref;
    // printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones);
    *count = num_zones;
    // printf("malloc_get_all_zones successfully found %d zones\n", num_zones);
    err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
    if (err) {
        malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", (unsigned)&zones_address);
        return err;
    }
    // printf("malloc_get_all_zones successfully read %d zones\n", num_zones);
    return err;
}
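/*
 * Illustrative sketch (not from the original source): a performance tool inspecting
 * another task would supply its own memory_reader_t that copies the remote bytes,
 * then walk the returned zone addresses:
 *
 *     vm_address_t *zones;
 *     unsigned count;
 *     if (malloc_get_all_zones(target_task, my_reader, &zones, &count) == 0)
 *         for (unsigned i = 0; i < count; i++)
 *             ...  // inspect the zone structure read from zones[i]
 *
 * target_task and my_reader are hypothetical names for the caller's task port and
 * reader callback.
 */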

/*********	Debug helpers	************/

void
malloc_zone_print_ptr_info(void *ptr) {
    malloc_zone_t *zone;
    if (!ptr) return;
    zone = malloc_zone_from_ptr(ptr);
    if (zone) {
        printf("ptr %p in registered zone %p\n", ptr, zone);
    } else {
        printf("ptr %p not in heap\n", ptr);
    }
}

boolean_t
malloc_zone_check(malloc_zone_t *zone) {
    boolean_t ok = 1;
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            if (!zone->introspect->check(zone)) ok = 0;
        }
    } else {
        ok = zone->introspect->check(zone);
    }
    return ok;
}

void
malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) {
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->print(zone, verbose);
        }
    } else {
        zone->introspect->print(zone, verbose);
    }
}

void
malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
    if (!zone) {
        memset(stats, 0, sizeof(*stats));
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            malloc_statistics_t this_stats;
            zone->introspect->statistics(zone, &this_stats);
            stats->blocks_in_use += this_stats.blocks_in_use;
            stats->size_in_use += this_stats.size_in_use;
            stats->max_size_in_use += this_stats.max_size_in_use;
            stats->size_allocated += this_stats.size_allocated;
        }
    } else {
        zone->introspect->statistics(zone, stats);
    }
}

void
malloc_zone_log(malloc_zone_t *zone, void *address) {
    if (!zone) {
        unsigned index = 0;
        while (index < malloc_num_zones) {
            zone = malloc_zones[index++];
            zone->introspect->log(zone, address);
        }
    } else {
        zone->introspect->log(zone, address);
    }
}

/*********	Misc other entry points	************/

static void
DefaultMallocError(int x) {
    malloc_printf("*** error %d\n", x);
#if USE_SLEEP_RATHER_THAN_ABORT
    sleep(3600);
#else
    abort();
#endif
}

void (*
malloc_error(void (*func)(int)))(int) {
    return DefaultMallocError;
}

void
_malloc_fork_prepare() {
    /* Prepare the malloc module for a fork by ensuring that no thread is in a malloc critical section */
    unsigned index = 0;
    MALLOC_LOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_lock(zone);
    }
}

void
_malloc_fork_parent() {
    /* Called in the parent process after a fork() to resume normal operation. */
    unsigned index = 0;
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}

void
_malloc_fork_child() {
    /* Called in the child process after a fork() to resume normal operation.  In the MTASK case we also have to change memory inheritance so that the child does not share memory with the parent. */
    unsigned index = 0;
    MALLOC_UNLOCK();
    while (index < malloc_num_zones) {
        malloc_zone_t *zone = malloc_zones[index++];
        zone->introspect->force_unlock(zone);
    }
}

/*
 * A Glibc-like mstats() interface.
 *
 * Note that this interface really isn't very good, as it doesn't understand
 * that we may have multiple allocators running at once.  We just massage
 * the result from malloc_zone_statistics in any case.
 */
struct mstats
mstats(void)
{
    malloc_statistics_t s;
    struct mstats m;

    malloc_zone_statistics(NULL, &s);
    m.bytes_total = s.size_allocated;
    m.chunks_used = s.blocks_in_use;
    m.bytes_used = s.size_in_use;
    m.chunks_free = 0;
    m.bytes_free = m.bytes_total - m.bytes_used;	/* isn't this somewhat obvious? */

    return(m);
}
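/*
 * Illustrative sketch (not from the original source): how a program might read the
 * aggregated numbers returned above:
 *
 *     struct mstats ms = mstats();
 *     printf("in use: %lu of %lu bytes\n",
 *            (unsigned long)ms.bytes_used, (unsigned long)ms.bytes_total);
 */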

/*****************	OBSOLETE ENTRY POINTS	********************/

#if PHASE_OUT_OLD_MALLOC
#error PHASE OUT THE FOLLOWING FUNCTIONS
#else
#warning PHASE OUT THE FOLLOWING FUNCTIONS
#endif

void
set_malloc_singlethreaded(boolean_t single) {
    static boolean_t warned = 0;
    if (!warned) {
#if PHASE_OUT_OLD_MALLOC
        malloc_printf("*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
        warned = 1;
    }
}

void
malloc_singlethreaded() {
    static boolean_t warned = 0;
    if (!warned) {
        malloc_printf("*** OBSOLETE: malloc_singlethreaded()\n");
        warned = 1;
    }
}

int
malloc_debug(int level) {
    malloc_printf("*** OBSOLETE: malloc_debug()\n");
    return 0;
}