/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 2004 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * objc-auto.m
 * Copyright 2004 Apple Computer, Inc.
 */

#import "objc-auto.h"

#import <stdint.h>
#import <stdbool.h>
#import <fcntl.h>
#import <mach/mach.h>
#import <mach-o/dyld.h>

#import "objc-private.h"
#import "objc-rtp.h"
#import "maptable.h"


// Types and prototypes from non-open-source auto_zone.h

#include <sys/types.h>
#include <malloc/malloc.h>

typedef malloc_zone_t auto_zone_t;

typedef uint64_t auto_date_t;

typedef struct {
    unsigned version;                   // reserved - 0 for now
    /* Memory usage */
    unsigned long long num_allocs;      // number of allocations performed
    volatile unsigned blocks_in_use;    // number of pointers in use
    unsigned bytes_in_use;              // sum of the sizes of all pointers in use
    unsigned max_bytes_in_use;          // high water mark
    unsigned bytes_allocated;
    /* GC stats */
    /* When there is an array, 0 stands for full collection, 1 for generational */
    unsigned num_collections[2];
    boolean_t last_collection_was_generational;
    unsigned bytes_in_use_after_last_collection[2];
    unsigned bytes_allocated_after_last_collection[2];
    unsigned bytes_freed_during_last_collection[2];
    auto_date_t duration_last_collection[2];
    auto_date_t duration_all_collections[2];
} auto_statistics_t;

typedef enum {
    AUTO_COLLECTION_NO_COLLECTION = 0,
    AUTO_COLLECTION_GENERATIONAL_COLLECTION,
    AUTO_COLLECTION_FULL_COLLECTION
} auto_collection_mode_t;

typedef enum {
    AUTO_LOG_COLLECTIONS = (1 << 1),        // log whenever a collection occurs
    AUTO_LOG_COLLECT_DECISION = (1 << 2),   // logs when deciding whether to collect
    AUTO_LOG_GC_IMPL = (1 << 3),            // logs to help debug GC
    AUTO_LOG_REGIONS = (1 << 4),            // log whenever a new region is allocated
    AUTO_LOG_UNUSUAL = (1 << 5),            // log unusual circumstances
    AUTO_LOG_WEAK = (1 << 6),               // log weak reference manipulation
    AUTO_LOG_ALL = (~0u)
} auto_log_mask_t;

typedef struct auto_zone_cursor *auto_zone_cursor_t;

typedef void (*auto_zone_foreach_object_t) (auto_zone_cursor_t cursor, void (*op) (void *ptr, void *data), void* data);

typedef struct {
    unsigned version;   // reserved - 0 for now
    boolean_t trace_stack_conservatively;
    boolean_t (*should_collect)(auto_zone_t *, const auto_statistics_t *stats, boolean_t about_to_create_a_new_region);
        // called back when a threshold is reached; must say whether to collect (and what type)
        // all locks are released when that callback is called
        // callee is free to call for statistics or reset the threshold
    unsigned ask_should_collect_frequency;
        // should_collect() is called every <N> allocations or frees, where <N> is this field
    unsigned full_vs_gen_frequency;
        // ratio of generational vs. full GCs for the frequency-based ones
    int (*collection_should_interrupt)(void);
        // called during scan to see if garbage collection should be aborted
    void (*invalidate)(auto_zone_t *zone, void *ptr, void *collection_context);
    void (*batch_invalidate) (auto_zone_t *zone, auto_zone_foreach_object_t foreach, auto_zone_cursor_t cursor);
        // called back with an object that is unreferenced
        // callee is responsible for invalidating object state
    void (*resurrect) (auto_zone_t *zone, void *ptr);
        // convert the object into a safe-to-use, but otherwise "undead" object. no guarantees are made about the
        // contents of this object, other than its liveness.
    unsigned word0_mask;    // mask for defining class
    void (*note_unknown_layout)(auto_zone_t *zone, unsigned class_field);
        // called once for each class encountered for which we don't know the layout
        // callee can decide to register the class with auto_zone_register_layout(), or do nothing
        // Note that this function is called during GC and therefore should not do any auto-allocation
    char* (*name_for_address) (auto_zone_t *zone, vm_address_t base, vm_address_t offset);
    auto_log_mask_t log;
        // set to auto_log_mask_t bits as desired
    boolean_t disable_generational;
        // if true, ignores requests to do generational GC
    boolean_t paranoid_generational;
        // if true, always compares generational GC results to the full GC garbage list
    boolean_t malloc_stack_logging;
        // if true, uses malloc_zone_malloc() for stack logging
} auto_collection_control_t;

typedef enum {
    AUTO_TYPE_UNKNOWN = -1,                                 // this is an error value
    AUTO_UNSCANNED = 1,
    AUTO_OBJECT = 2,
    AUTO_MEMORY_SCANNED = 0,                                // holds conservatively scanned pointers
    AUTO_MEMORY_UNSCANNED = AUTO_UNSCANNED,                 // holds unscanned memory (bits)
    AUTO_OBJECT_SCANNED = AUTO_OBJECT,                      // first word is 'isa', may have 'exact' layout info elsewhere
    AUTO_OBJECT_UNSCANNED = AUTO_OBJECT | AUTO_UNSCANNED,   // first word is 'isa', good for bits or auto_zone_retain'ed items
} auto_memory_type_t;

typedef struct {
    vm_address_t referent;
    vm_address_t referrer_base;
    intptr_t referrer_offset;
} auto_reference_t;

typedef void (*auto_reference_recorder_t)(auto_zone_t *zone, void *ctx,
                                          auto_reference_t reference);


static void auto_collect(auto_zone_t *zone, auto_collection_mode_t mode, void *collection_context);
static auto_collection_control_t *auto_collection_parameters(auto_zone_t *zone);
static const auto_statistics_t *auto_collection_statistics(auto_zone_t *zone);
static void auto_enumerate_references(auto_zone_t *zone, void *referent,
                                      auto_reference_recorder_t callback,
                                      void *stack_bottom, void *ctx);
static void auto_enumerate_references_no_lock(auto_zone_t *zone, void *referent, auto_reference_recorder_t callback, void *stack_bottom, void *ctx);
static auto_zone_t *auto_zone(void);
static void auto_zone_add_root(auto_zone_t *zone, void *root, size_t size);
static void* auto_zone_allocate_object(auto_zone_t *zone, size_t size, auto_memory_type_t type, boolean_t initial_refcount_to_one, boolean_t clear);
static const void *auto_zone_base_pointer(auto_zone_t *zone, const void *ptr);
static auto_memory_type_t auto_zone_get_layout_type(auto_zone_t *zone, void *ptr);
static auto_memory_type_t auto_zone_get_layout_type_no_lock(auto_zone_t *zone, void *ptr);
static boolean_t auto_zone_is_finalized(auto_zone_t *zone, const void *ptr);
static boolean_t auto_zone_is_valid_pointer(auto_zone_t *zone, const void *ptr);
static unsigned int auto_zone_release(auto_zone_t *zone, void *ptr);
static void auto_zone_retain(auto_zone_t *zone, void *ptr);
static unsigned int auto_zone_retain_count_no_lock(auto_zone_t *zone, const void *ptr);
static void auto_zone_set_class_list(int (*get_class_list)(void **buffer, int count));
static size_t auto_zone_size_no_lock(auto_zone_t *zone, const void *ptr);
static void auto_zone_start_monitor(boolean_t force);
static void auto_zone_write_barrier(auto_zone_t *zone, void *recipient, const unsigned int offset_in_bytes, const void *new_value);
static void *auto_zone_write_barrier_memmove(auto_zone_t *zone, void *dst, const void *src, size_t size);



static void record_allocation(Class cls);
static auto_zone_t *gc_zone_init(void);


__private_extern__ BOOL UseGC NOBSS = NO;
static BOOL RecordAllocations = NO;
static int IsaStompBits = 0x0;

static auto_zone_t *gc_zone = NULL;
static BOOL gc_zone_finalizing = NO;
static intptr_t gc_collection_threshold = 128 * 1024;
static size_t gc_collection_ratio = 100, gc_collection_counter = 0;
static NXMapTable *gc_finalization_safe_classes = NULL;
static BOOL gc_roots_retained = YES;

/***********************************************************************
* Internal utilities
**********************************************************************/

#define ISAUTOOBJECT(x) (auto_zone_is_valid_pointer(gc_zone, (x)))


// A should-collect callback that never allows collection.
// Currently used to prevent on-demand collection.
static boolean_t objc_never_collect(auto_zone_t *zone, const auto_statistics_t *stats, boolean_t about_to_create_a_new_region)
{
    return false;
}


/***********************************************************************
* Utility exports
* Called by various libraries.
**********************************************************************/

void objc_collect(void)
{
    if (UseGC) {
        auto_collect(gc_zone, AUTO_COLLECTION_FULL_COLLECTION, NULL);
    }
}

void objc_collect_if_needed(unsigned long options) {
    if (UseGC) {
        const auto_statistics_t *stats = auto_collection_statistics(gc_zone);
        if (options & OBJC_GENERATIONAL) {
            // use an absolute memory-allocated threshold to decide when to collect generationally.
            intptr_t bytes_allocated_since_last_gc = stats->bytes_in_use - stats->bytes_in_use_after_last_collection[stats->last_collection_was_generational];
            if (bytes_allocated_since_last_gc >= gc_collection_threshold) {
                // malloc_printf("bytes_allocated_since_last_gc = %ld\n", bytes_allocated_since_last_gc);
                // periodically run a full collection to keep memory usage down, controlled by OBJC_COLLECTION_RATIO (100 to 1 is the default).
                auto_collection_mode_t mode = AUTO_COLLECTION_GENERATIONAL_COLLECTION;
                if (gc_collection_counter++ >= gc_collection_ratio) {
                    mode = AUTO_COLLECTION_FULL_COLLECTION;
                    gc_collection_counter = 0;
                }
                auto_collect(gc_zone, mode, NULL);
            }
        } else {
            // Run full collections until we no longer recover additional objects. We use two measurements
            // to determine whether or not the collector is being productive: the total number of blocks
            // must be shrinking, and the collector must itself be freeing bytes. Otherwise, another thread
            // could be responsible for reducing the block count. On the other hand, another thread could
            // be generating a lot of garbage, which would keep us collecting. This will need even more
            // tuning to prevent starvation, etc.
            unsigned blocks_in_use;
            do {
                blocks_in_use = stats->blocks_in_use;
                auto_collect(gc_zone, AUTO_COLLECTION_FULL_COLLECTION, NULL);
                // malloc_printf("bytes freed = %ld\n", stats->bytes_freed_during_last_collection[0]);
            } while (stats->bytes_freed_during_last_collection[0] > 0 && stats->blocks_in_use < blocks_in_use);
            gc_collection_counter = 0;
        }
    }
}
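
// Illustrative usage sketch (not part of this file): a client such as a
// run-loop observer might trigger a generational pass between events and
// let the ratio logic above escalate to a full collection. The callback
// name here is hypothetical; only objc_collect_if_needed() is real.
//   static void runLoopIdle(void) {
//       objc_collect_if_needed(OBJC_GENERATIONAL);
//   }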

void objc_collect_generation(void)
{
    if (UseGC) {
        auto_collect(gc_zone, AUTO_COLLECTION_GENERATIONAL_COLLECTION, NULL);
    }
}


unsigned int objc_numberAllocated(void)
{
    const auto_statistics_t *stats = auto_collection_statistics(gc_zone);
    return stats->blocks_in_use;
}


BOOL objc_isAuto(id object)
{
    return UseGC && ISAUTOOBJECT(object) != 0;
}


BOOL objc_collecting_enabled(void)
{
    return UseGC;
}


/***********************************************************************
* Memory management.
* Called by CF and Foundation.
**********************************************************************/

// Allocate an object in the GC zone, with the given number of extra bytes.
id objc_allocate_object(Class cls, int extra)
{
    id result =
        (id)auto_zone_allocate_object(gc_zone, cls->instance_size + extra,
                                      AUTO_OBJECT_SCANNED, false, true);
    result->isa = cls;
    if (RecordAllocations) record_allocation(cls);
    return result;
}
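
// For example (illustrative only, "SomeClass" is hypothetical): a caller
// that would otherwise use class_createInstance() can allocate a scanned
// instance with room for 16 indexed-ivar bytes like so:
//   id obj = objc_allocate_object([SomeClass class], 16);
// The block is zero-filled (clear == true) and starts with refcount 0
// (initial_refcount_to_one == false), so it is collectable as soon as it
// becomes unreachable.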


/***********************************************************************
* Write barrier exports
* Called by pretty much all GC-supporting code.
**********************************************************************/


// Platform-independent write barriers
// These contain the UseGC check that the platform-specific
// runtime-rewritten implementations do not.

id objc_assign_strongCast_generic(id value, id *dest)
{
    if (UseGC) {
        return objc_assign_strongCast_gc(value, dest);
    } else {
        return (*dest = value);
    }
}


id objc_assign_global_generic(id value, id *dest)
{
    if (UseGC) {
        return objc_assign_global_gc(value, dest);
    } else {
        return (*dest = value);
    }
}


id objc_assign_ivar_generic(id value, id dest, unsigned int offset)
{
    if (UseGC) {
        return objc_assign_ivar_gc(value, dest, offset);
    } else {
        id *slot = (id*) ((char *)dest + offset);
        return (*slot = value);
    }
}

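// Illustrative mapping (not part of this file): under GC the compiler
// rewrites qualifying assignments into these entry points, roughly:
//   someGlobal = obj;       // => objc_assign_global(obj, &someGlobal);
//   self->ivar = obj;       // => objc_assign_ivar(obj, self, ivar_offset);
//   *(id *)slot = obj;      // => objc_assign_strongCast(obj, (id *)slot);
// The variable names above are hypothetical; only the entry points are real.
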
#if defined(__ppc__)

// PPC write barriers are in objc-auto-ppc.s
// write_barrier_init conditionally stomps those to jump to the _impl versions.

#else

// use generic implementation until time can be spent on optimizations
id objc_assign_strongCast(id value, id *dest) { return objc_assign_strongCast_generic(value, dest); }
id objc_assign_global(id value, id *dest) { return objc_assign_global_generic(value, dest); }
id objc_assign_ivar(id value, id dest, unsigned int offset) { return objc_assign_ivar_generic(value, dest, offset); }

// not defined(__ppc__)
#endif


void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
    if (UseGC) {
        return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
    } else {
        return memmove(dst, src, size);
    }
}


/***********************************************************************
* Testing tools
* Used to isolate resurrection of garbage objects during finalization.
**********************************************************************/
BOOL objc_is_finalized(void *ptr) {
    return ptr != NULL && auto_zone_is_finalized(gc_zone, ptr);
}


/***********************************************************************
* CF-only write barrier exports
* Called by CF only.
* The gc_zone guards are not thought to be necessary.
**********************************************************************/

// Exported as very private SPI to Foundation, which hands it on to CF.
void* objc_assign_ivar_address_CF(void *value, void *base, void **slot)
{
    if (value && gc_zone) {
        if (auto_zone_is_valid_pointer(gc_zone, base)) {
            unsigned int offset = (((char *)slot)-(char *)base);
            auto_zone_write_barrier(gc_zone, base, offset, value);
        }
    }

    return (*slot = value);
}


// Same as objc_assign_strongCast_gc; Foundation should be told to use the _gc version instead.
// Exported as very private SPI to Foundation, which hands it on to CF.
void* objc_assign_strongCast_CF(void* value, void **slot)
{
    if (value && gc_zone) {
        void *base = (void *)auto_zone_base_pointer(gc_zone, (void*)slot);
        if (base) {
            unsigned int offset = (((char *)slot)-(char *)base);
            auto_zone_write_barrier(gc_zone, base, offset, value);
        }
    }
    return (*slot = value);
}


/***********************************************************************
* Write barrier implementations, optimized for when GC is known to be on
* Called by the write barrier exports only.
* These implementations assume GC is on. The exported function must
* either perform the check itself or be conditionally stomped at
* startup time.
**********************************************************************/

__private_extern__ id objc_assign_strongCast_gc(id value, id *slot)
{
    id base;

    base = (id) auto_zone_base_pointer(gc_zone, (void*)slot);
    if (base) {
        unsigned int offset = (((char *)slot)-(char *)base);
        auto_zone_write_barrier(gc_zone, base, offset, value);
    }
    return (*slot = value);
}


__private_extern__ id objc_assign_global_gc(id value, id *slot)
{
    if (gc_roots_retained) {
        if (value && ISAUTOOBJECT(value)) {
            if (auto_zone_is_finalized(gc_zone, value))
                _objc_inform("GC: storing an already collected object %p into global memory at %p\n", value, slot);
            auto_zone_retain(gc_zone, value);
        }
        if (*slot && ISAUTOOBJECT(*slot)) {
            auto_zone_release(gc_zone, *slot);
        }
    } else {
        // use explicit root registration.
        if (value && ISAUTOOBJECT(value)) {
            if (auto_zone_is_finalized(gc_zone, value))
                _objc_inform("GC: storing an already collected object %p into global memory at %p\n", value, slot);
            auto_zone_add_root(gc_zone, slot, sizeof(id*));
        }
    }
    return (*slot = value);
}


__private_extern__ id objc_assign_ivar_gc(id value, id base, unsigned int offset)
{
    id *slot = (id*) ((char *)base + offset);

    if (value) {
        if (ISAUTOOBJECT(base)) {
            auto_zone_write_barrier(gc_zone, base, offset, value);
            if (gc_zone_finalizing && (auto_zone_get_layout_type(gc_zone, value) & AUTO_OBJECT) != AUTO_OBJECT) {
                // XXX_PCB: Hack, don't allow resurrection by inhibiting assigns of garbage, non-object, pointers.
                // XXX BG: move this check into auto & institute a new policy for resurrection, to wit:
                // Resurrected objects should go on a special list during finalization & be zombified afterwards
                // using the noisy isa-slam hack.
                if (auto_zone_is_finalized(gc_zone, value) && !auto_zone_is_finalized(gc_zone, base)) {
                    _objc_inform("GC: *** objc_assign_ivar_gc: preventing a resurrecting store of %p into %p + %d\n", value, base, offset);
                    value = nil;
                }
            }
        } else {
            _objc_inform("GC: *** objc_assign_ivar_gc: %p + %d isn't in the auto_zone.\n", base, offset);
        }
    }

    return (*slot = value);
}
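
// Illustrative scenario for the check above (names are hypothetical): if a
// -finalize override stores a garbage non-object block into a live object,
//   self->buffer = finalizedBlock;  // block lacks AUTO_OBJECT layout and is already finalized
// the store is logged and nil is written instead, so the collected block
// cannot be resurrected through a live, scanned ivar.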



/***********************************************************************
* Finalization support
* Called by auto and Foundation.
**********************************************************************/

#define USE_ISA_HACK 1
#define DO_ISA_DEBUG 0

#if USE_ISA_HACK


// NSDeallocatedObject silently ignores all messages sent to it.
@interface NSDeallocatedObject {
@public
    Class IsA;
}
+ (Class)class;
@end


static unsigned int FTCount, FTSize;
static struct FTTable {
    NSDeallocatedObject *object;
    Class class;
} *FTTablePtr;

/* a quick and very dirty table mapping finalized pointers to their isa's */
static void addPointerFT(NSDeallocatedObject *object, Class class) {
    if (FTCount >= FTSize) {
        FTSize = 2*(FTSize + 10);
        FTTablePtr = realloc(FTTablePtr, FTSize*sizeof(struct FTTable));
    }
    FTTablePtr[FTCount].object = object;
    FTTablePtr[FTCount].class = class;
    ++FTCount;
}

static Class classForPointerFT(NSDeallocatedObject *object) {
    int i;
    for (i = 0; i < FTCount; ++i)
        if (FTTablePtr[i].object == object)
            return FTTablePtr[i].class;
    return NULL;
}

void objc_stale(id object) {
}

@implementation NSDeallocatedObject
+ (Class)class { return self; }
- (Class)class { return classForPointerFT(self); }
- (BOOL)isKindOfClass:(Class)aClass {
    Class cls;
    for (cls = classForPointerFT(self); nil != cls; cls = cls->super_class)
        if (cls == (Class)aClass) return YES;
    return NO;
}
+ forward:(SEL)aSelector :(marg_list)args { return nil; }
- forward:(SEL)aSelector :(marg_list)args {
    Class class = classForPointerFT(self);
    if (!class) {
        if (IsaStompBits & 0x2)
            _objc_inform("***finalized & *recovered* object %p being sent '%s'!!\n", self, sel_getName(aSelector));
        // if it's not in the current table, then it's being messaged from a STALE REFERENCE!!
        objc_stale(self);
        return nil;
    }
    if (IsaStompBits & 0x4)
        _objc_inform("finalized object %p of class %s being sent %s\n", self, class->name, sel_getName(aSelector));
    return nil;
}
@end


static Class _NSDeallocatedObject = Nil;

static IMP _NSObject_finalize = NULL;


// Handed to and then called by auto
static void sendFinalize(auto_zone_t *zone, void* ptr, void *context)
{
    if (ptr == NULL) {
        // special signal to mark end of finalization phase
        if (IsaStompBits & 0x8)
            _objc_inform("----finalization phase over-----");
        FTCount = 0;
        return;
    }

    id object = ptr;
    Class cls = object->isa;

    if (cls == _NSDeallocatedObject) {
        // already finalized, do nothing
        _objc_inform("sendFinalize called on NSDeallocatedObject %p", ptr);
        return;
    }

    IMP finalizeMethod = class_lookupMethod(cls, @selector(finalize));
    if (finalizeMethod == &_objc_msgForward) {
        _objc_inform("GC: class '%s' does not implement -finalize!", cls->name);
    }

    gc_zone_finalizing = YES;

    @try {
        // fixme later, optimize away calls to NSObject's -finalize
        (*finalizeMethod)(object, @selector(finalize));
    } @catch (id exception) {
        _objc_inform("GC: -finalize resulted in an exception being thrown %p!", exception);
        // FIXME: what about uncaught C++ exceptions? Here's an idea: define a category
        // in a .mm file, so we can catch both flavors of exceptions.
        // @interface NSObject (TryToFinalize)
        // - (BOOL)tryToFinalize {
        //     try {
        //         @try {
        //             [self finalize];
        //         } @catch (id exception) {
        //             return NO;
        //         }
        //     } catch (...) {
        //         return NO;
        //     }
        //     return YES;
        // }
        // @end
    }

    gc_zone_finalizing = NO;

    if (IsaStompBits) {
        NSDeallocatedObject *dead = (NSDeallocatedObject *)object;
        // examine list of okay classes and leave alone XXX get from file
        // fixme hack: smash isa to dodge some out-of-order finalize bugs
        // the following are somewhat finalize order safe
        //if (!strcmp(dead->oldIsA->name, "NSCFArray")) return;
        //if (!strcmp(dead->oldIsA->name, "NSSortedArray")) return;
        if (IsaStompBits & 0x8)
            printf("adding [%d] %p %s\n", FTCount, dead, dead->IsA->name);
        addPointerFT(dead, dead->IsA);
        objc_assign_ivar(_NSDeallocatedObject, dead, 0);
    }
}

static void finalizeOneObject(void *ptr, void *data) {
    id object = ptr;
    Class cls = object->isa;

    if (cls == _NSDeallocatedObject) {
        // already finalized, do nothing
        _objc_inform("finalizeOneObject called on NSDeallocatedObject %p", ptr);
        return;
    }

    IMP finalizeMethod = class_lookupMethod(cls, @selector(finalize));
    if (finalizeMethod == &_objc_msgForward) {
        _objc_inform("GC: class '%s' does not implement -finalize!", cls->name);
    }

    // fixme later, optimize away calls to NSObject's -finalize
    (*finalizeMethod)(object, @selector(finalize));

    if (IsaStompBits) {
        NSDeallocatedObject *dead = (NSDeallocatedObject *)object;
        // examine list of okay classes and leave alone XXX get from file
        // fixme hack: smash isa to dodge some out-of-order finalize bugs
        // the following are somewhat finalize order safe
        //if (!strcmp(dead->oldIsA->name, "NSCFArray")) return;
        //if (!strcmp(dead->oldIsA->name, "NSSortedArray")) return;
        if (gc_finalization_safe_classes && NXMapGet(gc_finalization_safe_classes, cls->name)) {
            // malloc_printf("&&& finalized safe instance of %s &&&\n", cls->name);
            return;
        }
        if (IsaStompBits & 0x8)
            printf("adding [%d] %p %s\n", FTCount, dead, dead->IsA->name);
        addPointerFT(dead, dead->IsA);
        objc_assign_ivar(_NSDeallocatedObject, dead, 0);
    }
}

static void batchFinalize(auto_zone_t *zone,
                          auto_zone_foreach_object_t foreach,
                          auto_zone_cursor_t cursor)
{
    gc_zone_finalizing = YES;
    for (;;) {
        @try {
            // eventually foreach(cursor, objc_msgSend, @selector(finalize));
            // foreach(cursor, finalizeOneObject, NULL);
            foreach(cursor, objc_msgSend, @selector(finalize));
            // non-exceptional return means finalization is complete.
            break;
        } @catch (id exception) {
            _objc_inform("GC: -finalize resulted in an exception being thrown %p!", exception);
        }
    }
    gc_zone_finalizing = NO;
}

@interface NSResurrectedObject {
@public
    Class _isa;                 // [NSResurrectedObject class]
    Class _old_isa;             // original class
    unsigned _resurrections;    // how many times this object has been resurrected
}
+ (Class)class;
@end

@implementation NSResurrectedObject
+ (Class)class { return self; }
- (Class)class { return _isa; }
+ forward:(SEL)aSelector :(marg_list)args { return nil; }
- forward:(SEL)aSelector :(marg_list)args {
    _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, _old_isa->name, sel_getName(aSelector));
    return nil;
}
- (void)finalize {
    _objc_inform("**resurrected** object %p of class %s being finalized\n", self, _old_isa->name);
}
@end

static Class _NSResurrectedObject;

static void resurrectZombie(auto_zone_t *zone, void *ptr) {
    NSResurrectedObject *zombie = (NSResurrectedObject*) ptr;
    if (zombie->_isa != _NSResurrectedObject) {
        Class old_isa = zombie->_isa;
        zombie->_isa = _NSResurrectedObject;
        zombie->_old_isa = old_isa;
        zombie->_resurrections = 1;
    } else {
        zombie->_resurrections++;
    }
}

/***********************************************************************
* Allocation recording
* For development purposes.
**********************************************************************/

static NXMapTable *the_histogram = NULL;
static pthread_mutex_t the_histogram_lock = PTHREAD_MUTEX_INITIALIZER;


static void record_allocation(Class cls)
{
    pthread_mutex_lock(&the_histogram_lock);
    unsigned long count = (unsigned long) NXMapGet(the_histogram, cls);
    NXMapInsert(the_histogram, cls, (const void*) (count + 1));
    pthread_mutex_unlock(&the_histogram_lock);
}


void objc_allocation_histogram(void)
{
    Class cls;
    unsigned long count;
    NXMapState state = NXInitMapState(the_histogram);
    printf("struct histogram {\n\tconst char* name;\n\tunsigned long instance_size;\n\tunsigned long count;\n} the_histogram[] = {\n");
    while (NXNextMapState(the_histogram, &state, (const void**) &cls, (const void**) &count)) {
        printf("\t{ \"%s\", %lu, %lu },\n", cls->name, (unsigned long) cls->instance_size, count);
    }
    printf("};\n");
}
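
// Illustrative output shape (class name and numbers are made up): the
// function above prints a compilable C table, e.g.
//   struct histogram {
//       const char* name;
//       unsigned long instance_size;
//       unsigned long count;
//   } the_histogram[] = {
//       { "NSString", 8, 1024 },
//   };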

static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);

static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
{
    return name_for_address(zone, base, offset, false);
}

/***********************************************************************
* Initialization
**********************************************************************/

// Always called by _objcInit, even if GC is off.
__private_extern__ void gc_init(BOOL on)
{
    UseGC = on;

    if (PrintGC) {
        _objc_inform("GC: is %s", on ? "ON" : "OFF");
    }

    if (UseGC) {
        // Set up the GC zone
        gc_zone = gc_zone_init();

        // no NSObject until Foundation calls objc_collect_init()
        _NSObject_finalize = &_objc_msgForward;

        // Set up allocation recording
        RecordAllocations = (getenv("OBJC_RECORD_ALLOCATIONS") != NULL);
        if (RecordAllocations) the_histogram = NXCreateMapTable(NXPtrValueMapPrototype, 1024);

        if (getenv("OBJC_FINALIZATION_SAFE_CLASSES")) {
            FILE *f = fopen(getenv("OBJC_FINALIZATION_SAFE_CLASSES"), "r");
            if (f != NULL) {
                char *line;
                size_t length;
                gc_finalization_safe_classes = NXCreateMapTable(NXStrValueMapPrototype, 17);
                while ((line = fgetln(f, &length)) != NULL) {
                    char *last = &line[length - 1];
                    if (*last == '\n') *last = '\0'; // strip off trailing newline
                    char *className = strdup(line);
                    NXMapInsert(gc_finalization_safe_classes, className, className);
                }
                fclose(f);
            }
        }
    } else {
        auto_zone_start_monitor(false);
        auto_zone_set_class_list(objc_getClassList);
    }
}


static auto_zone_t *gc_zone_init(void)
{
    auto_zone_t *result;

    // result = auto_zone_create("objc auto collected zone");
    result = auto_zone(); // honor existing entry point for now (fixme)

    auto_collection_control_t *control = auto_collection_parameters(result);

    // set up the magic control parameters
    control->invalidate = sendFinalize;
    control->batch_invalidate = batchFinalize;
    control->resurrect = resurrectZombie;
    control->name_for_address = objc_name_for_address;

    // don't collect "on-demand" until... all Cocoa allocations are outside locks
    control->should_collect = objc_never_collect;
    control->ask_should_collect_frequency = UINT_MAX;
    control->trace_stack_conservatively = YES;

    // No interruption callback yet. Foundation will install one later.
    control->collection_should_interrupt = NULL;

    // debug: if set, only do full collections (no generational); sometimes useful for bringup
    control->disable_generational = getenv("AUTO_DISABLE_GENERATIONAL") != NULL;

    // debug: always compare generational GC results to the full GC garbage list
    // this *can* catch missing write-barriers and other bugs
    control->paranoid_generational = (getenv("AUTO_PARANOID_GENERATIONAL") != NULL);

    // if set, take a slightly slower path for object allocation
    control->malloc_stack_logging = (getenv("MallocStackLogging") != NULL || getenv("MallocStackLoggingNoCompact") != NULL);

    // logging level: none by default
    control->log = 0;
    if (getenv("AUTO_LOG_NOISY"))            control->log |= AUTO_LOG_COLLECTIONS;
    if (getenv("AUTO_LOG_ALL"))              control->log |= AUTO_LOG_ALL;
    if (getenv("AUTO_LOG_COLLECTIONS"))      control->log |= AUTO_LOG_COLLECTIONS;
    if (getenv("AUTO_LOG_COLLECT_DECISION")) control->log |= AUTO_LOG_COLLECT_DECISION;
    if (getenv("AUTO_LOG_GC_IMPL"))          control->log |= AUTO_LOG_GC_IMPL;
    if (getenv("AUTO_LOG_REGIONS"))          control->log |= AUTO_LOG_REGIONS;
    if (getenv("AUTO_LOG_UNUSUAL"))          control->log |= AUTO_LOG_UNUSUAL;
    if (getenv("AUTO_LOG_WEAK"))             control->log |= AUTO_LOG_WEAK;

    if (getenv("OBJC_ISA_STOMP")) {
        // != 0, stomp isa
        // 0x1, just stomp, no messages
        // 0x2, log messaging after reclaim (break on objc_stale())
        // 0x4, log messages sent during finalize
        // 0x8, log all finalizations
        IsaStompBits = strtol(getenv("OBJC_ISA_STOMP"), NULL, 0);
    }

    if (getenv("OBJC_COLLECTION_THRESHOLD")) {
        gc_collection_threshold = (size_t) strtoul(getenv("OBJC_COLLECTION_THRESHOLD"), NULL, 0);
    }

    if (getenv("OBJC_COLLECTION_RATIO")) {
        gc_collection_ratio = (size_t) strtoul(getenv("OBJC_COLLECTION_RATIO"), NULL, 0);
    }

    if (getenv("OBJC_EXPLICIT_ROOTS")) gc_roots_retained = NO;

    return result;
}
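
// Illustrative shell usage of the knobs read above (values and the app
// name are examples only):
//   OBJC_ISA_STOMP=0x8 OBJC_COLLECTION_RATIO=50 AUTO_LOG_COLLECTIONS=1 ./MyGCApp
// would log every finalization, run a full collection after every 50
// generational ones, and log each collection.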


// Called by Foundation to install auto's interruption callback.
malloc_zone_t *objc_collect_init(int (*callback)(void))
{
    // Find NSObject's finalize method now that Foundation is loaded.
    // fixme only look for the base implementation, not a category's
    _NSDeallocatedObject = objc_getClass("NSDeallocatedObject");
    _NSResurrectedObject = objc_getClass("NSResurrectedObject");
    _NSObject_finalize =
        class_lookupMethod(objc_getClass("NSObject"), @selector(finalize));
    if (_NSObject_finalize == &_objc_msgForward) {
        _objc_fatal("GC: -[NSObject finalize] unimplemented!");
    }

    // Don't install the callback if OBJC_DISABLE_COLLECTION_INTERRUPT is set
    if (gc_zone && getenv("OBJC_DISABLE_COLLECTION_INTERRUPT") == NULL) {
        auto_collection_control_t *ctrl = auto_collection_parameters(gc_zone);
        ctrl->collection_should_interrupt = callback;
    }

    return (malloc_zone_t *)gc_zone;
}





/***********************************************************************
* Debugging
**********************************************************************/

/* This is non-deadlocking with respect to malloc's locks EXCEPT:
 * %ls, %a, %A formats
 * more than 8 args
 */
static void objc_debug_printf(const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    vfprintf(stderr, format, ap);
    va_end(ap);
}

static malloc_zone_t *objc_debug_zone(void)
{
    static malloc_zone_t *z = NULL;
    if (!z) {
        z = malloc_create_zone(4096, 0);
        malloc_set_zone_name(z, "objc-auto debug");
    }
    return z;
}

static char *_malloc_append_unsigned(unsigned value, unsigned base, char *head) {
    if (!value) {
        head[0] = '0';
    } else {
        if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
        value = value % base;
        head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
    }
    return head+1;
}

static void strcati(char *str, unsigned value)
{
    str = _malloc_append_unsigned(value, 10, str + strlen(str));
    str[0] = '\0';
}

static void strcatx(char *str, unsigned value)
{
    str = _malloc_append_unsigned(value, 16, str + strlen(str));
    str[0] = '\0';
}
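
// For example (illustrative): given char buf[8] = "id=", strcati(buf, 42)
// yields "id=42", and strcatx on the same value appends "2a". These helpers
// format numbers without calling printf-family functions, which matters for
// the lock-sensitive debug paths above.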


static Ivar ivar_for_offset(struct objc_class *cls, vm_address_t offset)
{
    int i;
    int ivar_offset;
    Ivar super_ivar;
    struct objc_ivar_list *ivars;

    if (!cls) return NULL;

    // scan base classes FIRST
    super_ivar = ivar_for_offset(cls->super_class, offset);
    // result is best-effort; our ivars may be closer

    ivars = cls->ivars;
    // If we have no ivars, return super's ivar
    if (!ivars || ivars->ivar_count == 0) return super_ivar;

    // Try our first ivar. If it's too big, use super's best ivar.
    ivar_offset = ivars->ivar_list[0].ivar_offset;
    if (ivar_offset > offset) return super_ivar;
    else if (ivar_offset == offset) return &ivars->ivar_list[0];

    // Try our other ivars. If any is too big, use the previous.
    for (i = 1; i < ivars->ivar_count; i++) {
        int ivar_offset = ivars->ivar_list[i].ivar_offset;
        if (ivar_offset == offset) {
            return &ivars->ivar_list[i];
        } else if (ivar_offset > offset) {
            return &ivars->ivar_list[i-1];
        }
    }

    // Found nothing. Return our last ivar.
    return &ivars->ivar_list[ivars->ivar_count - 1];
}
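
// Worked example (hypothetical layout): for a class whose ivars sit at
// offsets 4 and 12, a query for offset 8 falls between them, so the loop
// returns the ivar at offset 4; append_ivar_at_offset() below then renders
// the remainder as "<name>+4".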

static void append_ivar_at_offset(char *buf, struct objc_class *cls, vm_address_t offset)
{
    Ivar ivar = NULL;

    if (offset == 0) return; // don't bother with isa
    if (offset >= cls->instance_size) {
        strcat(buf, ".<extra>+");
        strcati(buf, offset);
        return;
    }

    ivar = ivar_for_offset(cls, offset);
    if (!ivar) {
        strcat(buf, ".<?>");
        return;
    }

    // fixme doesn't handle structs etc.

    strcat(buf, ".");
    if (ivar->ivar_name) strcat(buf, ivar->ivar_name);
    else strcat(buf, "<anonymous ivar>");

    offset -= ivar->ivar_offset;
    if (offset > 0) {
        strcat(buf, "+");
        strcati(buf, offset);
    }
}


static const char *cf_class_for_object(void *cfobj)
{
    // ick - we don't link against CF anymore

    struct {
        uint32_t version;
        const char *className;
        // don't care about the rest
    } *cfcls;
    uint32_t cfid;
    NSSymbol sym;
    uint32_t (*CFGetTypeID)(void *);
    void * (*_CFRuntimeGetClassWithTypeID)(uint32_t);

    sym = NSLookupAndBindSymbolWithHint("_CFGetTypeID", "CoreFoundation");
    if (!sym) return "anonymous_NSCFType";
    CFGetTypeID = NSAddressOfSymbol(sym);
    if (!CFGetTypeID) return "NSCFType";

    sym = NSLookupAndBindSymbolWithHint("__CFRuntimeGetClassWithTypeID", "CoreFoundation");
    if (!sym) return "anonymous_NSCFType";
    _CFRuntimeGetClassWithTypeID = NSAddressOfSymbol(sym);
    if (!_CFRuntimeGetClassWithTypeID) return "anonymous_NSCFType";

    cfid = (*CFGetTypeID)(cfobj);
    cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
    return cfcls->className;
}


static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
{
#define APPEND_SIZE(s) \
    strcat(buf, "["); \
    strcati(buf, s); \
    strcat(buf, "]");

    char buf[500];
    char *result;

    buf[0] = '\0';

    unsigned int size =
        auto_zone_size_no_lock(zone, (void *)base);
    auto_memory_type_t type = size ?
        auto_zone_get_layout_type_no_lock(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
    unsigned int refcount = size ?
        auto_zone_retain_count_no_lock(zone, (void *)base) : 0;

    switch (type) {
    case AUTO_OBJECT_SCANNED:
    case AUTO_OBJECT_UNSCANNED: {
        Class cls = *(struct objc_class **)base;
        if (0 == strcmp(cls->name, "NSCFType")) {
            strcat(buf, cf_class_for_object((void *)base));
        } else {
            strcat(buf, cls->name);
        }
        if (offset) {
            append_ivar_at_offset(buf, cls, offset);
        }
        APPEND_SIZE(size);
        break;
    }
    case AUTO_MEMORY_SCANNED:
        strcat(buf, "{conservative-block}");
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_UNSCANNED:
        strcat(buf, "{no-pointers-block}");
        APPEND_SIZE(size);
        break;
    default:
        strcat(buf, "{unallocated-or-stack}");
    }

    if (withRetainCount && refcount > 0) {
        strcat(buf, " [[refcount=");
        strcati(buf, refcount);
        strcat(buf, "]]");
    }

    result = malloc_zone_malloc(objc_debug_zone(), 1 + strlen(buf));
    strcpy(result, buf);
    return result;

#undef APPEND_SIZE
}
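
// Illustrative results (class names and sizes are made up):
//   an object queried at offset 0:  "NSString[32] [[refcount=1]]"
//   the same object at ivar offset: "NSString._contents+4[32]"
//   a non-object block:             "{no-pointers-block}[16]"
// The returned string is allocated in objc_debug_zone() and should be
// released with malloc_zone_free(objc_debug_zone(), name).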


struct objc_class_recorder_context {
    malloc_zone_t *zone;
    void *cls;
    char *clsname;
    unsigned int count;
};

static void objc_class_recorder(task_t task, void *context, unsigned type_mask,
                                vm_range_t *ranges, unsigned range_count)
{
    struct objc_class_recorder_context *ctx =
        (struct objc_class_recorder_context *)context;

    vm_range_t *r;
    vm_range_t *end;
    for (r = ranges, end = ranges + range_count; r < end; r++) {
        auto_memory_type_t type =
            auto_zone_get_layout_type_no_lock(ctx->zone, (void *)r->address);
        if (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED) {
            // Check if this is an instance of class ctx->cls or some subclass
            Class cls;
            Class isa = *(Class *)r->address;
            for (cls = isa; cls; cls = cls->super_class) {
                if (cls == ctx->cls) {
                    unsigned int rc;
                    objc_debug_printf("[%p] : %s", r->address, isa->name);
                    if ((rc = auto_zone_retain_count_no_lock(ctx->zone, (void *)r->address))) {
                        objc_debug_printf(" [[refcount %u]]", rc);
                    }
                    objc_debug_printf("\n");
                    ctx->count++;
                    break;
                }
            }
        }
    }
}

void objc_enumerate_class(char *clsname)
{
    struct objc_class_recorder_context ctx;
    ctx.zone = auto_zone();
    ctx.clsname = clsname;
    ctx.cls = objc_getClass(clsname); // GrP fixme may deadlock if classHash lock is already owned
    ctx.count = 0;
    if (!ctx.cls) {
        objc_debug_printf("No class '%s'\n", clsname);
        return;
    }
    objc_debug_printf("\n\nINSTANCES OF CLASS '%s':\n\n", clsname);
    (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_class_recorder);
    objc_debug_printf("\n%d instances\n\n", ctx.count);
}
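
// This entry point is handy to invoke by hand from a debugger; the class
// name below is just an example:
//   (gdb) call objc_enumerate_class("NSString")
// prints every live instance of NSString (and subclasses) in the GC zone.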


static void objc_reference_printer(auto_zone_t *zone, void *ctx,
                                   auto_reference_t ref)
{
    char *referrer_name = name_for_address(zone, ref.referrer_base, ref.referrer_offset, true);
    char *referent_name = name_for_address(zone, ref.referent, 0, true);

    objc_debug_printf("[%p%+d -> %p] : %s -> %s\n",
                      ref.referrer_base, ref.referrer_offset, ref.referent,
                      referrer_name, referent_name);

    malloc_zone_free(objc_debug_zone(), referrer_name);
    malloc_zone_free(objc_debug_zone(), referent_name);
}


void objc_print_references(void *referent, void *stack_bottom, int lock)
{
    if (lock) {
        auto_enumerate_references(auto_zone(), referent,
                                  objc_reference_printer, stack_bottom, NULL);
    } else {
        auto_enumerate_references_no_lock(auto_zone(), referent,
                                          objc_reference_printer, stack_bottom, NULL);
    }
}
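
// Illustrative debugger use (the address is made up): print everything that
// directly references the block at 0x5000, taking auto's lock:
//   (gdb) call objc_print_references((void *)0x5000, $sp, 1)
// The lock flag simply selects between the locking and _no_lock
// enumeration entry points above.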



typedef struct {
    vm_address_t address;           // of this object
    int refcount;                   // of this object - nonzero means ROOT
    int depth;                      // number of links away from referent, or -1
    auto_reference_t *referrers;    // of this object
    int referrers_used;
    int referrers_allocated;
    auto_reference_t back;          // reference from this object back toward the target
    uint32_t ID;                    // graphic ID for grafflization
} blob;


typedef struct {
    blob **list;
    unsigned int used;
    unsigned int allocated;
} blob_queue;

blob_queue blobs = {NULL, 0, 0};
blob_queue untraced_blobs = {NULL, 0, 0};
blob_queue root_blobs = {NULL, 0, 0};



static void spin(void) {
    static char* spinner[] = {"\010\010| ", "\010\010/ ", "\010\010- ", "\010\010\\ "};
    static int spindex = 0;

    objc_debug_printf(spinner[spindex++]);
    if (spindex == 4) spindex = 0;
}


static void enqueue_blob(blob_queue *q, blob *b)
{
    if (q->used == q->allocated) {
        q->allocated = q->allocated * 2 + 1;
        q->list = malloc_zone_realloc(objc_debug_zone(), q->list, q->allocated * sizeof(blob *));
    }
    q->list[q->used++] = b;
}


static blob *dequeue_blob(blob_queue *q)
{
    blob *result = q->list[0];
    q->used--;
    memmove(&q->list[0], &q->list[1], q->used * sizeof(blob *));
    return result;
}


static blob *blob_for_address(vm_address_t addr)
{
    blob *b, **bp, **end;

    if (addr == 0) return NULL;

    for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
        b = *bp;
        if (b->address == addr) return b;
    }

    b = malloc_zone_calloc(objc_debug_zone(), sizeof(blob), 1);
    b->address = addr;
    b->depth = -1;
    b->refcount = auto_zone_size_no_lock(auto_zone(), (void *)addr) ? auto_zone_retain_count_no_lock(auto_zone(), (void *)addr) : 1;
    enqueue_blob(&blobs, b);
    return b;
}

static int blob_exists(vm_address_t addr)
{
    blob *b, **bp, **end;
    for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
        b = *bp;
        if (b->address == addr) return 1;
    }
    return 0;
}


// Destroy the blobs table and all blob data in it
static void free_blobs(void)
{
    blob *b, **bp, **end;
    for (bp = blobs.list, end = blobs.list+blobs.used; bp < end; bp++) {
        b = *bp;
        malloc_zone_free(objc_debug_zone(), b);
    }
    if (blobs.list) malloc_zone_free(objc_debug_zone(), blobs.list);
}

static void print_chain(auto_zone_t *zone, blob *root)
{
    blob *b;
    for (b = root; b != NULL; b = blob_for_address(b->back.referent)) {
        char *name;
        if (b->back.referent) {
            name = name_for_address(zone, b->address, b->back.referrer_offset, true);
            objc_debug_printf("[%p%+d] : %s ->\n", b->address, b->back.referrer_offset, name);
        } else {
            name = name_for_address(zone, b->address, 0, true);
            objc_debug_printf("[%p] : %s\n", b->address, name);
        }
        malloc_zone_free(objc_debug_zone(), name);
    }
}


static void objc_blob_recorder(auto_zone_t *zone, void *ctx,
                               auto_reference_t ref)
{
    blob *b = (blob *)ctx;

    spin();

    if (b->referrers_used == b->referrers_allocated) {
        b->referrers_allocated = b->referrers_allocated * 2 + 1;
        b->referrers = malloc_zone_realloc(objc_debug_zone(), b->referrers,
                                           b->referrers_allocated *
                                           sizeof(auto_reference_t));
    }

    b->referrers[b->referrers_used++] = ref;
    if (!blob_exists(ref.referrer_base)) {
        enqueue_blob(&untraced_blobs, blob_for_address(ref.referrer_base));
    }
}


#define INSTANCE_ROOTS 1
#define HEAP_ROOTS 2
#define ALL_REFS 3
static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock);
static void grafflize(blob_queue *blobs, int everything);

void objc_print_instance_roots(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, INSTANCE_ROOTS, stack_bottom, lock);
}

void objc_print_heap_roots(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, HEAP_ROOTS, stack_bottom, lock);
}

void objc_print_all_refs(vm_address_t target, void *stack_bottom, int lock)
{
    objc_print_recursive_refs(target, ALL_REFS, stack_bottom, lock);
}

static void sort_blobs_by_refcount(blob_queue *blobs)
{
    int i, j;

    // simple bubble sort
    for (i = 0; i < blobs->used; i++) {
        for (j = i+1; j < blobs->used; j++) {
            if (blobs->list[i]->refcount < blobs->list[j]->refcount) {
                blob *temp = blobs->list[i];
                blobs->list[i] = blobs->list[j];
                blobs->list[j] = temp;
            }
        }
    }
}


static void sort_blobs_by_depth(blob_queue *blobs)
{
    int i, j;

    // simple bubble sort
    for (i = 0; i < blobs->used; i++) {
        for (j = i+1; j < blobs->used; j++) {
            if (blobs->list[i]->depth > blobs->list[j]->depth) {
                blob *temp = blobs->list[i];
                blobs->list[i] = blobs->list[j];
                blobs->list[j] = temp;
            }
        }
    }
}
1363 | ||
1364 | ||
1365 | static void objc_print_recursive_refs(vm_address_t target, int which, void *stack_bottom, int lock) | |
1366 | { | |
1367 | objc_debug_printf("\n "); // make spinner draw in a pretty place | |
1368 | ||
1369 | // Construct pointed-to graph (of things eventually pointing to target) | |
1370 | ||
1371 | enqueue_blob(&untraced_blobs, blob_for_address(target)); | |
1372 | ||
1373 | while (untraced_blobs.used > 0) { | |
1374 | blob *b = dequeue_blob(&untraced_blobs); | |
1375 | spin(); | |
1376 | if (lock) { | |
1377 | auto_enumerate_references(auto_zone(), (void *)b->address, | |
1378 | objc_blob_recorder, stack_bottom, b); | |
1379 | } else { | |
1380 | auto_enumerate_references_no_lock(auto_zone(), (void *)b->address, | |
1381 | objc_blob_recorder, stack_bottom, b); | |
1382 | } | |
1383 | } | |
1384 | ||
1385 | // Walk pointed-to graph to find shortest paths from roots to target. | |
1386 | // This is BREADTH-FIRST order. | |
1387 | ||
1388 | blob_for_address(target)->depth = 0; | |
1389 | enqueue_blob(&untraced_blobs, blob_for_address(target)); | |
1390 | ||
1391 | while (untraced_blobs.used > 0) { | |
1392 | blob *b = dequeue_blob(&untraced_blobs); | |
1393 | blob *other; | |
1394 | auto_reference_t *r, *end; | |
1395 | int stop = NO; | |
1396 | ||
1397 | spin(); | |
1398 | ||
1399 | if (which == ALL_REFS) { | |
1400 | // Never stop at roots. | |
1401 | stop = NO; | |
1402 | } else if (which == HEAP_ROOTS) { | |
1403 | // Stop at any root (a block with positive retain count) | |
1404 | stop = (b->refcount > 0); | |
1405 | } else if (which == INSTANCE_ROOTS) { | |
1406 | // Only stop at roots that are instances | |
1407 | auto_memory_type_t type = auto_zone_get_layout_type_no_lock(auto_zone(), (void *)b->address); | |
1408 | stop = (b->refcount > 0 && (type == AUTO_OBJECT_SCANNED || type == AUTO_OBJECT_UNSCANNED)); // GREG XXX ??? | |
1409 | } | |
1410 | ||
1411 | // If this object is a root, save it and don't walk its referrers. | |
1412 | if (stop) { | |
1413 | enqueue_blob(&root_blobs, b); | |
1414 | continue; | |
1415 | } | |
1416 | ||
1417 | // For any "other object" that points to "this object" | |
1418 | // and does not yet have a depth: | |
1419 | // (1) other object is one level deeper than this object | |
1420 | // (2) (one of) the shortest path(s) from other object to the | |
1421 | // target goes through this object | |
1422 | ||
1423 | for (r = b->referrers, end = b->referrers + b->referrers_used; | |
1424 | r < end; | |
1425 | r++) | |
1426 | { | |
1427 | other = blob_for_address(r->referrer_base); | |
1428 | if (other->depth == -1) { | |
1429 | other->depth = b->depth + 1; | |
1430 | other->back = *r; | |
1431 | enqueue_blob(&untraced_blobs, other); | |
1432 | } | |
1433 | } | |
1434 | } | |
1435 | ||
1436 | { | |
1437 | char *name = name_for_address(auto_zone(), target, 0, true); | |
1438 | objc_debug_printf("\n\n%d %s %p (%s)\n\n", | |
1439 | (which==ALL_REFS) ? blobs.used : root_blobs.used, | |
1440 | (which==ALL_REFS) ? "INDIRECT REFS TO" : "ROOTS OF", | |
1441 | (void *)target, name); | |
1442 | malloc_zone_free(objc_debug_zone(), name); | |
1443 | } | |
1444 | ||
1445 | if (which == ALL_REFS) { | |
1446 | // Print all reference objects, biggest refcount first | |
1447 | int i; | |
1448 | sort_blobs_by_refcount(&blobs); | |
1449 | for (i = 0; i < blobs.used; i++) { | |
1450 | char *name = name_for_address(auto_zone(), blobs.list[i]->address, 0, true); | |
1451 | objc_debug_printf("[%p] : %s\n", blobs.list[i]->address, name); | |
1452 | malloc_zone_free(objc_debug_zone(), name); | |
1453 | } | |
1454 | } | |
1455 | else { | |
1456 | // Walk back chain from every root to the target, printing every step. | |
1457 | ||
1458 | while (root_blobs.used > 0) { | |
1459 | blob *root = dequeue_blob(&root_blobs); | |
1460 | print_chain(auto_zone(), root); | |
1461 | objc_debug_printf("\n"); | |
1462 | } | |
1463 | } | |
1464 | ||
1465 | grafflize(&blobs, which == ALL_REFS); | |
1466 | ||
1467 | objc_debug_printf("\ndone\n\n"); | |
1468 | ||
1469 | // Clean up | |
1470 | ||
1471 | free_blobs(); | |
1472 | if (untraced_blobs.list) malloc_zone_free(objc_debug_zone(), untraced_blobs.list); | |
1473 | if (root_blobs.list) malloc_zone_free(objc_debug_zone(), root_blobs.list); | |
1474 | ||
1475 | memset(&blobs, 0, sizeof(blobs)); | |
1476 | memset(&root_blobs, 0, sizeof(root_blobs)); | |
1477 | memset(&untraced_blobs, 0, sizeof(untraced_blobs)); | |
1478 | } | |
1479 | ||
1480 | ||
1481 | ||
1482 | struct objc_block_recorder_context { | |
1483 | malloc_zone_t *zone; | |
1484 | int fd; | |
1485 | unsigned int count; | |
1486 | }; | |
1487 | ||
1488 | ||
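| // malloc introspection callback: for each in-use block range, write a | |
| // line of the form "0xADDRESS name" to the file descriptor carried in | |
| // `context`, and count the blocks written. | |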
1489 | static void objc_block_recorder(task_t task, void *context, unsigned type_mask, | |
1490 | vm_range_t *ranges, unsigned range_count) | |
1491 | { | |
1492 | char buf[20]; | |
1493 | struct objc_block_recorder_context *ctx = | |
1494 | (struct objc_block_recorder_context *)context; | |
1495 | ||
1496 | vm_range_t *r; | |
1497 | vm_range_t *end; | |
1498 | for (r = ranges, end = ranges + range_count; r < end; r++) { | |
1499 | char *name = name_for_address(ctx->zone, r->address, 0, true); | |
1500 | buf[0] = '\0'; | |
1501 | strcatx(buf, r->address); | |
1502 | ||
1503 | write(ctx->fd, "0x", 2); | |
1504 | write(ctx->fd, buf, strlen(buf)); | |
1505 | write(ctx->fd, " ", 1); | |
1506 | write(ctx->fd, name, strlen(name)); | |
1507 | write(ctx->fd, "\n", 1); | |
1508 | ||
1509 | malloc_zone_free(objc_debug_zone(), name); | |
1510 | ctx->count++; | |
1511 | } | |
1512 | } | |
1513 | ||
1514 | ||
1515 | void objc_dump_block_list(const char* path) | |
1516 | { | |
1517 | struct objc_block_recorder_context ctx; | |
1518 | char filename[] = "/tmp/blocks-XXXXX.txt"; | |
1519 | ||
1520 | ctx.zone = auto_zone(); | |
1521 | ctx.count = 0; | |
1522 | ctx.fd = (path ? open(path, O_WRONLY | O_CREAT | O_TRUNC, 0666) : mkstemps(filename, strlen(strrchr(filename, '.')))); | |
| if (ctx.fd < 0) { | |
| objc_debug_printf("couldn't open block list file (errno %d)\n", errno); | |
| return; | |
| } | |
1523 | ||
1524 | objc_debug_printf("\n\nALL AUTO-ALLOCATED BLOCKS\n\n"); | |
1525 | (*ctx.zone->introspect->enumerator)(mach_task_self(), &ctx, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)ctx.zone, NULL, objc_block_recorder); | |
1526 | objc_debug_printf("%d blocks written to file\n", ctx.count); | |
1527 | objc_debug_printf("open %s\n", (path ? path : filename)); | |
1528 | ||
1529 | close(ctx.fd); | |
1530 | } | |
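| // Usage note: pass NULL to write to a freshly created | |
| // /tmp/blocks-XXXXX.txt; the chosen path is printed so the file can be | |
| // opened afterwards. | |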
1531 | ||
1532 | ||
1533 | ||
1534 | ||
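| // Emit "<key>ID</key><integer>N</integer>" for the graphic currently | |
| // being written to the graffle file. | |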
1535 | static void grafflize_id(int gfile, int ID) | |
1536 | { | |
1537 | char buf[20] = ""; | |
1538 | char *c; | |
1539 | ||
1540 | strcati(buf, ID); | |
1541 | c = "<key>ID</key><integer>"; | |
1542 | write(gfile, c, strlen(c)); | |
1543 | write(gfile, buf, strlen(buf)); | |
1544 | c = "</integer>"; | |
1545 | write(gfile, c, strlen(c)); | |
1546 | } | |
1547 | ||
1548 | ||
1549 | // head (the REFERENT) gets the arrow end; | |
1550 | // tail (the REFERRER) gets the plain end | |
1551 | static void grafflize_reference(int gfile, auto_reference_t reference, | |
1552 | int ID, int important) | |
1553 | { | |
1554 | blob *referrer = blob_for_address(reference.referrer_base); | |
1555 | blob *referent = blob_for_address(reference.referent); | |
1556 | char *c; | |
1557 | ||
1558 | // line | |
1559 | c = "<dict><key>Class</key><string>LineGraphic</string>"; | |
1560 | write(gfile, c, strlen(c)); | |
1561 | ||
1562 | // id | |
1563 | grafflize_id(gfile, ID); | |
1564 | ||
1565 | // head = REFERENT | |
1566 | c = "<key>Head</key><dict>"; | |
1567 | write(gfile, c, strlen(c)); | |
1568 | grafflize_id(gfile, referent->ID); | |
1569 | c = "</dict>"; | |
1570 | write(gfile, c, strlen(c)); | |
1571 | ||
1572 | // tail = REFERRER | |
1573 | c = "<key>Tail</key><dict>"; | |
1574 | write(gfile, c, strlen(c)); | |
1575 | grafflize_id(gfile, referrer->ID); | |
1576 | c = "</dict>"; | |
1577 | write(gfile, c, strlen(c)); | |
1578 | ||
1579 | // style - head arrow, thick line if important | |
1580 | c = "<key>Style</key><dict><key>stroke</key><dict>" | |
1581 | "<key>HeadArrow</key><string>FilledArrow</string>" | |
1582 | "<key>LineType</key><integer>1</integer>"; | |
1583 | write(gfile, c, strlen(c)); | |
1584 | if (important) { | |
1585 | c = "<key>Width</key><real>3</real>"; | |
1586 | write(gfile, c, strlen(c)); | |
1587 | } | |
1588 | c = "</dict></dict>"; | |
1589 | write(gfile, c, strlen(c)); | |
1590 | ||
1591 | // end line | |
1592 | c = "</dict>"; | |
1593 | write(gfile, c, strlen(c)); | |
1594 | } | |
1595 | ||
1596 | ||
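| // Emit one Rectangle graphic for a blob: labeled with its name and | |
| // address, positioned vertically by its BFS depth, and drawn with a | |
| // fat border when the blob has a positive retain count (i.e. is a root). | |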
1597 | static void grafflize_blob(int gfile, blob *b) | |
1598 | { | |
1599 | // fixme include ivar names too | |
1600 | char *name = name_for_address(auto_zone(), b->address, 0, false); | |
1601 | int width = 30 + strlen(name)*6; | |
1602 | int height = 40; | |
1603 | char buf[40] = ""; | |
1604 | char *c; | |
1605 | ||
1606 | // rectangle | |
1607 | c = "<dict>" | |
1608 | "<key>Class</key><string>ShapedGraphic</string>" | |
1609 | "<key>Shape</key><string>Rectangle</string>"; | |
1610 | write(gfile, c, strlen(c)); | |
1611 | ||
1612 | // id | |
1613 | grafflize_id(gfile, b->ID); | |
1614 | ||
1615 | // bounds | |
1616 | // order vertically by depth | |
1617 | c = "<key>Bounds</key><string>{{0,"; | |
1618 | write(gfile, c, strlen(c)); | |
1619 | buf[0] = '\0'; | |
1620 | strcati(buf, b->depth*60); | |
1621 | write(gfile, buf, strlen(buf)); | |
1622 | c = "},{"; | |
1623 | write(gfile, c, strlen(c)); | |
1624 | buf[0] = '\0'; | |
1625 | strcati(buf, width); | |
1626 | strcat(buf, ","); | |
1627 | strcati(buf, height); | |
1628 | write(gfile, buf, strlen(buf)); | |
1629 | c = "}}</string>"; | |
1630 | write(gfile, c, strlen(c)); | |
1631 | ||
1632 | // label | |
1633 | c = "<key>Text</key><dict><key>Text</key>" | |
1634 | "<string>{\\rtf1\\mac\\ansicpg10000\\cocoartf102\n" | |
1635 | "{\\fonttbl\\f0\\fswiss\\fcharset77 Helvetica;\\fonttbl\\f1\\fswiss\\fcharset77 Helvetica-Bold;}\n" | |
1636 | "{\\colortbl;\\red255\\green255\\blue255;}\n" | |
1637 | "\\pard\\tx560\\tx1120\\tx1680\\tx2240\\tx3360\\tx3920\\tx4480\\tx5040\\tx5600\\tx6160\\tx6720\\qc\n" | |
1638 | "\\f0\\fs20 \\cf0 "; | |
1639 | write(gfile, c, strlen(c)); | |
1640 | write(gfile, name, strlen(name)); | |
1641 | strcpy(buf, "\\\n0x"); | |
1642 | strcatx(buf, b->address); | |
1643 | write(gfile, buf, strlen(buf)); | |
1644 | c = "}</string></dict>"; | |
1645 | write(gfile, c, strlen(c)); | |
1646 | ||
1647 | // styles | |
1648 | c = "<key>Style</key><dict>"; | |
1649 | write(gfile, c, strlen(c)); | |
1650 | ||
1651 | // no shadow | |
1652 | c = "<key>shadow</key><dict><key>Draws</key><string>NO</string></dict>"; | |
1653 | write(gfile, c, strlen(c)); | |
1654 | ||
1655 | // fat border if refcount > 0 | |
1656 | if (b->refcount > 0) { | |
1657 | c = "<key>stroke</key><dict><key>Width</key><real>4</real></dict>"; | |
1658 | write(gfile, c, strlen(c)); | |
1659 | } | |
1660 | ||
1661 | // end styles | |
1662 | c = "</dict>"; | |
1663 | write(gfile, c, strlen(c)); | |
1664 | ||
1665 | // done | |
1666 | c = "</dict>\n"; | |
1667 | write(gfile, c, strlen(c)); | |
1668 | ||
1669 | malloc_zone_free(objc_debug_zone(), name); | |
1670 | } | |
1671 | ||
1672 | ||
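| // Skeleton of the OmniGraffle document: a property list whose | |
| // GraphicsList array is filled with the ShapedGraphic and LineGraphic | |
| // dicts produced by the functions above. | |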
1673 | #define gheader "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict><key>GraphDocumentVersion</key><integer>3</integer><key>ReadOnly</key><string>NO</string><key>GraphicsList</key><array>\n" | |
1674 | ||
1675 | #define gfooter "</array></dict></plist>\n" | |
1676 | ||
1677 | ||
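| // Write the whole object graph to a .graffle file in /tmp: one | |
| // rectangle per blob (all of them if `everything`, otherwise only those | |
| // reached by the BFS), then one arrow per reference, or per | |
| // back-reference only when printing root chains. | |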
1678 | static void grafflize(blob_queue *blobs, int everything) | |
1679 | { | |
1680 | // Don't require linking to Foundation! | |
1681 | int i; | |
1682 | int gfile; | |
1683 | int nextid = 1; | |
1684 | char filename[] = "/tmp/gc-XXXXX.graffle"; | |
1685 | ||
1686 | // Open file | |
1687 | gfile = mkstemps(filename, strlen(strrchr(filename, '.'))); | |
1688 | if (gfile < 0) { | |
1689 | objc_debug_printf("couldn't create a graffle file in /tmp/ (errno %d)\n", errno); | |
1690 | return; | |
1691 | } | |
1692 | ||
1693 | // Write header | |
1694 | write(gfile, gheader, strlen(gheader)); | |
1695 | ||
1696 | // Write a rectangle for each blob | |
1697 | sort_blobs_by_depth(blobs); | |
1698 | for (i = 0; i < blobs->used; i++) { | |
1699 | blob *b = blobs->list[i]; | |
1700 | b->ID = nextid++; | |
1701 | if (everything || b->depth >= 0) { | |
1702 | grafflize_blob(gfile, b); | |
1703 | } | |
1704 | } | |
1705 | ||
1706 | for (i = 0; i < blobs->used; i++) { | |
1707 | int j; | |
1708 | blob *b = blobs->list[i]; | |
1709 | ||
1710 | if (everything) { | |
1711 | // Write an arrow for each reference | |
1712 | // Use big arrows for backreferences | |
1713 | for (j = 0; j < b->referrers_used; j++) { | |
1714 | int is_back_ref = (b->referrers[j].referent == b->back.referent && b->referrers[j].referrer_offset == b->back.referrer_offset && b->referrers[j].referrer_base == b->back.referrer_base); | |
1715 | ||
1716 | grafflize_reference(gfile, b->referrers[j], nextid++, | |
1717 | is_back_ref); | |
1718 | } | |
1719 | } | |
1720 | else { | |
1721 | // Write an arrow for each backreference | |
1722 | if (b->depth > 0) { | |
1723 | grafflize_reference(gfile, b->back, nextid++, false); | |
1724 | } | |
1725 | } | |
1726 | } | |
1727 | ||
1728 | // Write footer and close | |
1729 | write(gfile, gfooter, strlen(gfooter)); | |
1730 | close(gfile); | |
1731 | objc_debug_printf("wrote object graph (%d objects)\nopen %s\n", | |
1732 | blobs->used, filename); | |
1733 | } | |
1734 | ||
1735 | #endif | |
1736 | ||
1737 | ||
1738 | ||
1739 | // Stubs for non-open-source libauto functions | |
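| // When libauto is unavailable, these no-op stand-ins keep the file | |
| // linking: queries report NULL/0/NO, and the write barrier below | |
| // degenerates to a plain store, so no collection ever happens. | |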
1740 | ||
1741 | static void auto_collect(auto_zone_t *zone, auto_collection_mode_t mode, void *collection_context) | |
1742 | { | |
1743 | } | |
1744 | ||
1745 | static auto_collection_control_t *auto_collection_parameters(auto_zone_t *zone) | |
1746 | { | |
1747 | return NULL; | |
1748 | } | |
1749 | ||
1750 | static const auto_statistics_t *auto_collection_statistics(auto_zone_t *zone) | |
1751 | { | |
1752 | return NULL; | |
1753 | } | |
1754 | ||
1755 | static void auto_enumerate_references(auto_zone_t *zone, void *referent, | |
1756 | auto_reference_recorder_t callback, | |
1757 | void *stack_bottom, void *ctx) | |
1758 | { | |
1759 | } | |
1760 | ||
1761 | static void auto_enumerate_references_no_lock(auto_zone_t *zone, void *referent, auto_reference_recorder_t callback, void *stack_bottom, void *ctx) | |
1762 | { | |
1763 | } | |
1764 | ||
1765 | static auto_zone_t *auto_zone(void) | |
1766 | { | |
1767 | return NULL; | |
1768 | } | |
1769 | ||
1770 | static void auto_zone_add_root(auto_zone_t *zone, void *root, size_t size) | |
1771 | { | |
1772 | } | |
1773 | ||
1774 | static void* auto_zone_allocate_object(auto_zone_t *zone, size_t size, auto_memory_type_t type, boolean_t initial_refcount_to_one, boolean_t clear) | |
1775 | { | |
1776 | return NULL; | |
1777 | } | |
1778 | ||
1779 | static const void *auto_zone_base_pointer(auto_zone_t *zone, const void *ptr) | |
1780 | { | |
1781 | return NULL; | |
1782 | } | |
1783 | ||
1784 | static auto_memory_type_t auto_zone_get_layout_type(auto_zone_t *zone, void *ptr) | |
1785 | { | |
1786 | return 0; | |
1787 | } | |
1788 | ||
1789 | static auto_memory_type_t auto_zone_get_layout_type_no_lock(auto_zone_t *zone, void *ptr) | |
1790 | { | |
1791 | return 0; | |
1792 | } | |
1793 | ||
1794 | static boolean_t auto_zone_is_finalized(auto_zone_t *zone, const void *ptr) | |
1795 | { | |
1796 | return NO; | |
1797 | } | |
1798 | ||
1799 | static boolean_t auto_zone_is_valid_pointer(auto_zone_t *zone, const void *ptr) | |
1800 | { | |
1801 | return NO; | |
1802 | } | |
1803 | ||
1804 | static unsigned int auto_zone_release(auto_zone_t *zone, void *ptr) | |
1805 | { | |
1806 | return 0; | |
1807 | } | |
1808 | ||
1809 | static void auto_zone_retain(auto_zone_t *zone, void *ptr) | |
1810 | { | |
1811 | } | |
1812 | ||
1813 | static unsigned int auto_zone_retain_count_no_lock(auto_zone_t *zone, const void *ptr) | |
1814 | { | |
1815 | return 0; | |
1816 | } | |
1817 | ||
1818 | static void auto_zone_set_class_list(int (*get_class_list)(void **buffer, int count)) | |
1819 | { | |
1820 | } | |
1821 | ||
1822 | static size_t auto_zone_size_no_lock(auto_zone_t *zone, const void *ptr) | |
1823 | { | |
1824 | return 0; | |
1825 | } | |
1826 | ||
1827 | static void auto_zone_start_monitor(boolean_t force) | |
1828 | { | |
1829 | } | |
1830 | ||
1831 | static void auto_zone_write_barrier(auto_zone_t *zone, void *recipient, const unsigned int offset_in_bytes, const void *new_value) | |
1832 | { | |
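| // No collector present: just perform the raw pointer store at the | |
| // given byte offset, with no barrier bookkeeping. | |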
1833 | *(uintptr_t *)(offset_in_bytes + (uint8_t *)recipient) = (uintptr_t)new_value; | |
1834 | } | |
1835 | ||
1836 | static void *auto_zone_write_barrier_memmove(auto_zone_t *zone, void *dst, const void *src, size_t size) | |
1837 | { | |
1838 | return memmove(dst, src, size); | |
1839 | } | |
1840 |