1 | /* | |
2 | * Copyright (c) 2004-2007 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
12 | * | |
13 | * The Original Code and all software distributed under the License are | |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
20 | * | |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | ||
24 | #include "objc-private.h" | |
25 | ||
26 | #include "objc-config.h" | |
27 | #include "objc-auto.h" | |
28 | #include "objc-accessors.h" | |
29 | ||
30 | #ifndef OBJC_NO_GC | |
31 | ||
32 | #include <stdint.h> | |
33 | #include <stdbool.h> | |
34 | #include <fcntl.h> | |
35 | #include <dlfcn.h> | |
36 | #include <mach/mach.h> | |
37 | #include <mach-o/dyld.h> | |
38 | #include <mach-o/nlist.h> | |
39 | #include <sys/types.h> | |
40 | #include <sys/mman.h> | |
41 | #include <libkern/OSAtomic.h> | |
42 | #include <auto_zone.h> | |
43 | ||
44 | #include <Block_private.h> | |
45 | #include <dispatch/private.h> | |
46 | ||
47 | #include "objc-private.h" | |
48 | #include "objc-references.h" | |
49 | #include "maptable.h" | |
50 | #include "message.h" | |
51 | #include "objc-gdb.h" | |
52 | ||
53 | #if !defined(NDEBUG) && !__OBJC2__ | |
54 | #include "objc-exception.h" | |
55 | #endif | |
56 | ||
57 | ||
// Forward declarations; the definitions of gc_zone_init and gc_block_init
// are not visible in this chunk.
static auto_zone_t *gc_zone_init(void);
static void gc_block_init(void);
static void registeredClassTableInit(void);
static BOOL objc_isRegisteredClass(Class candidate);
62 | ||
// GC master switch: -1 = not yet decided, otherwise treated as a boolean.
// Set during runtime initialization (not visible in this chunk).
int8_t UseGC = -1;
// Becomes YES once any class asks for main-thread-only finalization
// (see objc_finalizeOnMainThread); consulted by BatchInvalidate.
static BOOL WantsMainThreadFinalization = NO;

// The collector's zone; nil until GC is initialized.
auto_zone_t *gc_zone = nil;
67 | ||
68 | ||
/* Method prototypes */
// Dummy interface that only declares selectors used later in this file
// (e.g. [[exception description] UTF8String] in batchFinalize) so the
// compiler knows their signatures. No class adopts or implements it.
@interface DoesNotExist
- (const char *)UTF8String;
- (id)description;
@end
74 | ||
75 | ||
76 | /*********************************************************************** | |
77 | * Break-on-error functions | |
78 | **********************************************************************/ | |
79 | ||
// Debugger hooks: these exist only so users can set breakpoints on GC
// error conditions; they are called from the write barriers and the
// finalizer machinery below when something goes wrong.
BREAKPOINT_FUNCTION(
    void objc_assign_ivar_error(id base, ptrdiff_t offset)
);

BREAKPOINT_FUNCTION(
    void objc_assign_global_error(id value, id *slot)
);

BREAKPOINT_FUNCTION(
    void objc_exception_during_finalize_error(void)
);
91 | ||
92 | /*********************************************************************** | |
93 | * Utility exports | |
94 | * Called by various libraries. | |
95 | **********************************************************************/ | |
96 | ||
// Old (underscore) spelling of objc_setCollectionThreshold.
// Stores the new threshold in the zone's collection parameters; no-op
// when GC is off.
OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
    if (!UseGC) return;
    auto_collection_parameters(gc_zone)->collection_threshold = threshold;
}
102 | ||
// Sets the collection_threshold parameter of the GC zone.
// No-op when GC is off.
OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
    if (!UseGC) return;
    auto_collection_parameters(gc_zone)->collection_threshold = threshold;
}
108 | ||
// Sets the full-vs-generational collection frequency ratio.
// No-op when GC is off.
void objc_setCollectionRatio(size_t ratio) {
    if (!UseGC) return;
    auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
}
114 | ||
// Old (underscore) spelling of objc_setCollectionRatio.
void objc_set_collection_ratio(size_t ratio) { // old naming
    if (!UseGC) return;
    auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
}
120 | ||
// Marks `cls` so its instances are finalized on the main thread, and
// records globally that main-thread finalization is in play (the global
// flag is consulted by BatchInvalidate below). No-op when GC is off.
void objc_finalizeOnMainThread(Class cls) {
    if (UseGC) {
        WantsMainThreadFinalization = YES;
        cls->setShouldFinalizeOnMainThread();
    }
}
127 | ||
// stack based data structure queued if/when there is main-thread-only finalization work TBD
typedef struct BatchFinalizeBlock {
    auto_zone_foreach_object_t foreach;  // collector callback that enumerates garbage objects
    auto_zone_cursor_t cursor;           // opaque position within the collector's garbage list
    size_t cursor_size;                  // byte size of the cursor (used to copy it)
    volatile BOOL finished;              // set under MainThreadWorkQ.mutex when this batch is done
    volatile BOOL started;               // guards against recursive processing of the same batch
    struct BatchFinalizeBlock *next;     // singly-linked FIFO chain
} BatchFinalizeBlock_t;
137 | ||
// The Main Thread Finalization Work Queue Head
static struct {
    pthread_mutex_t mutex;      // protects head/tail and the queued blocks' flags
    pthread_cond_t condition;   // signaled when work completes or help is needed
    BatchFinalizeBlock_t *head;
    BatchFinalizeBlock_t *tail;
} MainThreadWorkQ;
145 | ||
146 | ||
// Intentionally empty; kept so existing callers still link.
// (Presumably the collector no longer needs an explicit thread start —
// not verifiable from this chunk.)
void objc_startCollectorThread(void) {
}
149 | ||
// Old (underscore) spelling of objc_startCollectorThread; also a no-op.
void objc_start_collector_thread(void) {
}
152 | ||
153 | static void batchFinalizeOnMainThread(void); | |
154 | ||
// Request a garbage collection.
// `options` selects the mode (low two bits: RATIO/GENERATIONAL/FULL/
// EXHAUSTIVE) plus the OBJC_COLLECT_IF_NEEDED and OBJC_WAIT_UNTIL_DONE
// flags. No-op when GC is off. When waiting on the main thread, the
// shared MainThreadWorkQ condition is used so queued main-thread-only
// finalization can still be serviced while we block.
void objc_collect(unsigned long options) {
    if (!UseGC) return;
    BOOL onMainThread = pthread_main_np() ? YES : NO;

    // while we're here, sneak off and do some finalization work (if any)
    if (onMainThread) batchFinalizeOnMainThread();
    // now on with our normally scheduled programming
    auto_zone_options_t amode = AUTO_ZONE_COLLECT_NO_OPTIONS;
    if (!(options & OBJC_COLLECT_IF_NEEDED)) {
        switch (options & 0x3) {
            case OBJC_RATIO_COLLECTION: amode = AUTO_ZONE_COLLECT_RATIO_COLLECTION; break;
            case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION; break;
            case OBJC_FULL_COLLECTION: amode = AUTO_ZONE_COLLECT_FULL_COLLECTION; break;
            case OBJC_EXHAUSTIVE_COLLECTION: amode = AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION; break;
        }
        amode |= AUTO_ZONE_COLLECT_COALESCE;
        amode |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION;
    }
    if (options & OBJC_WAIT_UNTIL_DONE) {
        __block BOOL done = NO;
        // If executing on the main thread, use the main thread work queue condition to block,
        // so main thread finalization can complete. Otherwise, use a thread-local condition.
        pthread_mutex_t localMutex = PTHREAD_MUTEX_INITIALIZER, *mutex = &localMutex;
        pthread_cond_t localCondition = PTHREAD_COND_INITIALIZER, *condition = &localCondition;
        if (onMainThread) {
            mutex = &MainThreadWorkQ.mutex;
            condition = &MainThreadWorkQ.condition;
        }
        // Hold the mutex before starting the collection so the completion
        // block's signal cannot be missed.
        pthread_mutex_lock(mutex);
        auto_zone_collect_and_notify(gc_zone, amode, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
            pthread_mutex_lock(mutex);
            done = YES;
            pthread_cond_signal(condition);
            pthread_mutex_unlock(mutex);
        });
        while (!done) {
            pthread_cond_wait(condition, mutex);
            // A wakeup on the main thread may mean finalization work was
            // queued rather than the collection completing; service it.
            if (onMainThread && MainThreadWorkQ.head) {
                pthread_mutex_unlock(mutex);
                batchFinalizeOnMainThread();
                pthread_mutex_lock(mutex);
            }
        }
        pthread_mutex_unlock(mutex);
    } else {
        auto_zone_collect(gc_zone, amode);
    }
}
203 | ||
204 | ||
205 | // USED BY CF & ONE OTHER | |
// Returns YES iff GC is on and `object` is a pointer the collector's
// zone recognizes as valid.
BOOL objc_isAuto(id object)
{
    if (!UseGC) return NO;
    return auto_zone_is_valid_pointer(gc_zone, object) != 0;
}
210 | ||
211 | ||
// Reports whether GC is enabled. Note: returns the raw UseGC value
// (which is -1 before initialization), so callers should treat the
// result as a boolean only.
BOOL objc_collectingEnabled(void)
{
    return UseGC;
}
216 | ||
// Old (underscore) spelling of objc_collectingEnabled; same raw-value
// caveat applies.
BOOL objc_collecting_enabled(void) // Old naming
{
    return UseGC;
}
221 | ||
// Exposes the GC zone as a malloc zone (nil when GC never initialized).
malloc_zone_t *objc_collectableZone(void) {
    return gc_zone;
}
225 | ||
// Dumps the GC heap to a file named by OBJC_HEAP_DUMP_FILENAME_FORMAT
// (pid + an incrementing counter). On success, copies the generated
// filename into `filenamebuffer` if it fits in `length` bytes
// (otherwise stores an empty string). Returns YES on success.
BOOL objc_dumpHeap(char *filenamebuffer, unsigned long length) {
    static int counter = 0;
    ++counter;
    char buffer[1024];
    // Use snprintf instead of sprintf: the expanded format could
    // otherwise overflow the fixed 1024-byte buffer.
    snprintf(buffer, sizeof(buffer), OBJC_HEAP_DUMP_FILENAME_FORMAT, getpid(), counter);
    if (!_objc_dumpHeap(gc_zone, buffer)) return NO;
    if (filenamebuffer) {
        unsigned long blen = strlen(buffer);
        if (blen < length)
            strncpy(filenamebuffer, buffer, blen+1);  // blen+1 includes the NUL
        else if (length > 0)
            filenamebuffer[0] = 0; // give some answer
    }
    return YES;
}
241 | ||
242 | ||
243 | /*********************************************************************** | |
244 | * Memory management. | |
245 | * Called by CF and Foundation. | |
246 | **********************************************************************/ | |
247 | ||
// Allocate an object in the GC zone, with the given number of extra bytes.
// Thin compatibility wrapper over class_createInstance.
id objc_allocate_object(Class cls, int extra)
{
    return class_createInstance(cls, extra);
}
253 | ||
254 | ||
255 | /*********************************************************************** | |
256 | * Write barrier implementations, optimized for when GC is known to be on | |
257 | * Called by the write barrier exports only. | |
258 | * These implementations assume GC is on. The exported function must | |
259 | * either perform the check itself or be conditionally stomped at | |
260 | * startup time. | |
261 | **********************************************************************/ | |
262 | ||
// Strong-cast write barrier (GC on): store `value` into *slot with the
// appropriate barrier whether slot is in GC memory or not.
id objc_assign_strongCast_gc(id value, id *slot) {
    if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) { // stores & returns true if slot points into GC allocated memory
        auto_zone_root_write_barrier(gc_zone, slot, value); // always stores
    }
    return value;
}
269 | ||
// Global write barrier (GC on): registers the slot as an explicit GC
// root when the value is collector-managed; plain store otherwise.
// Also warns (and calls the breakpoint hook) when storing an object
// that has already been finalized.
id objc_assign_global_gc(id value, id *slot) {
    // use explicit root registration.
    if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
        if (auto_zone_is_finalized(gc_zone, value)) {
            _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", (void*)value, slot);
            objc_assign_global_error(value, slot);
        }
        auto_zone_add_root(gc_zone, slot, value);
    }
    else
        *slot = value;

    return value;
}
284 | ||
// Thread-local write barrier (GC on): register the slot as a GC root
// when the value is collector-managed; otherwise a plain store suffices.
id objc_assign_threadlocal_gc(id value, id *slot)
{
    BOOL managed = value && auto_zone_is_valid_pointer(gc_zone, value);
    if (managed) {
        auto_zone_add_root(gc_zone, slot, value);
    } else {
        *slot = value;
    }
    return value;
}
296 | ||
// Ivar write barrier (GC on): store `value` at base+offset through the
// collector's write barrier. If `base` is not in the GC zone the store
// is *not* performed for a non-nil value (only a warning and the
// breakpoint hook fire); nil values are stored directly.
id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
{
    id *slot = (id*) ((char *)base + offset);

    if (value) {
        if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
            _objc_inform("GC: %p + %tu isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", (void*)base, offset);
            objc_assign_ivar_error(base, offset);
        }
    }
    else
        *slot = value;

    return value;
}
312 | ||
// Non-GC strong-cast barrier: just a store.
id objc_assign_strongCast_non_gc(id value, id *slot) {
    *slot = value;
    return value;
}
316 | ||
// Non-GC global barrier: just a store.
id objc_assign_global_non_gc(id value, id *slot) {
    *slot = value;
    return value;
}
320 | ||
// Non-GC thread-local barrier: just a store.
id objc_assign_threadlocal_non_gc(id value, id *slot) {
    *slot = value;
    return value;
}
324 | ||
// Non-GC ivar barrier: compute the slot address and store directly.
id objc_assign_ivar_non_gc(id value, id base, ptrdiff_t offset) {
    id *slot = (id *)((char *)base + offset);
    *slot = value;
    return value;
}
329 | ||
330 | ||
331 | /*********************************************************************** | |
332 | * Non-trivial write barriers | |
333 | **********************************************************************/ | |
334 | ||
// memmove that preserves GC invariants: with GC on, route through the
// collector so copied pointers get write-barriered; otherwise plain
// memmove.
void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
    if (!UseGC) return memmove(dst, src, size);
    return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
}
343 | ||
// Atomic CAS on an object pointer, no memory barrier. Routes through
// the collector's CAS (which maintains GC invariants) when GC is on.
BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, NO)
        : OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
351 | ||
// Atomic CAS on an object pointer, with a full memory barrier.
BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, YES)
        : OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
359 | ||
// Atomic CAS on a global object slot (isGlobal=YES), no memory barrier.
BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, YES, NO)
        : OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
368 | ||
// Atomic CAS on a global object slot (isGlobal=YES), with memory barrier.
BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, YES, YES)
        : OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
377 | ||
// Atomic CAS on an instance-variable slot (isGlobal=NO), no memory barrier.
BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, NO, NO)
        : OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
386 | ||
// Atomic CAS on an instance-variable slot (isGlobal=NO), with memory barrier.
BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
    return UseGC
        ? auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, NO, YES)
        : OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
}
395 | ||
396 | ||
397 | /*********************************************************************** | |
398 | * Weak ivar support | |
399 | **********************************************************************/ | |
400 | ||
// Weak read (GC on): peek at the slot first; only if it appears
// non-nil do the (more expensive) registered weak-reference read, which
// returns nil if the referent has been collected.
id objc_read_weak_gc(id *location) {
    id result = *location;
    if (result) {
        result = (id)auto_read_weak_reference(gc_zone, (void **)location);
    }
    return result;
}
408 | ||
// Without GC a weak read is just a load.
id objc_read_weak_non_gc(id *location) {
    id value = *location;
    return value;
}
412 | ||
// Weak store (GC on): register the location as a weak reference with
// the collector so it gets zeroed when `value` is collected.
id objc_assign_weak_gc(id value, id *location) {
    auto_assign_weak_reference(gc_zone, value, (const void **)location, nil);
    return value;
}
417 | ||
// Without GC a weak store is just a store.
id objc_assign_weak_non_gc(id value, id *location) {
    *location = value;
    return value;
}
421 | ||
422 | ||
// Re-registers weak ivars after an object has been copied/moved:
// walks the class's weak ivar layout (nibble-encoded: high nibble =
// words to skip, low nibble = consecutive weak words) and, for each
// weak slot, reads the old object's weak value through the collector
// and registers it at the corresponding slot of the new object.
void gc_fixup_weakreferences(id newObject, id oldObject) {
    // fix up weak references if any.
    const unsigned char *weakLayout = (const unsigned char *)class_getWeakIvarLayout(newObject->ISA());
    if (weakLayout) {
        void **newPtr = (void **)newObject, **oldPtr = (void **)oldObject;
        unsigned char byte;
        while ((byte = *weakLayout++)) {
            unsigned skips = (byte >> 4);
            unsigned weaks = (byte & 0x0F);
            newPtr += skips, oldPtr += skips;
            while (weaks--) {
                // Clear the raw slot first, then register the weak
                // reference so the collector owns the store.
                *newPtr = nil;
                auto_assign_weak_reference(gc_zone, auto_read_weak_reference(gc_zone, oldPtr), (const void **)newPtr, nil);
                ++newPtr, ++oldPtr;
            }
        }
    }
}
441 | ||
442 | ||
443 | /*********************************************************************** | |
444 | * dyld resolver functions for basic GC write barriers | |
445 | * dyld calls the resolver function to bind the symbol. | |
446 | * We return the GC or non-GC variant as appropriate. | |
447 | **********************************************************************/ | |
448 | ||
// Defines a dyld symbol resolver for `name`: at bind time dyld calls
// name_resolver(), which returns either the _gc or _non_gc variant
// depending on whether GC is enabled, and binds callers directly to it.
#define GC_RESOLVER(name) \
    OBJC_EXPORT void *name##_resolver(void) __asm__("_" #name); \
    void *name##_resolver(void) \
    { \
        __asm__(".symbol_resolver _" #name); \
        if (UseGC) return (void*)name##_gc; \
        else return (void*)name##_non_gc; \
    }
457 | ||
// Instantiate resolvers for every entry point that has paired
// _gc/_non_gc implementations (some of the variants live elsewhere in
// the project, not in this chunk).
GC_RESOLVER(objc_assign_ivar)
GC_RESOLVER(objc_assign_strongCast)
GC_RESOLVER(objc_assign_global)
GC_RESOLVER(objc_assign_threadlocal)
GC_RESOLVER(objc_read_weak)
GC_RESOLVER(objc_assign_weak)
GC_RESOLVER(objc_getProperty)
GC_RESOLVER(objc_setProperty)
GC_RESOLVER(objc_getAssociatedObject)
GC_RESOLVER(objc_setAssociatedObject)
GC_RESOLVER(_object_addExternalReference)
GC_RESOLVER(_object_readExternalReference)
GC_RESOLVER(_object_removeExternalReference)
471 | ||
472 | ||
473 | /*********************************************************************** | |
474 | * Testing tools | |
475 | * Used to isolate resurrection of garbage objects during finalization. | |
476 | **********************************************************************/ | |
// Testing hook: YES iff GC is on, ptr is non-nil, and the collector has
// already finalized the object at ptr.
BOOL objc_is_finalized(void *ptr) {
    if (!UseGC || ptr == nil) return NO;
    return auto_zone_is_finalized(gc_zone, ptr);
}
483 | ||
484 | ||
485 | /*********************************************************************** | |
486 | * Stack clearing. | |
487 | * Used by top-level thread loops to reduce false pointers from the stack. | |
488 | **********************************************************************/ | |
// Asks the collector to scrub the dead portion of this thread's stack,
// reducing false roots. Note: `options` is currently ignored — a
// literal 0 is passed to auto_zone_clear_stack.
void objc_clear_stack(unsigned long options) {
    if (!UseGC) return;
    auto_zone_clear_stack(gc_zone, 0);
}
493 | ||
494 | ||
495 | /*********************************************************************** | |
496 | * Finalization support | |
497 | **********************************************************************/ | |
498 | ||
// Finalizer crash debugging: holds the object currently being finalized
// (set/cleared by finalizeOneObject) so crash logs can identify it.
static void *finalizing_object;
501 | ||
// finalize a single object without fuss
// When there are no main-thread-only classes this is used directly
// Otherwise, it is used indirectly by smarter code that knows main-thread-affinity requirements
// Sends -finalize, then runs C++ destructors. Records the object in
// finalizing_object / the crash log message for the duration.
static void finalizeOneObject(void *obj, void *ignored) {
    id object = (id)obj;
    finalizing_object = obj;

    Class cls = object->ISA();
    CRSetCrashLogMessage2(class_getName(cls));

    /// call -finalize method.
    ((void(*)(id, SEL))objc_msgSend)(object, @selector(finalize));

    // Call C++ destructors.
    // This would be objc_destructInstance() but for performance.
    if (cls->hasCxxDtor()) {
        object_cxxDestruct(object);
    }

    finalizing_object = nil;
    CRSetCrashLogMessage2(nil);
}
524 | ||
// finalize object only if it is a main-thread-only object.
// Called only from the main thread.
static void finalizeOneMainThreadOnlyObject(void *obj, void *arg) {
    id object = (id)obj;
    Class cls = object->ISA();
    if (cls == nil) {
        _objc_fatal("object with nil ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
    }
    // Skip objects that any thread may finalize; they were (or will be)
    // handled elsewhere.
    if (!cls->shouldFinalizeOnMainThread()) return;
    finalizeOneObject(obj, nil);
}
537 | ||
// finalize one object only if it is not a main-thread-only object
// called from any other thread than the main thread
// Important: if a main-thread-only object is passed, return that fact in the needsMain argument
static void finalizeOneAnywhereObject(void *obj, void *needsMain) {
    id object = (id)obj;
    Class cls = object->ISA();
    bool *needsMainThreadWork = (bool *)needsMain;
    if (cls == nil) {
        _objc_fatal("object with nil ISA passed to finalizeOneAnywhereObject: %p\n", obj);
    }
    if (cls->shouldFinalizeOnMainThread()) {
        // Defer to the main thread; just report that help is needed.
        *needsMainThreadWork = true;
    } else {
        finalizeOneObject(obj, nil);
    }
}
555 | ||
556 | ||
// Utility workhorse.
// Set up the expensive @try block and ask the collector to hand the next object to
// our finalizeAnObject function.
// Track and return a boolean that records whether or not any main thread work is necessary.
// (When we know that there are no main thread only objects then the boolean isn't even computed)
static bool batchFinalize(auto_zone_t *zone,
                          auto_zone_foreach_object_t foreach,
                          auto_zone_cursor_t cursor,
                          size_t cursor_size,
                          void (*finalizeAnObject)(void *, void*))
{
#if !defined(NDEBUG) && !__OBJC2__
    // debug: don't call try/catch before exception handlers are installed
    objc_exception_functions_t table = {};
    objc_exception_get_functions(&table);
    assert(table.throw_exc);
#endif

    bool needsMainThreadWork = false;
    // Loop so that a throwing -finalize doesn't abort the whole batch:
    // the cursor keeps its position, so iteration resumes after the
    // offending object.
    for (;;) {
        @try {
            foreach(cursor, finalizeAnObject, &needsMainThreadWork);
            // non-exceptional return means finalization is complete.
            break;
        }
        @catch (id exception) {
            // whoops, note exception, then restart at cursor's position
            _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
            objc_exception_during_finalize_error();
        }
        @catch (...) {
            // whoops, note exception, then restart at cursor's position
            _objc_inform("GC: -finalize resulted in an exception being thrown, break on objc_exception_during_finalize_error to debug");
            objc_exception_during_finalize_error();
        }
    }
    return needsMainThreadWork;
}
595 | ||
// Called on main thread-only.
// Pick up work from global queue.
// called parasitically by anyone requesting a collection
// called explicitly when there is known to be main thread only finalization work
// In both cases we are on the main thread
// Guard against recursion by something called from a finalizer
static void batchFinalizeOnMainThread() {
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (!MainThreadWorkQ.head || MainThreadWorkQ.head->started) {
        // No work or we're already here
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);
        return;
    }
    while (MainThreadWorkQ.head) {
        BatchFinalizeBlock_t *bfb = MainThreadWorkQ.head;
        bfb->started = YES;
        // Drop the lock while running finalizers (they can take a while
        // and may themselves interact with the queue).
        pthread_mutex_unlock(&MainThreadWorkQ.mutex);

        batchFinalize(gc_zone, bfb->foreach, bfb->cursor, bfb->cursor_size, finalizeOneMainThreadOnlyObject);
        // signal the collector thread(s) that finalization has finished.
        pthread_mutex_lock(&MainThreadWorkQ.mutex);
        bfb->finished = YES;
        pthread_cond_broadcast(&MainThreadWorkQ.condition);
        MainThreadWorkQ.head = bfb->next;
    }
    MainThreadWorkQ.tail = nil;
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
}
624 | ||
625 | ||
// Knowing that we possibly have main thread only work to do, first process everything
// that is not main-thread-only. If we discover main thread only work, queue a work block
// to the main thread that will do just the main thread only work. Wait for it.
// Called from a non main thread.
static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
                                      auto_zone_foreach_object_t foreach,
                                      auto_zone_cursor_t cursor,
                                      size_t cursor_size)
{
    // First, lets get rid of everything we can on this thread, then ask main thread to help if needed
    // Work on a copy of the cursor so the original still points at the
    // full garbage list for the main thread's pass.
    char cursor_copy[cursor_size];
    memcpy(cursor_copy, cursor, cursor_size);
    bool needsMainThreadFinalization = batchFinalize(zone, foreach, (auto_zone_cursor_t)cursor_copy, cursor_size, finalizeOneAnywhereObject);

    if (! needsMainThreadFinalization)
        return;  // no help needed

    // set up the control block. Either our ping of main thread with _callOnMainThread will get to it, or
    // an objc_collect(if_needed) will get to it. Either way, this block will be processed on the main thread.
    // Note: bfb lives on this stack frame; we must not return until the
    // main thread marks it finished.
    BatchFinalizeBlock_t bfb;
    bfb.foreach = foreach;
    bfb.cursor = cursor;
    bfb.cursor_size = cursor_size;
    bfb.started = NO;
    bfb.finished = NO;
    bfb.next = nil;
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    if (MainThreadWorkQ.tail) {

        // link to end so that ordering of finalization is preserved.
        MainThreadWorkQ.tail->next = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    else {
        MainThreadWorkQ.head = &bfb;
        MainThreadWorkQ.tail = &bfb;
    }
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);

    //printf("----->asking main thread to finalize\n");
    dispatch_async(dispatch_get_main_queue(), ^{ batchFinalizeOnMainThread(); });

    // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
    pthread_mutex_lock(&MainThreadWorkQ.mutex);
    while (!bfb.finished) {
        // the main thread might be blocked waiting for a synchronous collection to complete, so wake it here
        pthread_cond_signal(&MainThreadWorkQ.condition);
        pthread_cond_wait(&MainThreadWorkQ.condition, &MainThreadWorkQ.mutex);
    }
    pthread_mutex_unlock(&MainThreadWorkQ.mutex);
    //printf("<------ main thread finalize done\n");

}
679 | ||
680 | ||
681 | ||
// collector calls this with garbage ready
// thread collectors, too, so this needs to be thread-safe
// Entry point the collector invokes to finalize a batch of garbage;
// chooses the single-thread or two-thread strategy based on whether any
// class has asked for main-thread-only finalization.
static void BatchInvalidate(auto_zone_t *zone,
                            auto_zone_foreach_object_t foreach,
                            auto_zone_cursor_t cursor,
                            size_t cursor_size)
{
    if (pthread_main_np() || !WantsMainThreadFinalization) {
        // Collect all objects. We're either pre-multithreaded on main thread or we're on the collector thread
        // but no main-thread-only objects have been allocated.
        batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
    }
    else {
        // We're on the dedicated thread. Collect some on main thread, the rest here.
        batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
    }

}
700 | ||
701 | ||
702 | /* | |
703 | * Zombie support | |
704 | * Collector calls into this system when it finds resurrected objects. | |
705 | * This keeps them pitifully alive and leaked, even if they reference garbage. | |
706 | */ | |
707 | ||
// idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
// need to smash anything. alternatively, could use associative references to track against a secondary
// object with information about the resurrection, such as a stack crawl, etc.

// Runtime-built class that resurrected (zombie) objects are smashed to.
static Class _NSResurrectedObjectClass;
// Maps zombie pointer -> original Class (see resurrectZombie).
static NXMapTable *_NSResurrectedObjectMap = nil;
// Guards _NSResurrectedObjectMap.
static pthread_mutex_t _NSResurrectedObjectLock = PTHREAD_MUTEX_INITIALIZER;
715 | ||
// Looks up the class this zombie had before resurrectZombie smashed it
// to _NSResurrectedObjectClass. Thread-safe via _NSResurrectedObjectLock.
static Class resurrectedObjectOriginalClass(id object) {
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    Class originalClass = (Class)NXMapGet(_NSResurrectedObjectMap, object);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    return originalClass;
}
723 | ||
// Catch-all IMP installed for class methods sent to the zombie class; returns self.
static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }
725 | ||
// Catch-all IMP for instance methods sent to a zombie: log the original
// class and the selector, then return self.
static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
    _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", (void*)self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
    return self;
}
730 | ||
// -finalize for zombies: remove the side-table entry (logging if one
// existed) and hand off to the runtime's root finalize.
static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
    Class originalClass;
    pthread_mutex_lock(&_NSResurrectedObjectLock);
    originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
    pthread_mutex_unlock(&_NSResurrectedObjectLock);
    if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", (void*)self, class_getName(originalClass));
    _objc_rootFinalize(self);
}
739 | ||
// +resolveInstanceMethod: for the zombie class — lazily install the
// catch-all instance IMP for any selector ("@@:" = id return, id self, SEL).
static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
    class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
    return YES;
}
744 | ||
// +resolveClassMethod: — install the catch-all class IMP on the metaclass.
static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
    class_addMethod(self->ISA(), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
    return YES;
}
749 | ||
// Builds the _NSResurrectedObject class at runtime (NSObject subclass)
// and its side table: adds -finalize plus the dynamic method resolvers
// on the metaclass, then registers the class pair.
static void _NSResurrectedObject_initialize() {
    _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
    _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
    class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
    Class metaClass = _NSResurrectedObjectClass->ISA();
    class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
    class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
    objc_registerClassPair(_NSResurrectedObjectClass);
}
759 | ||
// libauto resurrection callback: called when a finalized object is about to
// be resurrected. Records the object's original class in the side map and
// swizzles its isa to _NSResurrectedObjectClass so all further messages are
// absorbed and logged instead of running on a dead object.
static void resurrectZombie(auto_zone_t *zone, void *ptr) {
    id object = (id) ptr;
    Class cls = object->ISA();
    if (cls != _NSResurrectedObjectClass) {
        // remember the original class for this instance.
        pthread_mutex_lock(&_NSResurrectedObjectLock);
        NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
        pthread_mutex_unlock(&_NSResurrectedObjectLock);
        object_setClass(object, _NSResurrectedObjectClass);
    }
}
771 | ||
772 | /*********************************************************************** | |
773 | * Pretty printing support | |
774 | * For development purposes. | |
775 | **********************************************************************/ | |
776 | ||
777 | ||
// Forward declaration; defined at the bottom of this file.
static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);

// libauto name_for_address callback: describes the block at base+offset
// without the retain count (withRetainCount = false). Caller owns the
// returned string (allocated from objc_debug_zone()).
static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
{
    return name_for_address(zone, base, offset, false);
}
784 | ||
// libauto name_for_object callback: returns the class name of an object, or
// the empty string when the isa slot does not point at a registered class
// (e.g. the block is not really an object, or the class is gone).
static const char* objc_name_for_object(auto_zone_t *zone, void *object) {
    // Read the isa slot directly from the first word of the block.
    Class isa = *(Class *)object;
    return objc_isRegisteredClass(isa) ? class_getName(isa) : "";
}
790 | ||
791 | /*********************************************************************** | |
792 | * Collection support | |
793 | **********************************************************************/ | |
794 | ||
// Forward declaration; defined in the class-tracking section below.
static BOOL objc_isRegisteredClass(Class candidate);

// libauto layout callback: returns the strong-ivar layout for the object at
// `address`, or nil if its isa is not a registered class.
static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address) {
    id object = (id)address;
    // volatile forces a single real load of the isa word: the background
    // collector races with TLC recycling, so the value must be read once and
    // then validated via objc_isRegisteredClass before use.
    volatile void *clsptr = (void*)object->ISA();
    Class cls = (Class)clsptr;
    return objc_isRegisteredClass(cls) ? _object_getIvarLayout(cls, object) : nil;
}
803 | ||
// libauto weak-layout callback: returns the weak-ivar layout for the object
// at `address`, or nil if its isa is not a registered class.
static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address) {
    id object = (id)address;
    // Same volatile single-load discipline as objc_layout_for_address: read
    // the possibly-racing isa word once, then validate it.
    volatile void *clsptr = (void*)object->ISA();
    Class cls = (Class)clsptr;
    return objc_isRegisteredClass(cls) ? class_getWeakIvarLayout(cls) : nil;
}
810 | ||
// Tells the collector to scan a newly mapped data segment (called when an
// image is loaded) so its global pointers act as GC roots.
void gc_register_datasegment(uintptr_t base, size_t size) {
    auto_zone_register_datasegment(gc_zone, (void*)base, size);
}
814 | ||
// Tells the collector to stop scanning a data segment (called when an image
// is unloaded).
void gc_unregister_datasegment(uintptr_t base, size_t size) {
    auto_zone_unregister_datasegment(gc_zone, (void*)base, size);
}
818 | ||
// Element count of a statically-sized array (not valid on pointers).
#define countof(array) (sizeof(array) / sizeof(array[0]))
820 | ||
821 | ||
822 | /*********************************************************************** | |
823 | * Initialization | |
824 | **********************************************************************/ | |
825 | ||
// libauto will_grow callback: the heap is about to expand. Kick off a
// coalescing ratio collection, unless a collection is already in progress.
static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
    if (!auto_zone_is_collecting(gc_zone)) {
        auto_zone_collect(gc_zone, AUTO_ZONE_COLLECT_COALESCE|AUTO_ZONE_COLLECT_RATIO_COLLECTION);
    }
}
834 | ||
835 | ||
// Creates and configures the libauto collection zone: one-time setup of the
// main-thread batch-finalization queue, then zone creation and installation
// of the runtime's callbacks into the zone's control parameters.
// Returns the new zone (stored in gc_zone by the caller).
static auto_zone_t *gc_zone_init(void)
{
    auto_zone_t *result;
    static int didOnce = 0;
    if (!didOnce) {
        didOnce = 1;

        // initialize the batch finalization queue
        MainThreadWorkQ.head = nil;
        MainThreadWorkQ.tail = nil;
        pthread_mutex_init(&MainThreadWorkQ.mutex, nil);
        pthread_cond_init(&MainThreadWorkQ.condition, nil);
    }

    result = auto_zone_create("auto_zone");

    auto_zone_disable_compaction(result);

    auto_collection_control_t *control = auto_collection_parameters(result);

    // set up the magic control parameters
    control->batch_invalidate = BatchInvalidate;
    control->will_grow = objc_will_grow;
    control->resurrect = resurrectZombie;
    control->layout_for_address = objc_layout_for_address;
    control->weak_layout_for_address = objc_weak_layout_for_address;
    control->name_for_address = objc_name_for_address;

    // name_for_object only exists in newer control structs; the version
    // field carries the struct size the library was built with.
    if (control->version >= sizeof(auto_collection_control_t)) {
        control->name_for_object = objc_name_for_object;
    }

    return result;
}
870 | ||
871 | ||
/* should be defined in /usr/local/include/libdispatch_private.h. */
// Hooks libdispatch calls at worker-thread start/end; gc_init() points these
// at the registration functions below.
extern void (*dispatch_begin_thread_4GC)(void);
extern void (*dispatch_end_thread_4GC)(void);

// Flushes all thread-local blocks back to the global collector; installed as
// libdispatch's end-of-thread hook. No-op when GC is off.
static void objc_reapThreadLocalBlocks()
{
    if (UseGC) auto_zone_reap_all_local_blocks(gc_zone);
}
880 | ||
// Registers the calling thread with the collector so its stack and registers
// are scanned. Safe no-op when GC is off.
void objc_registerThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_register_thread(gc_zone);
}
885 | ||
// Unregisters the calling thread from the collector (it will no longer be
// scanned). Safe no-op when GC is off.
void objc_unregisterThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_unregister_thread(gc_zone);
}
890 | ||
// Debug aid: asserts (inside libauto) that the calling thread has been
// registered with the collector. Safe no-op when GC is off.
void objc_assertRegisteredThreadWithCollector()
{
    if (!UseGC) return;
    auto_zone_assert_thread_registered(gc_zone);
}
895 | ||
// Always called by _objcInit, even if GC is off.
// Latches the process-wide GC decision (UseGC) exactly once, then — if GC is
// on — creates the zone, wires libdispatch thread hooks, initializes the
// registered-class side table, routes Block allocation through the
// collector, and tags crash logs with the GC state.
void gc_init(BOOL wantsGC)
{
    // UseGC is -1 until the decision is made; this must be the first call.
    assert(UseGC == -1);
    UseGC = wantsGC;

    if (PrintGC) {
        _objc_inform("GC: is %s", wantsGC ? "ON" : "OFF");
    }

    if (UseGC) {
        // Set up the GC zone
        gc_zone = gc_zone_init();

        // tell libdispatch to register its threads with the GC.
        dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
        dispatch_end_thread_4GC = objc_reapThreadLocalBlocks;

        // set up the registered classes list
        registeredClassTableInit();

        // tell Blocks to use collectable memory. CF will cook up the classes separately.
        gc_block_init();

        // Add GC state to crash log reports
        _objc_inform_on_crash("garbage collection is ON");
    }
}
924 | ||
925 | ||
// Called by NSObject +load to perform late GC setup
// This work must wait until after all of libSystem initializes.
// Creates the resurrection-tracking class (requires NSObject to exist) and
// switches libauto to its multithreaded dispatch-queue mode.
void gc_init2(void)
{
    assert(UseGC);

    // create the _NSResurrectedObject class used to track resurrections.
    _NSResurrectedObject_initialize();

    // tell libauto to set up its dispatch queues
    auto_collect_multithreaded(gc_zone);
}
938 | ||
// Called by Foundation.
// This function used to initialize NSObject stuff, but now does nothing.
// Kept for binary compatibility; the callback parameter is ignored and the
// GC zone (possibly NULL when GC is off) is returned as a malloc zone.
malloc_zone_t *objc_collect_init(int (*callback)(void) __unused)
{
    return (malloc_zone_t *)gc_zone;
}
945 | ||
946 | /* | |
947 | * Support routines for the Block implementation | |
948 | */ | |
949 | ||
950 | ||
// The Block runtime now needs to sometimes allocate a Block that is an Object - namely
// when it needs to have a finalizer which, for now, is only if there are C++ destructors
// in the helper function. Hence the isObject parameter.
// Under GC a -copy message should allocate a refcount 0 block, ergo the isOne parameter.
static void *block_gc_alloc5(const unsigned long size, const bool isOne, const bool isObject) {
    // Blocks always hold pointers, so memory is scanned either way; isObject
    // additionally tags the allocation as an object (gets finalization).
    auto_memory_type_t type = isObject ? (AUTO_OBJECT|AUTO_MEMORY_SCANNED) : AUTO_MEMORY_SCANNED;
    return auto_zone_allocate_object(gc_zone, size, type, isOne, false);
}
959 | ||
// The Blocks runtime tracks refcounts above 1 by itself; it only reports the
// 0->1 transition (hasRefcount true) and the 1->0 transition (false) here,
// which map directly onto a collector retain/release.
static void block_gc_setHasRefcount(const void *block, const bool hasRefcount) {
    void *ptr = (void *)block;
    if (hasRefcount) {
        auto_zone_retain(gc_zone, ptr);
    } else {
        auto_zone_release(gc_zone, ptr);
    }
}
968 | ||
// memmove for the Blocks runtime that goes through the GC write barrier so
// copied strong pointers are seen by the collector.
static void block_gc_memmove(void *dst, void *src, unsigned long size) {
    auto_zone_write_barrier_memmove(gc_zone, dst, src, (size_t)size);
}
972 | ||
// Hands the Blocks runtime its GC entry points: allocation, refcount
// transitions, strong and weak assignment barriers, and barriered memmove.
// The casts adapt the runtime's assignment functions to the signatures
// _Block_use_GC expects.
static void gc_block_init(void) {
    _Block_use_GC(
        block_gc_alloc5,
        block_gc_setHasRefcount,
        (void (*)(void *, void **))objc_assign_strongCast_gc,
        (void (*)(const void *, void *))objc_assign_weak,
        block_gc_memmove
    );
}
982 | ||
983 | ||
984 | /*********************************************************************** | |
985 | * Track classes. | |
986 | * In addition to the global class hashtable (set) indexed by name, we | |
987 | * also keep one based purely by pointer when running under Garbage Collection. | |
988 | * This allows the background collector to race against objects recycled from TLC. | |
989 | * Specifically, the background collector can read the admin byte and see that | |
990 | * a thread local object is an object, get scheduled out, and the TLC recovers it, | |
991 | * linking it into the cache, then the background collector reads the isa field and | |
992 | * finds linkage info. By qualifying all isa fields read we avoid this. | |
993 | **********************************************************************/ | |
994 | ||
// This is a self-contained hash table of all classes. The first two elements contain the (size-1) and count.
static volatile Class *AllClasses = nil;

#define SHIFT 3             // low bits dropped when hashing a class pointer (pointer alignment)
#define INITIALSIZE 512     // initial slot count; must be a power of 2
#define REMOVED ~0ul        // tombstone marking a deleted slot (keeps probe chains intact)
1001 | ||
1002 | // Allocate the side table. | |
// Allocate the side table.
// Table layout: slot 0 = capacity mask (size-1), slot 1 = entry count,
// slots 2..mask = class pointers (0 = empty, REMOVED = tombstone).
static void registeredClassTableInit() {
    assert(UseGC);
    // allocate a collectable (refcount 0) zeroed hunk of unscanned memory
    uintptr_t *table = (uintptr_t *)auto_zone_allocate_object(gc_zone, INITIALSIZE*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
    // set initial capacity (as mask)
    table[0] = INITIALSIZE - 1;
    // set initial count
    table[1] = 0;
    AllClasses = (Class *)table;
}
1013 | ||
1014 | // Verify that a particular pointer is to a class. | |
1015 | // Safe from any thread anytime | |
// Verify that a particular pointer is to a class.
// Safe from any thread anytime: lock-free linear probe over a snapshot of
// the table pointer. Writers publish a fully-built replacement table before
// swapping AllClasses, so a stale snapshot is still internally consistent.
static BOOL objc_isRegisteredClass(Class candidate) {
    assert(UseGC);
    // nil is never a valid ISA.
    if (candidate == nil) return NO;
    // We don't care about a race with another thread adding a class to which we randomly might have a pointer
    // Get local copy of classes so that we're immune from updates.
    // We keep the size of the list as the first element so there is no race as the list & size get updated.
    uintptr_t *allClasses = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
    // Slot 1 is count
    uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & allClasses[0];
    // avoid slot 0 and 1
    if (slot < 2) slot = 2;
    for(;;) {
        long int slotValue = allClasses[slot];
        if (slotValue == (long int)candidate) {
            return YES;
        }
        // Empty slot terminates the probe chain (REMOVED tombstones do not).
        if (slotValue == 0) {
            return NO;
        }
        ++slot;
        if (slot > allClasses[0])
            slot = 2; // skip size, count
    }
}
1042 | ||
1043 | // Utility used when growing | |
1044 | // Assumes lock held | |
// Utility used when growing
// Assumes lock held
// Inserts candidate into a freshly built table (which has no tombstones, so
// only empty slots need checking) and bumps the count in slot 1.
static void addClassHelper(uintptr_t *table, uintptr_t candidate) {
    uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for(;;) {
        uintptr_t slotValue = table[slot];
        if (slotValue == 0) {
            table[slot] = candidate;
            ++table[1];
            return;
        }
        ++slot;
        if (slot > table[0])
            slot = 2; // skip size, count
    }
}
1060 | ||
1061 | // lock held by callers | |
// lock held by callers
// Adds a class pointer to the side table, reusing a tombstone slot if the
// probe chain hits one first. On crossing 50% load, builds a double-size
// replacement table, publishes it, and lets the old one be collected.
void objc_addRegisteredClass(Class candidate) {
    if (!UseGC) return;
    uintptr_t *table = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
    // Slot 1 is count - always non-zero
    uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for(;;) {
        uintptr_t slotValue = table[slot];
        // A class must not be registered twice.
        assert(slotValue != (uintptr_t)candidate);
        if (slotValue == REMOVED) {
            // Reuse a tombstone; count was never decremented on removal.
            table[slot] = (long)candidate;
            return;
        }
        else if (slotValue == 0) {
            table[slot] = (long)candidate;
            if (2*++table[1] > table[0]) { // add to count; check if we cross 50% utilization
                // grow
                uintptr_t oldSize = table[0]+1;
                uintptr_t *newTable = (uintptr_t *)auto_zone_allocate_object(gc_zone, oldSize*2*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
                uintptr_t i;
                newTable[0] = 2*oldSize - 1;
                newTable[1] = 0;
                // Rehash live entries; tombstones are dropped here.
                for (i = 2; i < oldSize; ++i) {
                    if (table[i] && table[i] != REMOVED)
                        addClassHelper(newTable, table[i]);
                }
                // Publish the fully built table before releasing the old one
                // so lock-free readers always see a consistent snapshot.
                AllClasses = (Class *)newTable;
                // let the old table be collected when other threads are no longer reading it.
                auto_zone_release(gc_zone, (void *)table);
            }
            return;
        }
        ++slot;
        if (slot > table[0])
            slot = 2; // skip size, count
    }
}
1100 | ||
1101 | // lock held by callers | |
// lock held by callers
// Removes a class pointer from the side table by replacing it with a
// tombstone (REMOVED), preserving probe chains for concurrent readers.
// The entry must be present; the count in slot 1 is intentionally left alone.
void objc_removeRegisteredClass(Class candidate) {
    if (!UseGC) return;
    uintptr_t *table = (uintptr_t *)AllClasses;
    // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
    // Slot 1 is count - always non-zero
    uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & table[0];
    if (slot < 2) slot = 2;
    for(;;) {
        uintptr_t slotValue = table[slot];
        if (slotValue == (uintptr_t)candidate) {
            table[slot] = REMOVED; // if next slot == 0 we could set to 0 here and decr count
            return;
        }
        // Hitting an empty slot would mean the class was never registered.
        assert(slotValue != 0);
        ++slot;
        if (slot > table[0])
            slot = 2; // skip size, count
    }
}
1121 | ||
1122 | ||
1123 | /*********************************************************************** | |
1124 | * Debugging - support for smart printouts when errors occur | |
1125 | **********************************************************************/ | |
1126 | ||
1127 | ||
// Lazily creates a dedicated malloc zone for debug-string allocations so
// they don't pollute (or depend on) the GC zone.
// NOTE(review): the lazy init is not synchronized — presumably only reached
// from error-reporting paths where racing initialization is tolerable; verify.
static malloc_zone_t *objc_debug_zone(void)
{
    static malloc_zone_t *z = nil;
    if (!z) {
        z = malloc_create_zone(PAGE_SIZE, 0);
        malloc_set_zone_name(z, "objc-auto debug");
    }
    return z;
}
1137 | ||
// Writes the base-`base` digits of `value` at `head` (no NUL terminator),
// most significant digit first, and returns the position one past the last
// digit written. value==0 produces a single '0'. Digits >= 10 use 'a'..'z'.
static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
    if (!value) {
        head[0] = '0';
    } else {
        // Recurse to emit higher-order digits first.
        if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
        value = value % base;
        head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
    }
    return head+1;
}

// Appends the decimal representation of `value` to the NUL-terminated string
// `str` (an integer analogue of strlcat). Silently does nothing unless at
// least 30 bytes remain in the buffer — more than enough for any 64-bit
// value plus the terminator.
static void strlcati(char *str, uintptr_t value, size_t bufSize)
{
    // Compute the length once (the original called strlen twice) and phrase
    // the space check without the underflow-prone `bufSize - len < 30` form.
    size_t len = strlen(str);
    if (bufSize < len + 30)
        return;
    str = _malloc_append_unsigned(value, 10, str + len);
    str[0] = '\0';
}
1156 | ||
1157 | ||
// Best-effort reverse lookup: finds the ivar (searching superclasses first,
// then this class) whose storage contains byte `offset` within an instance
// of `cls`. Returns nil only when cls is nil or no ivar plausibly matches.
static Ivar ivar_for_offset(Class cls, vm_address_t offset)
{
    unsigned i;
    vm_address_t ivar_offset;
    Ivar super_ivar, result;
    Ivar *ivars;
    unsigned int ivar_count;

    if (!cls) return nil;

    // scan base classes FIRST
    super_ivar = ivar_for_offset(cls->superclass, offset);
    // result is best-effort; our ivars may be closer

    ivars = class_copyIvarList(cls, &ivar_count);
    if (ivars && ivar_count) {
        // Try our first ivar. If it's too big, use super's best ivar.
        // (lose 64-bit precision)
        ivar_offset = ivar_getOffset(ivars[0]);
        if (ivar_offset > offset) result = super_ivar;
        else if (ivar_offset == offset) result = ivars[0];
        else result = nil;

        // Try our other ivars. If any is too big, use the previous.
        for (i = 1; result == nil && i < ivar_count; i++) {
            ivar_offset = ivar_getOffset(ivars[i]);
            if (ivar_offset == offset) {
                result = ivars[i];
            } else if (ivar_offset > offset) {
                result = ivars[i - 1];
            }
        }

        // Found nothing. Return our last ivar.
        if (result == nil)
            result = ivars[ivar_count - 1];

        // class_copyIvarList transfers ownership of the array.
        free(ivars);
    } else {
        result = super_ivar;
    }

    return result;
}
1202 | ||
// Appends a human-readable description of the ivar at `offset` within `cls`
// to `buf` (bounded by bufSize), e.g. ".ivarName+4", ".<extra>+N" for bytes
// past the instance size, or ".<?>" when no ivar can be identified.
static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset, size_t bufSize)
{
    Ivar ivar = nil;

    if (offset == 0) return; // don't bother with isa
    if (offset >= class_getInstanceSize(cls)) {
        // Beyond declared ivars (e.g. extra bytes requested at allocation).
        strlcat(buf, ".<extra>+", bufSize);
        strlcati(buf, offset, bufSize);
        return;
    }

    ivar = ivar_for_offset(cls, offset);
    if (!ivar) {
        strlcat(buf, ".<?>", bufSize);
        return;
    }

    // fixme doesn't handle structs etc.

    strlcat(buf, ".", bufSize);
    const char *ivar_name = ivar_getName(ivar);
    if (ivar_name) strlcat(buf, ivar_name, bufSize);
    else strlcat(buf, "<anonymous ivar>", bufSize);

    // If the offset lands inside the ivar, report the interior byte offset.
    offset -= ivar_getOffset(ivar);
    if (offset > 0) {
        strlcat(buf, "+", bufSize);
        strlcati(buf, offset, bufSize);
    }
}
1233 | ||
1234 | ||
// Returns the CF class name for a CF object without linking against
// CoreFoundation: dlopens CF (no-load, so only if it's already in the
// process) and calls CFGetTypeID/_CFRuntimeGetClassWithTypeID dynamically.
// Falls back to "anonymous_NSCFType" when CF or its symbols are unavailable.
static const char *cf_class_for_object(void *cfobj)
{
    // ick - we don't link against CF anymore

    // Mirrors the leading fields of CF's private class struct; only
    // className is read. Layout must stay in sync with CF.
    struct fake_cfclass {
        size_t version;
        const char *className;
        // don't care about the rest
    };

    const char *result;
    void *dlh;
    size_t (*CFGetTypeID)(void *);
    fake_cfclass * (*_CFRuntimeGetClassWithTypeID)(size_t);

    result = "anonymous_NSCFType";

    // RTLD_NOLOAD: never pull CF in; only find it if already loaded.
    dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
    if (!dlh) return result;

    CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
    _CFRuntimeGetClassWithTypeID = (fake_cfclass*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");

    if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
        size_t cfid = (*CFGetTypeID)(cfobj);
        result = (*_CFRuntimeGetClassWithTypeID)(cfid)->className;
    }

    // Balances the dlopen above (drops our reference, does not unload CF).
    dlclose(dlh);
    return result;
}
1266 | ||
1267 | ||
// Builds a malloc'd, human-readable description of the GC block at `base`
// (with an optional interior `offset` resolved to an ivar for objects),
// tagged with its size and, when withRetainCount is set, its retain count.
// Caller frees the result; it is allocated from objc_debug_zone().
static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
{
#define APPEND_SIZE(s) \
    strlcat(buf, "[", sizeof(buf)); \
    strlcati(buf, s, sizeof(buf)); \
    strlcat(buf, "]", sizeof(buf));

    char buf[1500];
    char *result;

    buf[0] = '\0';

    // size == 0 means base is not a block this zone knows about.
    size_t size =
        auto_zone_size(zone, (void *)base);
    auto_memory_type_t type = size ?
        auto_zone_get_layout_type(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
    unsigned int refcount = size ?
        auto_zone_retain_count(zone, (void *)base) : 0;

    switch (type) {
    case AUTO_OBJECT_SCANNED:
    case AUTO_OBJECT_UNSCANNED:
    case AUTO_OBJECT_ALL_POINTERS: {
        const char *class_name = object_getClassName((id)base);
        // CF toll-free-bridged objects show up as (NS/__NS)CFType; dig out
        // the real CF class name instead.
        if ((0 == strcmp(class_name, "__NSCFType")) || (0 == strcmp(class_name, "NSCFType"))) {
            strlcat(buf, cf_class_for_object((void *)base), sizeof(buf));
        } else {
            strlcat(buf, class_name, sizeof(buf));
        }
        if (offset) {
            append_ivar_at_offset(buf, ((id)base)->ISA(), offset, sizeof(buf));
        }
        APPEND_SIZE(size);
        break;
    }
    case AUTO_MEMORY_SCANNED:
        strlcat(buf, "{conservative-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_UNSCANNED:
        strlcat(buf, "{no-pointers-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_ALL_POINTERS:
        strlcat(buf, "{all-pointers-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    case AUTO_MEMORY_ALL_WEAK_POINTERS:
        strlcat(buf, "{all-weak-pointers-block}", sizeof(buf));
        APPEND_SIZE(size);
        break;
    case AUTO_TYPE_UNKNOWN:
        strlcat(buf, "{uncollectable-memory}", sizeof(buf));
        break;
    default:
        strlcat(buf, "{unknown-memory-type}", sizeof(buf));
    }

    if (withRetainCount && refcount > 0) {
        strlcat(buf, " [[refcount=", sizeof(buf));
        strlcati(buf, refcount, sizeof(buf));
        strlcat(buf, "]]", sizeof(buf));
    }

    // Copy the stack buffer into the debug zone for the caller to own.
    size_t len = 1 + strlen(buf);
    result = (char *)malloc_zone_malloc(objc_debug_zone(), len);
    memcpy(result, buf, len);
    return result;

#undef APPEND_SIZE
}
1339 | ||
1340 | ||
1341 | ||
1342 | ||
1343 | ||
1344 | #endif |