[apple/objc4.git] / runtime / NSObject.mm (objc4-818.2)
1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "DenseMapExtras.h"
29
30 #include <malloc/malloc.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <mach/mach.h>
34 #include <mach-o/dyld.h>
35 #include <mach-o/nlist.h>
36 #include <sys/types.h>
37 #include <sys/mman.h>
38 #include <Block.h>
39 #include <map>
40 #include <execinfo.h>
41 #include "NSObject-internal.h"
42 #include <os/feature_private.h>
43
44 extern "C" {
45 #include <os/reason_private.h>
46 #include <os/variant_private.h>
47 }
48
49 @interface NSInvocation
50 - (SEL)selector;
51 @end
52
53 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset = __builtin_offsetof(AutoreleasePoolPageData, magic);
54 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset = __builtin_offsetof(AutoreleasePoolPageData, next);
55 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
56 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
57 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset = __builtin_offsetof(AutoreleasePoolPageData, child);
58 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset = __builtin_offsetof(AutoreleasePoolPageData, depth);
59 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
60 OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_begin_offset = sizeof(AutoreleasePoolPageData);
61 #if __OBJC2__
62 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
63 OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = (AutoreleasePoolPageData::AutoreleasePoolEntry){ .ptr = ~(uintptr_t)0 }.ptr;
64 #else
65 OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = ~(uintptr_t)0;
66 #endif
67 OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
68 #endif
69
70 /***********************************************************************
71 * Weak ivar support
72 **********************************************************************/
73
74 static id defaultBadAllocHandler(Class cls)
75 {
76 _objc_fatal("attempt to allocate object of class '%s' failed",
77 cls->nameForLogging());
78 }
79
80 id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
81
82 id _objc_callBadAllocHandler(Class cls)
83 {
84 // fixme add re-entrancy protection in case allocation fails inside handler
85 return (*badAllocHandler)(cls);
86 }
87
88 void _objc_setBadAllocHandler(id(*newHandler)(Class))
89 {
90 badAllocHandler = newHandler;
91 }
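// Usage sketch: a hypothetical client of this SPI could replace the fatal
// default with a handler of its own (`myBadAllocHandler` is illustrative,
// not a real symbol):
//
//   static id myBadAllocHandler(Class cls) {
//       _objc_inform("alloc failed for %s", class_getName(cls));
//       return nil;   // report failure to the caller instead of aborting
//   }
//   _objc_setBadAllocHandler(&myBadAllocHandler);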
92
93
94 static id _initializeSwiftRefcountingThenCallRetain(id objc);
95 static void _initializeSwiftRefcountingThenCallRelease(id objc);
96
97 explicit_atomic<id(*)(id)> swiftRetain{&_initializeSwiftRefcountingThenCallRetain};
98 explicit_atomic<void(*)(id)> swiftRelease{&_initializeSwiftRefcountingThenCallRelease};
99
100 static void _initializeSwiftRefcounting() {
101 void *const token = dlopen("/usr/lib/swift/libswiftCore.dylib", RTLD_LAZY | RTLD_LOCAL);
102 ASSERT(token);
103 swiftRetain.store((id(*)(id))dlsym(token, "swift_retain"), memory_order_relaxed);
104 ASSERT(swiftRetain.load(memory_order_relaxed));
105 swiftRelease.store((void(*)(id))dlsym(token, "swift_release"), memory_order_relaxed);
106 ASSERT(swiftRelease.load(memory_order_relaxed));
107 dlclose(token);
108 }
109
110 static id _initializeSwiftRefcountingThenCallRetain(id objc) {
111 _initializeSwiftRefcounting();
112 return swiftRetain.load(memory_order_relaxed)(objc);
113 }
114
115 static void _initializeSwiftRefcountingThenCallRelease(id objc) {
116 _initializeSwiftRefcounting();
117 swiftRelease.load(memory_order_relaxed)(objc);
118 }
119
120 namespace objc {
121 extern int PageCountWarning;
122 }
123
124 namespace {
125
126 #if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
127 uint32_t numFaults = 0;
128 #endif
129
130 // The order of these bits is important.
131 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
132 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
133 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
134 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
135
136 #define SIDE_TABLE_RC_SHIFT 2
137 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
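// Worked example: a refcnts value of 0x0D (binary 1101) decodes as
//   bit 0   (WEAKLY_REFERENCED) = 1   the object has been weakly referenced
//   bit 1   (DEALLOCATING)      = 0   not deallocating
//   bits 2+ (RC field)          = 3   three retains beyond the implicit +1
// so sidetable_retainCount() below reports 1 + 3 = 4.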
138
139 struct RefcountMapValuePurgeable {
140 static inline bool isPurgeable(size_t x) {
141 return x == 0;
142 }
143 };
144
145 // RefcountMap disguises its pointers because we
146 // don't want the table to act as a root for `leaks`.
147 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;
148
149 // Template parameters.
150 enum HaveOld { DontHaveOld = false, DoHaveOld = true };
151 enum HaveNew { DontHaveNew = false, DoHaveNew = true };
152
153 struct SideTable {
154 spinlock_t slock;
155 RefcountMap refcnts;
156 weak_table_t weak_table;
157
158 SideTable() {
159 memset(&weak_table, 0, sizeof(weak_table));
160 }
161
162 ~SideTable() {
163 _objc_fatal("Do not delete SideTable.");
164 }
165
166 void lock() { slock.lock(); }
167 void unlock() { slock.unlock(); }
168 void forceReset() { slock.forceReset(); }
169
170 // Address-ordered lock discipline for a pair of side tables.
171
172 template<HaveOld, HaveNew>
173 static void lockTwo(SideTable *lock1, SideTable *lock2);
174 template<HaveOld, HaveNew>
175 static void unlockTwo(SideTable *lock1, SideTable *lock2);
176 };
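// Why the ordering matters: two threads performing weak stores in opposite
// directions could otherwise acquire the same pair of table locks as (A,B)
// and (B,A) and deadlock. spinlock_t::lockTwo sorts the two locks by
// address so both threads take them in the same order.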
177
178
179 template<>
180 void SideTable::lockTwo<DoHaveOld, DoHaveNew>
181 (SideTable *lock1, SideTable *lock2)
182 {
183 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
184 }
185
186 template<>
187 void SideTable::lockTwo<DoHaveOld, DontHaveNew>
188 (SideTable *lock1, SideTable *)
189 {
190 lock1->lock();
191 }
192
193 template<>
194 void SideTable::lockTwo<DontHaveOld, DoHaveNew>
195 (SideTable *, SideTable *lock2)
196 {
197 lock2->lock();
198 }
199
200 template<>
201 void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
202 (SideTable *lock1, SideTable *lock2)
203 {
204 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
205 }
206
207 template<>
208 void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
209 (SideTable *lock1, SideTable *)
210 {
211 lock1->unlock();
212 }
213
214 template<>
215 void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
216 (SideTable *, SideTable *lock2)
217 {
218 lock2->unlock();
219 }
220
221 static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;
222
223 static StripedMap<SideTable>& SideTables() {
224 return SideTablesMap.get();
225 }
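// Note: StripedMap hashes the object's address into a small fixed set of
// SideTable stripes, so unrelated objects may share a table and its lock.
// This bounds memory use at the cost of occasional lock contention between
// objects that happen to hash to the same stripe.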
226
227 // anonymous namespace
228 };
229
230 void SideTableLockAll() {
231 SideTables().lockAll();
232 }
233
234 void SideTableUnlockAll() {
235 SideTables().unlockAll();
236 }
237
238 void SideTableForceResetAll() {
239 SideTables().forceResetAll();
240 }
241
242 void SideTableDefineLockOrder() {
243 SideTables().defineLockOrder();
244 }
245
246 void SideTableLocksPrecedeLock(const void *newlock) {
247 SideTables().precedeLock(newlock);
248 }
249
250 void SideTableLocksSucceedLock(const void *oldlock) {
251 SideTables().succeedLock(oldlock);
252 }
253
254 void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
255 int i = 0;
256 const void *newlock;
257 while ((newlock = newlocks.getLock(i++))) {
258 SideTables().precedeLock(newlock);
259 }
260 }
261
262 void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
263 int i = 0;
264 const void *oldlock;
265 while ((oldlock = oldlocks.getLock(i++))) {
266 SideTables().succeedLock(oldlock);
267 }
268 }
269
270 // Call out to the _setWeaklyReferenced method on obj, if implemented.
271 static void callSetWeaklyReferenced(id obj) {
272 if (!obj)
273 return;
274
275 Class cls = obj->getIsa();
276
277 if (slowpath(cls->hasCustomRR() && !object_isClass(obj))) {
278 ASSERT(((objc_class *)cls)->isInitializing() || ((objc_class *)cls)->isInitialized());
279 void (*setWeaklyReferenced)(id, SEL) = (void(*)(id, SEL))
280 class_getMethodImplementation(cls, @selector(_setWeaklyReferenced));
281 if ((IMP)setWeaklyReferenced != _objc_msgForward) {
282 (*setWeaklyReferenced)(obj, @selector(_setWeaklyReferenced));
283 }
284 }
285 }
286
287 //
288 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
289 //
290
291 id objc_retainBlock(id x) {
292 return (id)_Block_copy(x);
293 }
294
295 //
296 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
297 //
298
299 BOOL objc_should_deallocate(id object) {
300 return YES;
301 }
302
303 id
304 objc_retain_autorelease(id obj)
305 {
306 return objc_autorelease(objc_retain(obj));
307 }
308
309
310 void
311 objc_storeStrong(id *location, id obj)
312 {
313 id prev = *location;
314 if (obj == prev) {
315 return;
316 }
317 objc_retain(obj);
318 *location = obj;
319 objc_release(prev);
320 }
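// Usage sketch: under ARC an assignment to a __strong lvalue, e.g.
//   strongVar = newObj;
// may be lowered to
//   objc_storeStrong(&strongVar, newObj);
// Retaining the new value before releasing the old one keeps this safe even
// when releasing `prev` could deallocate the object `newObj` refers to
// (or an object it depends on).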
321
322
323 // Update a weak variable.
324 // If HaveOld is true, the variable has an existing value
325 // that needs to be cleaned up. This value might be nil.
326 // If HaveNew is true, there is a new value that needs to be
327 // assigned into the variable. This value might be nil.
328 // If CrashIfDeallocating is true, the process is halted if newObj is
329 // deallocating or newObj's class does not support weak references.
330 // If CrashIfDeallocating is false, nil is stored instead.
331 enum CrashIfDeallocating {
332 DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
333 };
334 template <HaveOld haveOld, HaveNew haveNew,
335 enum CrashIfDeallocating crashIfDeallocating>
336 static id
337 storeWeak(id *location, objc_object *newObj)
338 {
339 ASSERT(haveOld || haveNew);
340 if (!haveNew) ASSERT(newObj == nil);
341
342 Class previouslyInitializedClass = nil;
343 id oldObj;
344 SideTable *oldTable;
345 SideTable *newTable;
346
347 // Acquire locks for old and new values.
348 // Order by lock address to prevent lock ordering problems.
349 // Retry if the old value changes underneath us.
350 retry:
351 if (haveOld) {
352 oldObj = *location;
353 oldTable = &SideTables()[oldObj];
354 } else {
355 oldTable = nil;
356 }
357 if (haveNew) {
358 newTable = &SideTables()[newObj];
359 } else {
360 newTable = nil;
361 }
362
363 SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
364
365 if (haveOld && *location != oldObj) {
366 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
367 goto retry;
368 }
369
370 // Prevent a deadlock between the weak reference machinery
371 // and the +initialize machinery by ensuring that no
372 // weakly-referenced object has an un-+initialized isa.
373 if (haveNew && newObj) {
374 Class cls = newObj->getIsa();
375 if (cls != previouslyInitializedClass &&
376 !((objc_class *)cls)->isInitialized())
377 {
378 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
379 class_initialize(cls, (id)newObj);
380
381 // If this class is finished with +initialize then we're good.
382 // If this class is still running +initialize on this thread
383 // (i.e. +initialize called storeWeak on an instance of itself)
384 // then we may proceed but it will appear initializing and
385 // not yet initialized to the check above.
386 // Instead set previouslyInitializedClass to recognize it on retry.
387 previouslyInitializedClass = cls;
388
389 goto retry;
390 }
391 }
392
393 // Clean up old value, if any.
394 if (haveOld) {
395 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
396 }
397
398 // Assign new value, if any.
399 if (haveNew) {
400 newObj = (objc_object *)
401 weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
402 crashIfDeallocating ? CrashIfDeallocating : ReturnNilIfDeallocating);
403 // weak_register_no_lock returns nil if weak store should be rejected
404
405 // Set is-weakly-referenced bit in refcount table.
406 if (!newObj->isTaggedPointerOrNil()) {
407 newObj->setWeaklyReferenced_nolock();
408 }
409
410 // Do not set *location anywhere else. That would introduce a race.
411 *location = (id)newObj;
412 }
413 else {
414 // No new value. The storage is not changed.
415 }
416
417 SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
418
419 // This must be called without the locks held, as it can invoke
420 // arbitrary code. In particular, even if _setWeaklyReferenced
421 // is not implemented, resolveInstanceMethod: may be, and may
422 // call back into the weak reference machinery.
423 callSetWeaklyReferenced((id)newObj);
424
425 return (id)newObj;
426 }
427
428
429 /**
430 * This function stores a new value into a __weak variable. It would
431 * be used anywhere a __weak variable is the target of an assignment.
432 *
433 * @param location The address of the weak pointer itself
434 * @param newObj The new object this weak ptr should now point to
435 *
436 * @return \e newObj
437 */
438 id
439 objc_storeWeak(id *location, id newObj)
440 {
441 return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
442 (location, (objc_object *)newObj);
443 }
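// Usage sketch (approximate ARC lowering of a __weak local):
//   __weak id weakRef = obj;   // objc_initWeak(&weakRef, obj)
//   weakRef = other;           // objc_storeWeak(&weakRef, other)
//   // scope exit:                objc_destroyWeak(&weakRef)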
444
445
446 /**
447 * This function stores a new value into a __weak variable.
448 * If the new object is deallocating or the new object's class
449 * does not support weak references, stores nil instead.
450 *
451 * @param location The address of the weak pointer itself
452 * @param newObj The new object this weak ptr should now point to
453 *
454 * @return The value stored (either the new object or nil)
455 */
456 id
457 objc_storeWeakOrNil(id *location, id newObj)
458 {
459 return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
460 (location, (objc_object *)newObj);
461 }
462
463
464 /**
465 * Initialize a fresh weak pointer to some object location.
466 * It would be used for code like:
467 *
468 * (The nil case)
469 * __weak id weakPtr;
470 * (The non-nil case)
471 * NSObject *o = ...;
472 * __weak id weakPtr = o;
473 *
474 * This function IS NOT thread-safe with respect to concurrent
475 * modifications to the weak variable. (Concurrent weak clear is safe.)
476 *
477 * @param location Address of __weak ptr.
478 * @param newObj Object ptr.
479 */
480 id
481 objc_initWeak(id *location, id newObj)
482 {
483 if (!newObj) {
484 *location = nil;
485 return nil;
486 }
487
488 return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
489 (location, (objc_object*)newObj);
490 }
491
492 id
493 objc_initWeakOrNil(id *location, id newObj)
494 {
495 if (!newObj) {
496 *location = nil;
497 return nil;
498 }
499
500 return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
501 (location, (objc_object*)newObj);
502 }
503
504
505 /**
506 * Destroys the relationship between a weak pointer
507 * and the object it is referencing in the internal weak
508 * table. If the weak pointer is not referencing anything,
509 * there is no need to edit the weak table.
510 *
511 * This function IS NOT thread-safe with respect to concurrent
512 * modifications to the weak variable. (Concurrent weak clear is safe.)
513 *
514 * @param location The weak pointer address.
515 */
516 void
517 objc_destroyWeak(id *location)
518 {
519 (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
520 (location, nil);
521 }
522
523
524 /*
525 Once upon a time we eagerly cleared *location if we saw the object
526 was deallocating. This confuses code like NSPointerFunctions which
527 tries to pre-flight the raw storage and assumes if the storage is
528 zero then the weak system is done interfering. That is false: the
529 weak system is still going to check and clear the storage later.
530 This can cause objc_weak_error complaints and crashes.
531 So we now don't touch the storage until deallocation completes.
532 */
533
534 id
535 objc_loadWeakRetained(id *location)
536 {
537 id obj;
538 id result;
539 Class cls;
540
541 SideTable *table;
542
543 retry:
544 // fixme std::atomic this load
545 obj = *location;
546 if (obj->isTaggedPointerOrNil()) return obj;
547
548 table = &SideTables()[obj];
549
550 table->lock();
551 if (*location != obj) {
552 table->unlock();
553 goto retry;
554 }
555
556 result = obj;
557
558 cls = obj->ISA();
559 if (! cls->hasCustomRR()) {
560 // Fast case. We know +initialize is complete because
561 // default-RR can never be set before then.
562 ASSERT(cls->isInitialized());
563 if (! obj->rootTryRetain()) {
564 result = nil;
565 }
566 }
567 else {
568 // Slow case. We must check for +initialize and call it outside
569 // the lock if necessary in order to avoid deadlocks.
570 // Use lookUpImpOrForward so we can avoid the assert in
571 // class_getInstanceMethod, since we intentionally make this
572 // callout with the lock held.
573 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
574 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
575 lookUpImpOrForwardTryCache(obj, @selector(retainWeakReference), cls);
576 if ((IMP)tryRetain == _objc_msgForward) {
577 result = nil;
578 }
579 else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
580 result = nil;
581 }
582 }
583 else {
584 table->unlock();
585 class_initialize(cls, obj);
586 goto retry;
587 }
588 }
589
590 table->unlock();
591 return result;
592 }
593
594 /**
595 * This loads the object referenced by a weak pointer and returns it, after
596 * retaining and autoreleasing the object to ensure that it stays alive
597 * long enough for the caller to use it. This function would be used
598 * anywhere a __weak variable is used in an expression.
599 *
600 * @param location The weak pointer address
601 *
602 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
603 */
604 id
605 objc_loadWeak(id *location)
606 {
607 if (!*location) return nil;
608 return objc_autorelease(objc_loadWeakRetained(location));
609 }
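// Usage sketch: reading a __weak variable in an expression, e.g.
//   id strong = weakRef;
// is lowered to objc_loadWeakRetained(&weakRef) plus a balancing release
// when `strong` dies. Taking the retain under the side table lock is what
// guarantees the object cannot finish deallocating between the load and
// the caller's use of it.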
610
611
612 /**
613 * This function copies a weak pointer from one location to another,
614 * when the destination doesn't already contain a weak pointer. It
615 * would be used for code like:
616 *
617 * __weak id src = ...;
618 * __weak id dst = src;
619 *
620 * This function IS NOT thread-safe with respect to concurrent
621 * modifications to the destination variable. (Concurrent weak clear is safe.)
622 *
623 * @param dst The destination variable.
624 * @param src The source variable.
625 */
626 void
627 objc_copyWeak(id *dst, id *src)
628 {
629 id obj = objc_loadWeakRetained(src);
630 objc_initWeak(dst, obj);
631 objc_release(obj);
632 }
633
634 /**
635 * Move a weak pointer from one location to another.
636 * Before the move, the destination must be uninitialized.
637 * After the move, the source is nil.
638 *
639 * This function IS NOT thread-safe with respect to concurrent
640 * modifications to either weak variable. (Concurrent weak clear is safe.)
641 *
642 */
643 void
644 objc_moveWeak(id *dst, id *src)
645 {
646 id obj;
647 SideTable *table;
648
649 retry:
650 obj = *src;
651 if (obj == nil) {
652 *dst = nil;
653 return;
654 }
655
656 table = &SideTables()[obj];
657 table->lock();
658 if (*src != obj) {
659 table->unlock();
660 goto retry;
661 }
662
663 weak_unregister_no_lock(&table->weak_table, obj, src);
664 weak_register_no_lock(&table->weak_table, obj, dst, DontCheckDeallocating);
665 *dst = obj;
666 *src = nil;
667 table->unlock();
668 }
669
670
671 /***********************************************************************
672 Autorelease pool implementation
673
674 A thread's autorelease pool is a stack of pointers.
675 Each pointer is either an object to release, or POOL_BOUNDARY which is
676 an autorelease pool boundary.
677 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
678 the pool is popped, every object hotter than the sentinel is released.
679 The stack is divided into a doubly-linked list of pages. Pages are added
680 and deleted as necessary.
681 Thread-local storage points to the hot page, where newly autoreleased
682 objects are stored.
683 **********************************************************************/
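// Usage sketch: an @autoreleasepool block is approximately lowered to
//
//   void *token = objc_autoreleasePoolPush();   // pushes POOL_BOUNDARY
//   ...                                         // objc_autorelease() appends objects here
//   objc_autoreleasePoolPop(token);             // releases everything hotter than token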
684
685 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
686 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
687
688 class AutoreleasePoolPage : private AutoreleasePoolPageData
689 {
690 friend struct thread_data_t;
691
692 public:
693 static size_t const SIZE =
694 #if PROTECT_AUTORELEASEPOOL
695 PAGE_MAX_SIZE; // must be multiple of vm page size
696 #else
697 PAGE_MIN_SIZE; // size and alignment, power of 2
698 #endif
699
700 private:
701 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
702 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
703 static size_t const COUNT = SIZE / sizeof(id);
704 static size_t const MAX_FAULTS = 2;
705
706 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
707 // pushed and it has never contained any objects. This saves memory
708 // when the top level (i.e. libdispatch) pushes and pops pools but
709 // never uses them.
710 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
711
712 # define POOL_BOUNDARY nil
713
714 // SIZE-sizeof(*this) bytes of contents follow
715
716 static void * operator new(size_t size) {
717 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
718 }
719 static void operator delete(void * p) {
720 return free(p);
721 }
722
723 inline void protect() {
724 #if PROTECT_AUTORELEASEPOOL
725 mprotect(this, SIZE, PROT_READ);
726 check();
727 #endif
728 }
729
730 inline void unprotect() {
731 #if PROTECT_AUTORELEASEPOOL
732 check();
733 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
734 #endif
735 }
736
737 void checkTooMuchAutorelease()
738 {
739 #if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
740 bool objcModeNoFaults = DisableFaults || getpid() == 1 ||
741 !os_variant_has_internal_diagnostics("com.apple.obj-c");
742 if (!objcModeNoFaults) {
743         if (depth+1 >= (uint32_t)objc::PageCountWarning && numFaults < MAX_FAULTS) { // depth is 0 when the first page is allocated
744 os_fault_with_payload(OS_REASON_LIBSYSTEM,
745 OS_REASON_LIBSYSTEM_CODE_FAULT,
746 NULL, 0, "Large Autorelease Pool", 0);
747 numFaults++;
748 }
749 }
750 #endif
751 }
752
753 AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
754 AutoreleasePoolPageData(begin(),
755 objc_thread_self(),
756 newParent,
757 newParent ? 1+newParent->depth : 0,
758 newParent ? newParent->hiwat : 0)
759 {
760 if (objc::PageCountWarning != -1) {
761 checkTooMuchAutorelease();
762 }
763
764 if (parent) {
765 parent->check();
766 ASSERT(!parent->child);
767 parent->unprotect();
768 parent->child = this;
769 parent->protect();
770 }
771 protect();
772 }
773
774 ~AutoreleasePoolPage()
775 {
776 check();
777 unprotect();
778 ASSERT(empty());
779
780 // Not recursive: we don't want to blow out the stack
781 // if a thread accumulates a stupendous amount of garbage
782 ASSERT(!child);
783 }
784
785 template<typename Fn>
786 void
787 busted(Fn log) const
788 {
789 magic_t right;
790 log("autorelease pool page %p corrupted\n"
791 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
792 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
793 " pthread %p\n"
794 " should be %p\n",
795 this,
796 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
797 right.m[0], right.m[1], right.m[2], right.m[3],
798 this->thread, objc_thread_self());
799 }
800
801 __attribute__((noinline, cold, noreturn))
802 void
803 busted_die() const
804 {
805 busted(_objc_fatal);
806 __builtin_unreachable();
807 }
808
809 inline void
810 check(bool die = true) const
811 {
812 if (!magic.check() || thread != objc_thread_self()) {
813 if (die) {
814 busted_die();
815 } else {
816 busted(_objc_inform);
817 }
818 }
819 }
820
821 inline void
822 fastcheck() const
823 {
824 #if CHECK_AUTORELEASEPOOL
825 check();
826 #else
827 if (! magic.fastcheck()) {
828 busted_die();
829 }
830 #endif
831 }
832
833
834 id * begin() {
835 return (id *) ((uint8_t *)this+sizeof(*this));
836 }
837
838 id * end() {
839 return (id *) ((uint8_t *)this+SIZE);
840 }
841
842 bool empty() {
843 return next == begin();
844 }
845
846 bool full() {
847 return next == end();
848 }
849
850 bool lessThanHalfFull() {
851 return (next - begin() < (end() - begin()) / 2);
852 }
853
854 id *add(id obj)
855 {
856 ASSERT(!full());
857 unprotect();
858 id *ret;
859
860 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
861 if (!DisableAutoreleaseCoalescing || !DisableAutoreleaseCoalescingLRU) {
862 if (!DisableAutoreleaseCoalescingLRU) {
863 if (!empty() && (obj != POOL_BOUNDARY)) {
864 AutoreleasePoolEntry *topEntry = (AutoreleasePoolEntry *)next - 1;
865 for (uintptr_t offset = 0; offset < 4; offset++) {
866 AutoreleasePoolEntry *offsetEntry = topEntry - offset;
867 if (offsetEntry <= (AutoreleasePoolEntry*)begin() || *(id *)offsetEntry == POOL_BOUNDARY) {
868 break;
869 }
870 if (offsetEntry->ptr == (uintptr_t)obj && offsetEntry->count < AutoreleasePoolEntry::maxCount) {
871 if (offset > 0) {
872 AutoreleasePoolEntry found = *offsetEntry;
873 memmove(offsetEntry, offsetEntry + 1, offset * sizeof(*offsetEntry));
874 *topEntry = found;
875 }
876 topEntry->count++;
877 ret = (id *)topEntry; // need to reset ret
878 goto done;
879 }
880 }
881 }
882 } else {
883 if (!empty() && (obj != POOL_BOUNDARY)) {
884 AutoreleasePoolEntry *prevEntry = (AutoreleasePoolEntry *)next - 1;
885 if (prevEntry->ptr == (uintptr_t)obj && prevEntry->count < AutoreleasePoolEntry::maxCount) {
886 prevEntry->count++;
887 ret = (id *)prevEntry; // need to reset ret
888 goto done;
889 }
890 }
891 }
892 }
893 #endif
894 ret = next; // faster than `return next-1` because of aliasing
895 *next++ = obj;
896 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
897 // Make sure obj fits in the bits available for it
898 ASSERT(((AutoreleasePoolEntry *)ret)->ptr == (uintptr_t)obj);
899 #endif
900 done:
901 protect();
902 return ret;
903 }
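    // Coalescing sketch: with SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS, N
    // back-to-back autoreleases of one object occupy a single entry whose
    // count is N-1, and releaseUntil() later issues count+1 releases for
    // it. The LRU variant also matches against the topmost four entries
    // and moves a hit back to the top.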
904
905 void releaseAll()
906 {
907 releaseUntil(begin());
908 }
909
910 void releaseUntil(id *stop)
911 {
912 // Not recursive: we don't want to blow out the stack
913 // if a thread accumulates a stupendous amount of garbage
914
915 while (this->next != stop) {
916 // Restart from hotPage() every time, in case -release
917 // autoreleased more objects
918 AutoreleasePoolPage *page = hotPage();
919
920 // fixme I think this `while` can be `if`, but I can't prove it
921 while (page->empty()) {
922 page = page->parent;
923 setHotPage(page);
924 }
925
926 page->unprotect();
927 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
928 AutoreleasePoolEntry* entry = (AutoreleasePoolEntry*) --page->next;
929
930 // create an obj with the zeroed out top byte and release that
931 id obj = (id)entry->ptr;
932 int count = (int)entry->count; // grab these before memset
933 #else
934 id obj = *--page->next;
935 #endif
936 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
937 page->protect();
938
939 if (obj != POOL_BOUNDARY) {
940 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
941 // release count+1 times since it is count of the additional
942 // autoreleases beyond the first one
943 for (int i = 0; i < count + 1; i++) {
944 objc_release(obj);
945 }
946 #else
947 objc_release(obj);
948 #endif
949 }
950 }
951
952 setHotPage(this);
953
954 #if DEBUG
955 // we expect any children to be completely empty
956 for (AutoreleasePoolPage *page = child; page; page = page->child) {
957 ASSERT(page->empty());
958 }
959 #endif
960 }
961
962 void kill()
963 {
964 // Not recursive: we don't want to blow out the stack
965 // if a thread accumulates a stupendous amount of garbage
966 AutoreleasePoolPage *page = this;
967 while (page->child) page = page->child;
968
969 AutoreleasePoolPage *deathptr;
970 do {
971 deathptr = page;
972 page = page->parent;
973 if (page) {
974 page->unprotect();
975 page->child = nil;
976 page->protect();
977 }
978 delete deathptr;
979 } while (deathptr != this);
980 }
981
982 static void tls_dealloc(void *p)
983 {
984 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
985 // No objects or pool pages to clean up here.
986 return;
987 }
988
989 // reinstate TLS value while we work
990 setHotPage((AutoreleasePoolPage *)p);
991
992 if (AutoreleasePoolPage *page = coldPage()) {
993 if (!page->empty()) objc_autoreleasePoolPop(page->begin()); // pop all of the pools
994 if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
995 // pop() killed the pages already
996 } else {
997 page->kill(); // free all of the pages
998 }
999 }
1000
1001 // clear TLS value so TLS destruction doesn't loop
1002 setHotPage(nil);
1003 }
1004
1005 static AutoreleasePoolPage *pageForPointer(const void *p)
1006 {
1007 return pageForPointer((uintptr_t)p);
1008 }
1009
1010 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
1011 {
1012 AutoreleasePoolPage *result;
1013 uintptr_t offset = p % SIZE;
1014
1015 ASSERT(offset >= sizeof(AutoreleasePoolPage));
1016
1017 result = (AutoreleasePoolPage *)(p - offset);
1018 result->fastcheck();
1019
1020 return result;
1021 }
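    // Worked example: operator new above allocates every page SIZE-aligned,
    // so with SIZE == 4096 a pointer 0x10f00c048 has offset 0x048 and its
    // page header sits at 0x10f00c000. The assert holds because the first
    // sizeof(AutoreleasePoolPage) bytes of a page are always the header,
    // never object slots.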
1022
1023
1024 static inline bool haveEmptyPoolPlaceholder()
1025 {
1026 id *tls = (id *)tls_get_direct(key);
1027 return (tls == EMPTY_POOL_PLACEHOLDER);
1028 }
1029
1030 static inline id* setEmptyPoolPlaceholder()
1031 {
1032 ASSERT(tls_get_direct(key) == nil);
1033 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
1034 return EMPTY_POOL_PLACEHOLDER;
1035 }
1036
1037 static inline AutoreleasePoolPage *hotPage()
1038 {
1039 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
1040 tls_get_direct(key);
1041 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
1042 if (result) result->fastcheck();
1043 return result;
1044 }
1045
1046 static inline void setHotPage(AutoreleasePoolPage *page)
1047 {
1048 if (page) page->fastcheck();
1049 tls_set_direct(key, (void *)page);
1050 }
1051
1052 static inline AutoreleasePoolPage *coldPage()
1053 {
1054 AutoreleasePoolPage *result = hotPage();
1055 if (result) {
1056 while (result->parent) {
1057 result = result->parent;
1058 result->fastcheck();
1059 }
1060 }
1061 return result;
1062 }
1063
1064
1065 static inline id *autoreleaseFast(id obj)
1066 {
1067 AutoreleasePoolPage *page = hotPage();
1068 if (page && !page->full()) {
1069 return page->add(obj);
1070 } else if (page) {
1071 return autoreleaseFullPage(obj, page);
1072 } else {
1073 return autoreleaseNoPage(obj);
1074 }
1075 }
1076
1077 static __attribute__((noinline))
1078 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
1079 {
1080 // The hot page is full.
1081 // Step to the next non-full page, adding a new page if necessary.
1082 // Then add the object to that page.
1083 ASSERT(page == hotPage());
1084 ASSERT(page->full() || DebugPoolAllocation);
1085
1086 do {
1087 if (page->child) page = page->child;
1088 else page = new AutoreleasePoolPage(page);
1089 } while (page->full());
1090
1091 setHotPage(page);
1092 return page->add(obj);
1093 }
1094
1095 static __attribute__((noinline))
1096 id *autoreleaseNoPage(id obj)
1097 {
1098 // "No page" could mean no pool has been pushed
1099 // or an empty placeholder pool has been pushed and has no contents yet
1100 ASSERT(!hotPage());
1101
1102 bool pushExtraBoundary = false;
1103 if (haveEmptyPoolPlaceholder()) {
1104 // We are pushing a second pool over the empty placeholder pool
1105 // or pushing the first object into the empty placeholder pool.
1106 // Before doing that, push a pool boundary on behalf of the pool
1107 // that is currently represented by the empty placeholder.
1108 pushExtraBoundary = true;
1109 }
1110 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
1111 // We are pushing an object with no pool in place,
1112 // and no-pool debugging was requested by environment.
1113 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
1114 "autoreleased with no pool in place - "
1115 "just leaking - break on "
1116 "objc_autoreleaseNoPool() to debug",
1117 objc_thread_self(), (void*)obj, object_getClassName(obj));
1118 objc_autoreleaseNoPool(obj);
1119 return nil;
1120 }
1121 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
1122 // We are pushing a pool with no pool in place,
1123 // and alloc-per-pool debugging was not requested.
1124 // Install and return the empty pool placeholder.
1125 return setEmptyPoolPlaceholder();
1126 }
1127
1128 // We are pushing an object or a non-placeholder'd pool.
1129
1130 // Install the first page.
1131 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
1132 setHotPage(page);
1133
1134 // Push a boundary on behalf of the previously-placeholder'd pool.
1135 if (pushExtraBoundary) {
1136 page->add(POOL_BOUNDARY);
1137 }
1138
1139 // Push the requested object or pool.
1140 return page->add(obj);
1141 }
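    // Scenario sketch: push() with no pool in place stores
    // EMPTY_POOL_PLACEHOLDER in TLS without allocating anything. The first
    // later autorelease lands here, allocates the first real page, pushes
    // one POOL_BOUNDARY on behalf of the placeholder pool, then stores the
    // object, so the eventual pop of the placeholder token still finds a
    // boundary to stop at.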
1142
1143
1144 static __attribute__((noinline))
1145 id *autoreleaseNewPage(id obj)
1146 {
1147 AutoreleasePoolPage *page = hotPage();
1148 if (page) return autoreleaseFullPage(obj, page);
1149 else return autoreleaseNoPage(obj);
1150 }
1151
1152 public:
1153 static inline id autorelease(id obj)
1154 {
1155 ASSERT(!obj->isTaggedPointerOrNil());
1156 id *dest __unused = autoreleaseFast(obj);
1157 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1158 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || (id)((AutoreleasePoolEntry *)dest)->ptr == obj);
1159 #else
1160 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
1161 #endif
1162 return obj;
1163 }
1164
1165
1166 static inline void *push()
1167 {
1168 id *dest;
1169 if (slowpath(DebugPoolAllocation)) {
1170 // Each autorelease pool starts on a new pool page.
1171 dest = autoreleaseNewPage(POOL_BOUNDARY);
1172 } else {
1173 dest = autoreleaseFast(POOL_BOUNDARY);
1174 }
1175 ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1176 return dest;
1177 }
1178
1179 __attribute__((noinline, cold))
1180 static void badPop(void *token)
1181 {
1182 // Error. For bincompat purposes this is not
1183 // fatal in executables built with old SDKs.
1184
1185 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
1186 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1187 _objc_fatal
1188 ("Invalid or prematurely-freed autorelease pool %p.", token);
1189 }
1190
1191 // Old SDK. Bad pop is warned once.
1192 static bool complained = false;
1193 if (!complained) {
1194 complained = true;
1195 _objc_inform_now_and_on_crash
1196 ("Invalid or prematurely-freed autorelease pool %p. "
1197 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1198 "Proceeding anyway because the app is old. Memory errors "
1199 "are likely.",
1200 token);
1201 }
1202 objc_autoreleasePoolInvalid(token);
1203 }
1204
1205 template<bool allowDebug>
1206 static void
1207 popPage(void *token, AutoreleasePoolPage *page, id *stop)
1208 {
1209 if (allowDebug && PrintPoolHiwat) printHiwat();
1210
1211 page->releaseUntil(stop);
1212
1213 // memory: delete empty children
1214 if (allowDebug && DebugPoolAllocation && page->empty()) {
1215 // special case: delete everything during page-per-pool debugging
1216 AutoreleasePoolPage *parent = page->parent;
1217 page->kill();
1218 setHotPage(parent);
1219 } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
1220 // special case: delete everything for pop(top)
1221 // when debugging missing autorelease pools
1222 page->kill();
1223 setHotPage(nil);
1224 } else if (page->child) {
1225 // hysteresis: keep one empty child if page is more than half full
1226 if (page->lessThanHalfFull()) {
1227 page->child->kill();
1228 }
1229 else if (page->child->child) {
1230 page->child->child->kill();
1231 }
1232 }
1233 }
1234
1235 __attribute__((noinline, cold))
1236 static void
1237 popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
1238 {
1239 popPage<true>(token, page, stop);
1240 }
1241
1242 static inline void
1243 pop(void *token)
1244 {
1245 AutoreleasePoolPage *page;
1246 id *stop;
1247 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1248 // Popping the top-level placeholder pool.
1249 page = hotPage();
1250 if (!page) {
1251 // Pool was never used. Clear the placeholder.
1252 return setHotPage(nil);
1253 }
1254 // Pool was used. Pop its contents normally.
1255 // Pool pages remain allocated for re-use as usual.
1256 page = coldPage();
1257 token = page->begin();
1258 } else {
1259 page = pageForPointer(token);
1260 }
1261
1262 stop = (id *)token;
1263 if (*stop != POOL_BOUNDARY) {
1264 if (stop == page->begin() && !page->parent) {
1265 // Start of coldest page may correctly not be POOL_BOUNDARY:
1266 // 1. top-level pool is popped, leaving the cold page in place
1267 // 2. an object is autoreleased with no pool
1268 } else {
1269 // Error. For bincompat purposes this is not
1270 // fatal in executables built with old SDKs.
1271 return badPop(token);
1272 }
1273 }
1274
1275 if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
1276 return popPageDebug(token, page, stop);
1277 }
1278
1279 return popPage<false>(token, page, stop);
1280 }
1281
1282 static void init()
1283 {
1284 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1285 AutoreleasePoolPage::tls_dealloc);
1286 ASSERT(r == 0);
1287 }
1288
1289 __attribute__((noinline, cold))
1290 void print()
1291 {
1292 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1293 full() ? "(full)" : "",
1294 this == hotPage() ? "(hot)" : "",
1295 this == coldPage() ? "(cold)" : "");
1296 check(false);
1297 for (id *p = begin(); p < next; p++) {
1298 if (*p == POOL_BOUNDARY) {
1299 _objc_inform("[%p] ################ POOL %p", p, p);
1300 } else {
1301 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1302 AutoreleasePoolEntry *entry = (AutoreleasePoolEntry *)p;
1303 if (entry->count > 0) {
1304 id obj = (id)entry->ptr;
1305 _objc_inform("[%p] %#16lx %s autorelease count %u",
1306 p, (unsigned long)obj, object_getClassName(obj),
1307 entry->count + 1);
1308 goto done;
1309 }
1310 #endif
1311 _objc_inform("[%p] %#16lx %s",
1312 p, (unsigned long)*p, object_getClassName(*p));
1313 done:;
1314 }
1315 }
1316 }
1317
1318 __attribute__((noinline, cold))
1319 static void printAll()
1320 {
1321 _objc_inform("##############");
1322 _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());
1323
1324 AutoreleasePoolPage *page;
1325 ptrdiff_t objects = 0;
1326 for (page = coldPage(); page; page = page->child) {
1327 objects += page->next - page->begin();
1328 }
1329 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1330
1331 if (haveEmptyPoolPlaceholder()) {
1332 _objc_inform("[%p] ................ PAGE (placeholder)",
1333 EMPTY_POOL_PLACEHOLDER);
1334 _objc_inform("[%p] ################ POOL (placeholder)",
1335 EMPTY_POOL_PLACEHOLDER);
1336 }
1337 else {
1338 for (page = coldPage(); page; page = page->child) {
1339 page->print();
1340 }
1341 }
1342
1343 _objc_inform("##############");
1344 }
1345
1346 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1347 __attribute__((noinline, cold))
1348 unsigned sumOfExtraReleases()
1349 {
1350 unsigned sumOfExtraReleases = 0;
1351 for (id *p = begin(); p < next; p++) {
1352 if (*p != POOL_BOUNDARY) {
1353 sumOfExtraReleases += ((AutoreleasePoolEntry *)p)->count;
1354 }
1355 }
1356 return sumOfExtraReleases;
1357 }
1358 #endif
1359
1360 __attribute__((noinline, cold))
1361 static void printHiwat()
1362 {
1363 // Check and propagate high water mark
1364 // Ignore high water marks under 256 to suppress noise.
1365 AutoreleasePoolPage *p = hotPage();
1366 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1367 if (mark > p->hiwat + 256) {
1368 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1369 unsigned sumOfExtraReleases = 0;
1370 #endif
1371 for( ; p; p = p->parent) {
1372 p->unprotect();
1373 p->hiwat = mark;
1374 p->protect();
1375
1376 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1377 sumOfExtraReleases += p->sumOfExtraReleases();
1378 #endif
1379 }
1380
1381 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1382 "pending releases for thread %p:",
1383 mark, objc_thread_self());
1384 #if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1385 if (sumOfExtraReleases > 0) {
1386 _objc_inform("POOL HIGHWATER: extra sequential autoreleases of objects: %u",
1387 sumOfExtraReleases);
1388 }
1389 #endif
1390
1391 void *stack[128];
1392 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1393 char **sym = backtrace_symbols(stack, count);
1394 for (int i = 0; i < count; i++) {
1395 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1396 }
1397 free(sym);
1398 }
1399 }
1400
1401 #undef POOL_BOUNDARY
1402 };
1403
1404 /***********************************************************************
1405 * Slow paths for inline control
1406 **********************************************************************/
1407
1408 #if SUPPORT_NONPOINTER_ISA
1409
1410 NEVER_INLINE id
1411 objc_object::rootRetain_overflow(bool tryRetain)
1412 {
1413 return rootRetain(tryRetain, RRVariant::Full);
1414 }
1415
1416
1417 NEVER_INLINE uintptr_t
1418 objc_object::rootRelease_underflow(bool performDealloc)
1419 {
1420 return rootRelease(performDealloc, RRVariant::Full);
1421 }
1422
1423
1424 // Slow path of clearDeallocating()
1425 // for objects with nonpointer isa
1426 // that were ever weakly referenced
1427 // or whose retain count ever overflowed to the side table.
1428 NEVER_INLINE void
1429 objc_object::clearDeallocating_slow()
1430 {
1431 ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1432
1433 SideTable& table = SideTables()[this];
1434 table.lock();
1435 if (isa.weakly_referenced) {
1436 weak_clear_no_lock(&table.weak_table, (id)this);
1437 }
1438 if (isa.has_sidetable_rc) {
1439 table.refcnts.erase(this);
1440 }
1441 table.unlock();
1442 }
1443
1444 #endif
1445
1446 __attribute__((noinline,used))
1447 id
1448 objc_object::rootAutorelease2()
1449 {
1450 ASSERT(!isTaggedPointer());
1451 return AutoreleasePoolPage::autorelease((id)this);
1452 }
1453
1454
1455 BREAKPOINT_FUNCTION(
1456 void objc_overrelease_during_dealloc_error(void)
1457 );
1458
1459
1460 NEVER_INLINE uintptr_t
1461 objc_object::overrelease_error()
1462 {
1463 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1464 objc_overrelease_during_dealloc_error();
1465 return 0; // allow rootRelease() to tail-call this
1466 }
1467
1468
1469 /***********************************************************************
1470 * Retain count operations for side table.
1471 **********************************************************************/
1472
1473
1474 #if DEBUG
1475 // Used to assert that an object is not present in the side table.
1476 bool
1477 objc_object::sidetable_present()
1478 {
1479 bool result = false;
1480 SideTable& table = SideTables()[this];
1481
1482 table.lock();
1483
1484 RefcountMap::iterator it = table.refcnts.find(this);
1485 if (it != table.refcnts.end()) result = true;
1486
1487 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1488
1489 table.unlock();
1490
1491 return result;
1492 }
1493 #endif
1494
1495 #if SUPPORT_NONPOINTER_ISA
1496
1497 void
1498 objc_object::sidetable_lock()
1499 {
1500 SideTable& table = SideTables()[this];
1501 table.lock();
1502 }
1503
1504 void
1505 objc_object::sidetable_unlock()
1506 {
1507 SideTable& table = SideTables()[this];
1508 table.unlock();
1509 }
1510
1511
1512 // Move the entire retain count to the side table,
1513 // as well as isDeallocating and weaklyReferenced.
1514 void
1515 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1516 bool isDeallocating,
1517 bool weaklyReferenced)
1518 {
1519 ASSERT(!isa.nonpointer); // should already be changed to raw pointer
1520 SideTable& table = SideTables()[this];
1521
1522 size_t& refcntStorage = table.refcnts[this];
1523 size_t oldRefcnt = refcntStorage;
1524 // not deallocating - that was in the isa
1525 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1526 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1527
1528 uintptr_t carry;
1529 size_t refcnt = addc(oldRefcnt, (extra_rc - 1) << SIDE_TABLE_RC_SHIFT, 0, &carry);
1530 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1531 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1532 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1533
1534 refcntStorage = refcnt;
1535 }
1536
1537
1538 // Move some retain counts to the side table from the isa field.
1539 // Returns true if the object is now pinned.
1540 bool
1541 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1542 {
1543 ASSERT(isa.nonpointer);
1544 SideTable& table = SideTables()[this];
1545
1546 size_t& refcntStorage = table.refcnts[this];
1547 size_t oldRefcnt = refcntStorage;
1548 // isa-side bits should not be set here
1549 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1550 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1551
1552 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1553
1554 uintptr_t carry;
1555 size_t newRefcnt =
1556 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1557 if (carry) {
1558 refcntStorage =
1559 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1560 return true;
1561 }
1562 else {
1563 refcntStorage = newRefcnt;
1564 return false;
1565 }
1566 }
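// Note on pinning: once the count overflows, it saturates at
// SIDE_TABLE_RC_PINNED. sidetable_retain() and sidetable_release() both
// check the pinned bit before adjusting, so a pinned count never moves
// again and the object can no longer be deallocated by refcounting;
// retainCount() results for it stop being exact.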
1567
1568
1569 // Move some retain counts from the side table to the isa field.
1570 // Returns the actual count subtracted, which may be less than the request.
1571 objc_object::SidetableBorrow
1572 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1573 {
1574 ASSERT(isa.nonpointer);
1575 SideTable& table = SideTables()[this];
1576
1577 RefcountMap::iterator it = table.refcnts.find(this);
1578 if (it == table.refcnts.end() || it->second == 0) {
1579 // Side table retain count is zero. Can't borrow.
1580 return { 0, 0 };
1581 }
1582 size_t oldRefcnt = it->second;
1583
1584 // isa-side bits should not be set here
1585 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1586 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1587
1588 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1589 ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
1590 it->second = newRefcnt;
1591 return { delta_rc, newRefcnt >> SIDE_TABLE_RC_SHIFT };
1592 }
1593
1594
1595 size_t
1596 objc_object::sidetable_getExtraRC_nolock()
1597 {
1598 ASSERT(isa.nonpointer);
1599 SideTable& table = SideTables()[this];
1600 RefcountMap::iterator it = table.refcnts.find(this);
1601 if (it == table.refcnts.end()) return 0;
1602 else return it->second >> SIDE_TABLE_RC_SHIFT;
1603 }
1604
1605
1606 void
1607 objc_object::sidetable_clearExtraRC_nolock()
1608 {
1609 ASSERT(isa.nonpointer);
1610 SideTable& table = SideTables()[this];
1611 RefcountMap::iterator it = table.refcnts.find(this);
1612 table.refcnts.erase(it);
1613 }
1614
1615
1616 // SUPPORT_NONPOINTER_ISA
1617 #endif
1618
1619
1620 id
1621 objc_object::sidetable_retain(bool locked)
1622 {
1623 #if SUPPORT_NONPOINTER_ISA
1624 ASSERT(!isa.nonpointer);
1625 #endif
1626 SideTable& table = SideTables()[this];
1627
1628 if (!locked) table.lock();
1629 size_t& refcntStorage = table.refcnts[this];
1630 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1631 refcntStorage += SIDE_TABLE_RC_ONE;
1632 }
1633 table.unlock();
1634
1635 return (id)this;
1636 }
1637
1638
1639 bool
1640 objc_object::sidetable_tryRetain()
1641 {
1642 #if SUPPORT_NONPOINTER_ISA
1643 ASSERT(!isa.nonpointer);
1644 #endif
1645 SideTable& table = SideTables()[this];
1646
1647 // NO SPINLOCK HERE
1648 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1649 // which already acquired the lock on our behalf.
1650
1651 // fixme can't do this efficiently with os_lock_handoff_s
1652 // if (table.slock == 0) {
1653 // _objc_fatal("Do not call -_tryRetain.");
1654 // }
1655
1656 bool result = true;
1657 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
1658 auto &refcnt = it.first->second;
1659 if (it.second) {
1660 // there was no entry
1661 } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
1662 result = false;
1663 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1664 refcnt += SIDE_TABLE_RC_ONE;
1665 }
1666
1667 return result;
1668 }
1669
1670
1671 uintptr_t
1672 objc_object::sidetable_retainCount()
1673 {
1674 SideTable& table = SideTables()[this];
1675
1676 size_t refcnt_result = 1;
1677
1678 table.lock();
1679 RefcountMap::iterator it = table.refcnts.find(this);
1680 if (it != table.refcnts.end()) {
1681 // this is valid for SIDE_TABLE_RC_PINNED too
1682 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1683 }
1684 table.unlock();
1685 return refcnt_result;
1686 }
1687
1688
1689 bool
1690 objc_object::sidetable_isDeallocating()
1691 {
1692 SideTable& table = SideTables()[this];
1693
1694 // NO SPINLOCK HERE
1695 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1696 // which already acquired the lock on our behalf.
1697
1698
1699 // fixme can't do this efficiently with os_lock_handoff_s
1700 // if (table.slock == 0) {
1701 // _objc_fatal("Do not call -_isDeallocating.");
1702 // }
1703
1704 RefcountMap::iterator it = table.refcnts.find(this);
1705 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1706 }
1707
1708
1709 bool
1710 objc_object::sidetable_isWeaklyReferenced()
1711 {
1712 bool result = false;
1713
1714 SideTable& table = SideTables()[this];
1715 table.lock();
1716
1717 RefcountMap::iterator it = table.refcnts.find(this);
1718 if (it != table.refcnts.end()) {
1719 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1720 }
1721
1722 table.unlock();
1723
1724 return result;
1725 }
1726
1727 #if OBJC_WEAK_FORMATION_CALLOUT_DEFINED
1728 //Clients can dlsym() for this symbol to see if an ObjC supporting
1729 //-_setWeaklyReferenced is present
1730 OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout = 0;
1731 static_assert(SUPPORT_NONPOINTER_ISA, "Weak formation callout must only be defined when nonpointer isa is supported.");
1732 #else
1733 static_assert(!SUPPORT_NONPOINTER_ISA, "If weak callout is not present then we must not support nonpointer isas.");
1734 #endif
1735
1736 void
1737 objc_object::sidetable_setWeaklyReferenced_nolock()
1738 {
1739 #if SUPPORT_NONPOINTER_ISA
1740 ASSERT(!isa.nonpointer);
1741 #endif
1742
1743 SideTable& table = SideTables()[this];
1744
1745 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1746 }
1747
1748
1749 // rdar://20206767
1750 // return uintptr_t instead of bool so that the various raw-isa
1751 // -release paths all return zero in eax
1752 uintptr_t
1753 objc_object::sidetable_release(bool locked, bool performDealloc)
1754 {
1755 #if SUPPORT_NONPOINTER_ISA
1756 ASSERT(!isa.nonpointer);
1757 #endif
1758 SideTable& table = SideTables()[this];
1759
1760 bool do_dealloc = false;
1761
1762 if (!locked) table.lock();
1763 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
1764 auto &refcnt = it.first->second;
1765 if (it.second) {
1766 do_dealloc = true;
1767 } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
1768 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1769 do_dealloc = true;
1770 refcnt |= SIDE_TABLE_DEALLOCATING;
1771 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1772 refcnt -= SIDE_TABLE_RC_ONE;
1773 }
1774 table.unlock();
1775 if (do_dealloc && performDealloc) {
1776 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
1777 }
1778 return do_dealloc;
1779 }
1780
1781
1782 void
1783 objc_object::sidetable_clearDeallocating()
1784 {
1785 SideTable& table = SideTables()[this];
1786
1787 // clear any weak table items
1788 // clear extra retain count and deallocating bit
1789 // (fixme warn or abort if extra retain count == 0 ?)
1790 table.lock();
1791 RefcountMap::iterator it = table.refcnts.find(this);
1792 if (it != table.refcnts.end()) {
1793 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1794 weak_clear_no_lock(&table.weak_table, (id)this);
1795 }
1796 table.refcnts.erase(it);
1797 }
1798 table.unlock();
1799 }
1800
1801
1802 /***********************************************************************
1803 * Optimized retain/release/autorelease entrypoints
1804 **********************************************************************/
1805
1806
1807 #if __OBJC2__
1808
1809 __attribute__((aligned(16), flatten, noinline))
1810 id
1811 objc_retain(id obj)
1812 {
1813 if (obj->isTaggedPointerOrNil()) return obj;
1814 return obj->retain();
1815 }
1816
1817
1818 __attribute__((aligned(16), flatten, noinline))
1819 void
1820 objc_release(id obj)
1821 {
1822 if (obj->isTaggedPointerOrNil()) return;
1823 return obj->release();
1824 }
1825
1826
1827 __attribute__((aligned(16), flatten, noinline))
1828 id
1829 objc_autorelease(id obj)
1830 {
1831 if (obj->isTaggedPointerOrNil()) return obj;
1832 return obj->autorelease();
1833 }
1834
1835
1836 // OBJC2
1837 #else
1838 // not OBJC2
1839
1840
1841 id objc_retain(id obj) { return [obj retain]; }
1842 void objc_release(id obj) { [obj release]; }
1843 id objc_autorelease(id obj) { return [obj autorelease]; }
1844
1845
1846 #endif
1847
1848
1849 /***********************************************************************
1850 * Basic operations for root class implementations a.k.a. _objc_root*()
1851 **********************************************************************/
1852
1853 bool
1854 _objc_rootTryRetain(id obj)
1855 {
1856 ASSERT(obj);
1857
1858 return obj->rootTryRetain();
1859 }
1860
1861 bool
1862 _objc_rootIsDeallocating(id obj)
1863 {
1864 ASSERT(obj);
1865
1866 return obj->rootIsDeallocating();
1867 }
1868
1869
1870 void
1871 objc_clear_deallocating(id obj)
1872 {
1873 ASSERT(obj);
1874
1875 if (obj->isTaggedPointer()) return;
1876 obj->clearDeallocating();
1877 }
1878
1879
1880 bool
1881 _objc_rootReleaseWasZero(id obj)
1882 {
1883 ASSERT(obj);
1884
1885 return obj->rootReleaseShouldDealloc();
1886 }
1887
1888
1889 NEVER_INLINE id
1890 _objc_rootAutorelease(id obj)
1891 {
1892 ASSERT(obj);
1893 return obj->rootAutorelease();
1894 }
1895
1896 uintptr_t
1897 _objc_rootRetainCount(id obj)
1898 {
1899 ASSERT(obj);
1900
1901 return obj->rootRetainCount();
1902 }
1903
1904
1905 NEVER_INLINE id
1906 _objc_rootRetain(id obj)
1907 {
1908 ASSERT(obj);
1909
1910 return obj->rootRetain();
1911 }
1912
1913 NEVER_INLINE void
1914 _objc_rootRelease(id obj)
1915 {
1916 ASSERT(obj);
1917
1918 obj->rootRelease();
1919 }
1920
1921 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1922 // shortcutting optimizations.
1923 static ALWAYS_INLINE id
1924 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1925 {
1926 #if __OBJC2__
1927 if (slowpath(checkNil && !cls)) return nil;
1928 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1929 return _objc_rootAllocWithZone(cls, nil);
1930 }
1931 #endif
1932
1933 // No shortcuts available.
1934 if (allocWithZone) {
1935 return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
1936 }
1937 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
1938 }
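// Usage sketch: the compiler lowers common creation sends to these entry
// points (approximate mapping; see the functions below):
//   [Foo alloc]              -> objc_alloc(cls)
//   [Foo allocWithZone:nil]  -> objc_allocWithZone(cls)
//   [[Foo alloc] init]       -> objc_alloc_init(cls)
// When the class has no custom +alloc/+allocWithZone: (hasCustomAWZ() is
// false), the fastpath above calls _objc_rootAllocWithZone directly and
// skips objc_msgSend entirely.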
1939
1940
1941 // Base class implementation of +alloc. cls is not nil.
1942 // Calls [cls allocWithZone:nil].
1943 id
1944 _objc_rootAlloc(Class cls)
1945 {
1946 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1947 }
1948
1949 // Calls [cls alloc].
1950 id
1951 objc_alloc(Class cls)
1952 {
1953 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1954 }
1955
1956 // Calls [cls allocWithZone:nil].
1957 id
1958 objc_allocWithZone(Class cls)
1959 {
1960 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1961 }
1962
1963 // Calls [[cls alloc] init].
1964 id
1965 objc_alloc_init(Class cls)
1966 {
1967 return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
1968 }
1969
1970 // Calls [cls new]
1971 id
1972 objc_opt_new(Class cls)
1973 {
1974 #if __OBJC2__
1975 if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
1976 return [callAlloc(cls, false/*checkNil*/) init];
1977 }
1978 #endif
1979 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
1980 }

// Calls [obj self].
id
objc_opt_self(id obj)
{
#if __OBJC2__
    if (fastpath(obj->isTaggedPointerOrNil() || !obj->ISA()->hasCustomCore())) {
        return obj;
    }
#endif
    return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
}

// Calls [obj class].
Class
objc_opt_class(id obj)
{
#if __OBJC2__
    if (slowpath(!obj)) return nil;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return cls->isMetaClass() ? obj : cls;
    }
#endif
    return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
}

// Calls [obj isKindOfClass:otherClass].
BOOL
objc_opt_isKindOfClass(id obj, Class otherClass)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        for (Class tcls = cls; tcls; tcls = tcls->getSuperclass()) {
            if (tcls == otherClass) return YES;
        }
        return NO;
    }
#endif
    return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
}

// Calls [obj respondsToSelector:sel].
BOOL
objc_opt_respondsToSelector(id obj, SEL sel)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class cls = obj->getIsa();
    if (fastpath(!cls->hasCustomCore())) {
        return class_respondsToSelector_inst(obj, sel, cls);
    }
#endif
    return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
}
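
// Example (illustrative): these entry points let the compiler turn
//
//     if ([obj isKindOfClass:[NSString class]]) { ... }
//
// into a direct call
//
//     if (objc_opt_isKindOfClass(obj, [NSString class])) { ... }
//
// For a class with no custom core methods the check is a plain superclass
// walk; anything that overrides -isKindOfClass: (or another core NSObject
// method) still goes through objc_msgSend, so semantics are preserved.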

void
_objc_rootDealloc(id obj)
{
    ASSERT(obj);

    obj->rootDealloc();
}

void
_objc_rootFinalize(id obj __unused)
{
    ASSERT(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}


id
_objc_rootInit(id obj)
{
    // In practice, it is hard to rely on this function:
    // many classes do not properly chain -init calls.
    return obj;
}


malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}

uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}

void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}

NEVER_INLINE
void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}
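
// Example (illustrative): an @autoreleasepool block lowers to a matched
// push/pop pair around its body, roughly
//
//     void *ctxt = objc_autoreleasePoolPush();
//     // ... body; autoreleased objects accumulate after this boundary ...
//     objc_autoreleasePoolPop(ctxt);
//
// The opaque token marks the pool boundary; pop releases everything
// autoreleased after it.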


void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}

void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}

void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}


// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}

// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}


// Prepare a value at +1 for return through a +0 autoreleasing convention.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    return objc_autorelease(obj);
}

// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}

// Accept a value returned through a +0 autoreleasing convention for use at +0.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    return objc_releaseAndReturn(obj);
}
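
// Example (illustrative): ARC pairs the four functions above across a call
// boundary so a +1 result can bypass the autorelease pool when caller and
// callee cooperate. For a hypothetical factory:
//
//     id MakeWidget(void) {
//         id w = [[Widget alloc] init];            // +1; 'Widget' is hypothetical
//         return objc_autoreleaseReturnValue(w);   // offer the optimized handoff
//     }
//
//     // caller, as emitted by ARC:
//     id w = objc_retainAutoreleasedReturnValue(MakeWidget());  // claim at +1
//
// When the runtime recognizes the cooperating caller (via the marker the
// compiler plants after the call site), both sides return early and the
// object never touches the pool.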

id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}

void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
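
// Example (illustrative): classes that must deallocate on the main thread
// route their final release through this helper, along the lines of
//
//     if (pthread_main_np()) {
//         [obj dealloc];
//     } else {
//         dispatch_async_f(dispatch_get_main_queue(), (void *)obj,
//                          _objc_deallocOnMainThreadHelper);
//     }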

// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
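
// Example (illustrative; the equivalences are assumptions): these map onto
// ARC's bridge casts for plain C pointers:
//
//     objc_objectptr_t p = objc_unretainedPointer(obj);  // like (__bridge void *)obj
//     id a = objc_unretainedObject(p);                   // like (__bridge id)p
//     id b = objc_retainedObject(p);                     // like (__bridge_transfer id)p;
//                                                        // ARC releases b when done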


void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTablesMap.init();
    _objc_associations_init();
}


#if SUPPORT_TAGGED_POINTERS

// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.

@interface __NSUnrecognizedTaggedPointer : NSObject
@end

__attribute__((objc_nonlazy_class))
@implementation __NSUnrecognizedTaggedPointer
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@end

#endif

__attribute__((objc_nonlazy_class))
@implementation NSObject

+ (void)initialize {
}

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return self->getSuperclass();
}

- (Class)superclass {
    return [self class]->getSuperclass();
}

+ (BOOL)isMemberOfClass:(Class)cls {
    return self->ISA() == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = self->ISA(); tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    return class_respondsToSelector_inst(nil, sel, self);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, self->ISA());
}

- (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, [self class]);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}


+ (BOOL)isFault {
    return NO;
}

- (BOOL)isFault {
    return NO;
}

+ (BOOL)isProxy {
    return NO;
}

- (BOOL)isProxy {
    return NO;
}


+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}
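
// Example (illustrative): a subclass can override the resolve hooks to
// install an implementation lazily, before forwarding kicks in. A
// hypothetical dynamic getter:
//
//     static id dynamicValueIMP(id self, SEL _cmd) { return nil; }
//
//     + (BOOL)resolveInstanceMethod:(SEL)sel {
//         if (sel == @selector(dynamicValue)) {
//             class_addMethod(self, sel, (IMP)dynamicValueIMP, "@@:");
//             return YES;   // runtime retries the message send
//         }
//         return [super resolveInstanceMethod:sel];
//     }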

// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to class %p",
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
                object_getClassName(self), sel_getName(sel), self);
}


+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}


// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}
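
// Example (illustrative): returning a non-nil object here redirects the
// message cheaply, before the NSInvocation-based forwarding path. A
// hypothetical wrapper:
//
//     - (id)forwardingTargetForSelector:(SEL)sel {
//         if ([_wrapped respondsToSelector:sel]) return _wrapped;  // _wrapped is hypothetical
//         return [super forwardingTargetForSelector:sel];
//     }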


// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}


+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

+ (id)retain {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain {
    return _objc_rootRetain(self);
}


+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return _objc_rootTryRetain(self);
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return _objc_rootIsDeallocating(self);
}

+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

- (BOOL)allowsWeakReference {
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}
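
// Example (illustrative): the weak-reference machinery consults these
// hooks. Storing into a __weak variable asks allowsWeakReference (a class
// may veto weak references by returning NO, which makes the store fatal),
// and loading retains through retainWeakReference so the object stays
// alive for the duration of the load:
//
//     __weak id w = obj;   // checks [obj allowsWeakReference]
//     id strong = w;       // checks [obj retainWeakReference]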

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    _objc_rootRelease(self);
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return _objc_rootAutorelease(self);
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return _objc_rootRetainCount(self);
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}


// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Previously used by GC. Now a placeholder for binary compatibility.
- (void) finalize {
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

+ (id)copy {
    return (id)self;
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}
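
// Example (illustrative): -copy and -mutableCopy merely funnel into the
// zone variants with a nil zone; conforming classes implement
// -copyWithZone: themselves. A hypothetical NSCopying adopter:
//
//     - (id)copyWithZone:(struct _NSZone *)zone {
//         MyValue *copy = [[[self class] allocWithZone:zone] init];
//         copy->_payload = _payload;   // 'MyValue'/_payload are hypothetical
//         return copy;                 // +1, per the copy family convention
//     }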

@end