]> git.saurik.com Git - apple/objc4.git/blame - runtime/NSObject.mm
objc4-818.2.tar.gz
[apple/objc4.git] / runtime / NSObject.mm
CommitLineData
8972963c 1/*
cd5f04f5 2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
8972963c
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
cd5f04f5 5 *
8972963c
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
cd5f04f5 12 *
8972963c
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
cd5f04f5 20 *
8972963c
A
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
cd5f04f5 24#include "objc-private.h"
7257e56c
A
25#include "NSObject.h"
26
27#include "objc-weak.h"
1807f628 28#include "DenseMapExtras.h"
8972963c 29
cd5f04f5 30#include <malloc/malloc.h>
8972963c
A
31#include <stdint.h>
32#include <stdbool.h>
8972963c
A
33#include <mach/mach.h>
34#include <mach-o/dyld.h>
35#include <mach-o/nlist.h>
36#include <sys/types.h>
37#include <sys/mman.h>
8972963c
A
38#include <Block.h>
39#include <map>
40#include <execinfo.h>
1807f628 41#include "NSObject-internal.h"
34d5b5e8
A
42#include <os/feature_private.h>
43
44extern "C" {
45#include <os/reason_private.h>
46#include <os/variant_private.h>
47}
8972963c 48
cd5f04f5
A
// Minimal declaration so we can reference -[NSInvocation selector]
// without importing Foundation.
@interface NSInvocation
- (SEL)selector;
@end

// Field offsets of AutoreleasePoolPageData, exported as data symbols so
// out-of-process tools (debuggers, leaks, heap inspectors) can walk
// autorelease pool pages without matching this exact build.
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_magic_offset  = __builtin_offsetof(AutoreleasePoolPageData, magic);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_next_offset   = __builtin_offsetof(AutoreleasePoolPageData, next);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_thread_offset = __builtin_offsetof(AutoreleasePoolPageData, thread);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_parent_offset = __builtin_offsetof(AutoreleasePoolPageData, parent);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_child_offset  = __builtin_offsetof(AutoreleasePoolPageData, child);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_depth_offset  = __builtin_offsetof(AutoreleasePoolPageData, depth);
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_hiwat_offset  = __builtin_offsetof(AutoreleasePoolPageData, hiwat);
// First object slot starts immediately after the page header.
OBJC_EXTERN const uint32_t objc_debug_autoreleasepoolpage_begin_offset  = sizeof(AutoreleasePoolPageData);
#if __OBJC2__
#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
// Mask selecting the pointer bits of a pool entry (the remaining bits
// hold the coalesced autorelease count).
OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = (AutoreleasePoolPageData::AutoreleasePoolEntry){ .ptr = ~(uintptr_t)0 }.ptr;
#else
OBJC_EXTERN const uintptr_t objc_debug_autoreleasepoolpage_ptr_mask = ~(uintptr_t)0;
#endif
OBJC_EXTERN const uint32_t objc_class_abi_version = OBJC_CLASS_ABI_VERSION_MAX;
#endif
cd5f04f5 69
8972963c
A
/***********************************************************************
* Weak ivar support
**********************************************************************/

// Default bad-alloc handler: abort with a diagnostic. _objc_fatal does
// not return, so no value is produced here.
static id defaultBadAllocHandler(Class cls)
{
    _objc_fatal("attempt to allocate object of class '%s' failed",
                cls->nameForLogging());
}

// Current handler invoked when allocation returns nil.
id(*badAllocHandler)(Class) = &defaultBadAllocHandler;

// Called by allocation paths when alloc fails.
id _objc_callBadAllocHandler(Class cls)
{
    // fixme add re-entrancy protection in case allocation fails inside handler
    return (*badAllocHandler)(cls);
}

// Install a replacement bad-alloc handler.
void _objc_setBadAllocHandler(id(*newHandler)(Class))
{
    badAllocHandler = newHandler;
}
92
8972963c 93
34d5b5e8
A
static id _initializeSwiftRefcountingThenCallRetain(id objc);
static void _initializeSwiftRefcountingThenCallRelease(id objc);

// Swift retain/release entry points. They start out pointing at lazy
// bootstrap thunks; the first call resolves the real functions from
// libswiftCore and rebinds these pointers.
explicit_atomic<id(*)(id)> swiftRetain{&_initializeSwiftRefcountingThenCallRetain};
explicit_atomic<void(*)(id)> swiftRelease{&_initializeSwiftRefcountingThenCallRelease};

// Resolve swift_retain/swift_release from libswiftCore and install them.
static void _initializeSwiftRefcounting() {
    void *const token = dlopen("/usr/lib/swift/libswiftCore.dylib", RTLD_LAZY | RTLD_LOCAL);
    ASSERT(token);
    swiftRetain.store((id(*)(id))dlsym(token, "swift_retain"), memory_order_relaxed);
    ASSERT(swiftRetain.load(memory_order_relaxed));
    swiftRelease.store((void(*)(id))dlsym(token, "swift_release"), memory_order_relaxed);
    ASSERT(swiftRelease.load(memory_order_relaxed));
    // The library can never be unloaded once loaded; drop our extra ref.
    dlclose(token);
}

// Bootstrap thunk: install the real functions, then forward this call.
static id _initializeSwiftRefcountingThenCallRetain(id objc) {
    _initializeSwiftRefcounting();
    return swiftRetain.load(memory_order_relaxed)(objc);
}

// Bootstrap thunk: install the real functions, then forward this call.
static void _initializeSwiftRefcountingThenCallRelease(id objc) {
    _initializeSwiftRefcounting();
    swiftRelease.load(memory_order_relaxed)(objc);
}

namespace objc {
    extern int PageCountWarning;
}
123
8972963c
A
namespace {

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
// Count of "Large Autorelease Pool" faults emitted so far (capped).
uint32_t numFaults = 0;
#endif

// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)  // MSB-ward of weak bit
#define SIDE_TABLE_RC_ONE            (1UL<<2)  // MSB-ward of deallocating bit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))

#define SIDE_TABLE_RC_SHIFT 2
#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)

// A refcount-table entry whose value is zero carries no information
// and may be removed from the map.
struct RefcountMapValuePurgeable {
    static inline bool isPurgeable(size_t x) {
        return x == 0;
    }
};

// RefcountMap disguises its pointers because we
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;

// Template parameters.
enum HaveOld { DontHaveOld = false, DoHaveOld = true };
enum HaveNew { DontHaveNew = false, DoHaveNew = true };
152
// One stripe of the global side table: a spinlock guarding an overflow
// refcount map and a weak-reference table for the objects hashed here.
struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    // Side tables live for the lifetime of the process.
    ~SideTable() {
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }
    void unlock() { slock.unlock(); }
    void forceReset() { slock.forceReset(); }

    // Address-ordered lock discipline for a pair of side tables.

    template<HaveOld, HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<HaveOld, HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};
177
31875a97
A
178
// Both tables present: take both locks in address order to avoid
// lock-ordering deadlocks.
template<>
void SideTable::lockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}

// Only the old value's table exists.
template<>
void SideTable::lockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->lock();
}

// Only the new value's table exists.
template<>
void SideTable::lockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->lock();
}

template<>
void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
    (SideTable *lock1, SideTable *lock2)
{
    spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}

template<>
void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
    (SideTable *lock1, SideTable *)
{
    lock1->unlock();
}

template<>
void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
    (SideTable *, SideTable *lock2)
{
    lock2->unlock();
}
31875a97 220
// Storage for the striped side tables; explicitly initialized during
// runtime startup rather than via static constructors.
static objc::ExplicitInit<StripedMap<SideTable>> SideTablesMap;

static StripedMap<SideTable>& SideTables() {
    return SideTablesMap.get();
}

// anonymous namespace
};
229
bd8dfcfc
A
// Whole-map lock operations, used around fork() and for lock-order
// registration of other runtime locks relative to the side tables.

void SideTableLockAll() {
    SideTables().lockAll();
}

void SideTableUnlockAll() {
    SideTables().unlockAll();
}

void SideTableForceResetAll() {
    SideTables().forceResetAll();
}

void SideTableDefineLockOrder() {
    SideTables().defineLockOrder();
}

void SideTableLocksPrecedeLock(const void *newlock) {
    SideTables().precedeLock(newlock);
}

void SideTableLocksSucceedLock(const void *oldlock) {
    SideTables().succeedLock(oldlock);
}

void SideTableLocksPrecedeLocks(StripedMap<spinlock_t>& newlocks) {
    int i = 0;
    const void *newlock;
    while ((newlock = newlocks.getLock(i++))) {
        SideTables().precedeLock(newlock);
    }
}

void SideTableLocksSucceedLocks(StripedMap<spinlock_t>& oldlocks) {
    int i = 0;
    const void *oldlock;
    while ((oldlock = oldlocks.getLock(i++))) {
        SideTables().succeedLock(oldlock);
    }
}
269
34d5b5e8
A
// Call out to the _setWeaklyReferenced method on obj, if implemented.
static void callSetWeaklyReferenced(id obj) {
    if (!obj)
        return;

    Class cls = obj->getIsa();

    // Only instances of classes with custom retain/release can implement
    // the callout; plain classes and class objects skip it entirely.
    if (slowpath(cls->hasCustomRR() && !object_isClass(obj))) {
        ASSERT(((objc_class *)cls)->isInitializing() || ((objc_class *)cls)->isInitialized());
        void (*setWeaklyReferenced)(id, SEL) = (void(*)(id, SEL))
            class_getMethodImplementation(cls, @selector(_setWeaklyReferenced));
        if ((IMP)setWeaklyReferenced != _objc_msgForward) {
            (*setWeaklyReferenced)(obj, @selector(_setWeaklyReferenced));
        }
    }
}
286
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
//

// Retaining a block means copying it (moving stack blocks to the heap).
id objc_retainBlock(id x) {
    return (id)_Block_copy(x);
}

//
// The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
//

// Always permit deallocation; the parameter exists for ABI compatibility.
BOOL objc_should_deallocate(id object) {
    return YES;
}
302
8972963c
A
// Combined retain+autorelease, emitted by ARC for returns of +0 values.
id
objc_retain_autorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
308
8070259c
A
309
// ARC __strong store: retain the incoming value, write it, then release
// the displaced value. Retain-before-release so self-assignment through
// aliases cannot deallocate the object mid-store.
void
objc_storeStrong(id *location, id obj)
{
    id prev = *location;
    if (obj == prev) {
        return;
    }
    objc_retain(obj);
    *location = obj;
    objc_release(prev);
}
321
322
31875a97
A
// Update a weak variable.
// If HaveOld is true, the variable has an existing value
// that needs to be cleaned up. This value might be nil.
// If HaveNew is true, there is a new value that needs to be
// assigned into the variable. This value might be nil.
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
enum CrashIfDeallocating {
    DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
};
template <HaveOld haveOld, HaveNew haveNew,
          enum CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
    ASSERT(haveOld || haveNew);
    if (!haveNew) ASSERT(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);

    // Raced with another store to *location: start over with the
    // current value so we unregister from the right table.
    if (haveOld && *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (haveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            class_initialize(cls, (id)newObj);

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
                                  crashIfDeallocating ? CrashIfDeallocating : ReturnNilIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (!newObj->isTaggedPointerOrNil()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    // This must be called without the locks held, as it can invoke
    // arbitrary code. In particular, even if _setWeaklyReferenced
    // is not implemented, resolveInstanceMethod: may be, and may
    // call back into the weak reference machinery.
    callSetWeaklyReferenced((id)newObj);

    return (id)newObj;
}
427
31875a97
A
428
/**
 * This function stores a new value into a __weak variable. It would
 * be used anywhere a __weak variable is the target of an assignment.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 *
 * @return \e newObj
 */
id
objc_storeWeak(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object *)newObj);
}
444
31875a97 445
/**
 * This function stores a new value into a __weak variable.
 * If the new object is deallocating or the new object's class
 * does not support weak references, stores nil instead.
 *
 * @param location The address of the weak pointer itself
 * @param newObj The new object this weak ptr should now point to
 *
 * @return The value stored (either the new object or nil)
 */
id
objc_storeWeakOrNil(id *location, id newObj)
{
    return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object *)newObj);
}
462
31875a97 463
7257e56c
A
/**
 * Initialize a fresh weak pointer to some object location.
 * It would be used for code like:
 *
 * (The nil case)
 * __weak id weakPtr;
 * (The non-nil case)
 * NSObject *o = ...;
 * __weak id weakPtr = o;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location Address of __weak ptr.
 * @param newObj Object ptr.
 */
id
objc_initWeak(id *location, id newObj)
{
    // Fast path: nothing to register for a nil initializer.
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}

// Same as objc_initWeak, but stores nil instead of halting when the
// target is deallocating or does not support weak references.
id
objc_initWeakOrNil(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
        (location, (objc_object*)newObj);
}
503
504
/**
 * Destroys the relationship between a weak pointer
 * and the object it is referencing in the internal weak
 * table. If the weak pointer is not referencing anything,
 * there is no need to edit the weak table.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the weak variable. (Concurrent weak clear is safe.)
 *
 * @param location The weak pointer address.
 */
void
objc_destroyWeak(id *location)
{
    (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
        (location, nil);
}
522
31875a97 523
c1e772c4
A
/*
  Once upon a time we eagerly cleared *location if we saw the object
  was deallocating. This confuses code like NSPointerFunctions which
  tries to pre-flight the raw storage and assumes if the storage is
  zero then the weak system is done interfering. That is false: the
  weak system is still going to check and clear the storage later.
  This can cause objc_weak_error complaints and crashes.
  So we now don't touch the storage until deallocation completes.
*/

// Load from a weak variable and return the object at +1, or nil if the
// object is gone or refuses to be weakly retained.
id
objc_loadWeakRetained(id *location)
{
    id obj;
    id result;
    Class cls;

    SideTable *table;

 retry:
    // fixme std::atomic this load
    obj = *location;
    // nil and tagged pointers need no refcount work.
    if (obj->isTaggedPointerOrNil()) return obj;

    table = &SideTables()[obj];

    table->lock();
    // Raced with a store to *location; retry with the fresh value.
    if (*location != obj) {
        table->unlock();
        goto retry;
    }

    result = obj;

    cls = obj->ISA();
    if (! cls->hasCustomRR()) {
        // Fast case. We know +initialize is complete because
        // default-RR can never be set before then.
        ASSERT(cls->isInitialized());
        if (! obj->rootTryRetain()) {
            result = nil;
        }
    }
    else {
        // Slow case. We must check for +initialize and call it outside
        // the lock if necessary in order to avoid deadlocks.
        // Use lookUpImpOrForward so we can avoid the assert in
        // class_getInstanceMethod, since we intentionally make this
        // callout with the lock held.
        if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
            BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
                lookUpImpOrForwardTryCache(obj, @selector(retainWeakReference), cls);
            if ((IMP)tryRetain == _objc_msgForward) {
                result = nil;
            }
            else if (! (*tryRetain)(obj, @selector(retainWeakReference))) {
                result = nil;
            }
        }
        else {
            table->unlock();
            class_initialize(cls, obj);
            goto retry;
        }
    }

    table->unlock();
    return result;
}
593
/**
 * This loads the object referenced by a weak pointer and returns it, after
 * retaining and autoreleasing the object to ensure that it stays alive
 * long enough for the caller to use it. This function would be used
 * anywhere a __weak variable is used in an expression.
 *
 * @param location The weak pointer address
 *
 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
 */
id
objc_loadWeak(id *location)
{
    if (!*location) return nil;
    return objc_autorelease(objc_loadWeakRetained(location));
}
610
31875a97 611
7257e56c
A
/**
 * This function copies a weak pointer from one location to another,
 * when the destination doesn't already contain a weak pointer. It
 * would be used for code like:
 *
 * __weak id src = ...;
 * __weak id dst = src;
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to the destination variable. (Concurrent weak clear is safe.)
 *
 * @param dst The destination variable.
 * @param src The source variable.
 */
void
objc_copyWeak(id *dst, id *src)
{
    // Load at +1 so the object cannot die between init and release.
    id obj = objc_loadWeakRetained(src);
    objc_initWeak(dst, obj);
    objc_release(obj);
}
633
7257e56c
A
/**
 * Move a weak pointer from one location to another.
 * Before the move, the destination must be uninitialized.
 * After the move, the source is nil.
 *
 * This function IS NOT thread-safe with respect to concurrent
 * modifications to either weak variable. (Concurrent weak clear is safe.)
 *
 */
void
objc_moveWeak(id *dst, id *src)
{
    id obj;
    SideTable *table;

retry:
    obj = *src;
    if (obj == nil) {
        *dst = nil;
        return;
    }

    table = &SideTables()[obj];
    table->lock();
    // Raced with a concurrent weak clear of *src; retry.
    if (*src != obj) {
        table->unlock();
        goto retry;
    }

    // Re-point the registration from src to dst under one lock hold;
    // the object is known alive here, so skip the deallocating check.
    weak_unregister_no_lock(&table->weak_table, obj, src);
    weak_register_no_lock(&table->weak_table, obj, dst, DontCheckDeallocating);
    *dst = obj;
    *src = nil;
    table->unlock();
}
669
670
8070259c
A
/***********************************************************************
   Autorelease pool implementation

   A thread's autorelease pool is a stack of pointers.
   Each pointer is either an object to release, or POOL_BOUNDARY which is
   an autorelease pool boundary.
   A pool token is a pointer to the POOL_BOUNDARY for that pool. When
   the pool is popped, every object hotter than the sentinel is released.
   The stack is divided into a doubly-linked list of pages. Pages are added
   and deleted as necessary.
   Thread-local storage points to the hot page, where newly autoreleased
   objects are stored.
**********************************************************************/

// Empty functions that debuggers can break on when pool misuse is detected.
BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
8972963c 687
1807f628
A
688class AutoreleasePoolPage : private AutoreleasePoolPageData
689{
690 friend struct thread_data_t;
8972963c 691
1807f628
A
692public:
693 static size_t const SIZE =
694#if PROTECT_AUTORELEASEPOOL
695 PAGE_MAX_SIZE; // must be multiple of vm page size
31875a97 696#else
1807f628 697 PAGE_MIN_SIZE; // size and alignment, power of 2
8972963c 698#endif
8972963c 699
1807f628
A
700private:
701 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
702 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
703 static size_t const COUNT = SIZE / sizeof(id);
34d5b5e8 704 static size_t const MAX_FAULTS = 2;
8972963c 705
c1e772c4
A
706 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
707 // pushed and it has never contained any objects. This saves memory
708 // when the top level (i.e. libdispatch) pushes and pops pools but
709 // never uses them.
710# define EMPTY_POOL_PLACEHOLDER ((id*)1)
8972963c 711
c1e772c4 712# define POOL_BOUNDARY nil
8972963c
A
713
714 // SIZE-sizeof(*this) bytes of contents follow
715
716 static void * operator new(size_t size) {
717 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
718 }
719 static void operator delete(void * p) {
720 return free(p);
721 }
722
723 inline void protect() {
724#if PROTECT_AUTORELEASEPOOL
725 mprotect(this, SIZE, PROT_READ);
726 check();
727#endif
728 }
729
730 inline void unprotect() {
731#if PROTECT_AUTORELEASEPOOL
732 check();
733 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
734#endif
735 }
736
34d5b5e8
A
737 void checkTooMuchAutorelease()
738 {
739#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
740 bool objcModeNoFaults = DisableFaults || getpid() == 1 ||
741 !os_variant_has_internal_diagnostics("com.apple.obj-c");
742 if (!objcModeNoFaults) {
743 if (depth+1 >= (uint32_t)objc::PageCountWarning && numFaults < MAX_FAULTS) { //depth is 0 when first page is allocated
744 os_fault_with_payload(OS_REASON_LIBSYSTEM,
745 OS_REASON_LIBSYSTEM_CODE_FAULT,
746 NULL, 0, "Large Autorelease Pool", 0);
747 numFaults++;
748 }
749 }
750#endif
751 }
752
1807f628
A
753 AutoreleasePoolPage(AutoreleasePoolPage *newParent) :
754 AutoreleasePoolPageData(begin(),
755 objc_thread_self(),
756 newParent,
757 newParent ? 1+newParent->depth : 0,
758 newParent ? newParent->hiwat : 0)
34d5b5e8
A
759 {
760 if (objc::PageCountWarning != -1) {
761 checkTooMuchAutorelease();
762 }
763
8972963c
A
764 if (parent) {
765 parent->check();
1807f628 766 ASSERT(!parent->child);
8972963c
A
767 parent->unprotect();
768 parent->child = this;
769 parent->protect();
770 }
771 protect();
772 }
773
774 ~AutoreleasePoolPage()
775 {
776 check();
777 unprotect();
1807f628 778 ASSERT(empty());
8972963c
A
779
780 // Not recursive: we don't want to blow out the stack
781 // if a thread accumulates a stupendous amount of garbage
1807f628 782 ASSERT(!child);
8972963c
A
783 }
784
1807f628
A
785 template<typename Fn>
786 void
787 busted(Fn log) const
8972963c 788 {
8070259c 789 magic_t right;
1807f628 790 log("autorelease pool page %p corrupted\n"
8070259c
A
791 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
792 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
793 " pthread %p\n"
794 " should be %p\n",
795 this,
796 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
797 right.m[0], right.m[1], right.m[2], right.m[3],
1807f628 798 this->thread, objc_thread_self());
8972963c
A
799 }
800
1807f628
A
801 __attribute__((noinline, cold, noreturn))
802 void
803 busted_die() const
8972963c 804 {
1807f628
A
805 busted(_objc_fatal);
806 __builtin_unreachable();
807 }
808
809 inline void
810 check(bool die = true) const
811 {
812 if (!magic.check() || thread != objc_thread_self()) {
813 if (die) {
814 busted_die();
815 } else {
816 busted(_objc_inform);
817 }
8972963c
A
818 }
819 }
820
1807f628
A
821 inline void
822 fastcheck() const
8972963c 823 {
c1e772c4 824#if CHECK_AUTORELEASEPOOL
1807f628 825 check();
c1e772c4 826#else
8972963c 827 if (! magic.fastcheck()) {
1807f628 828 busted_die();
8972963c 829 }
c1e772c4 830#endif
8972963c
A
831 }
832
833
834 id * begin() {
835 return (id *) ((uint8_t *)this+sizeof(*this));
836 }
837
838 id * end() {
839 return (id *) ((uint8_t *)this+SIZE);
840 }
841
842 bool empty() {
843 return next == begin();
844 }
845
846 bool full() {
847 return next == end();
848 }
849
850 bool lessThanHalfFull() {
851 return (next - begin() < (end() - begin()) / 2);
852 }
853
854 id *add(id obj)
855 {
1807f628 856 ASSERT(!full());
8972963c 857 unprotect();
34d5b5e8
A
858 id *ret;
859
860#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
861 if (!DisableAutoreleaseCoalescing || !DisableAutoreleaseCoalescingLRU) {
862 if (!DisableAutoreleaseCoalescingLRU) {
863 if (!empty() && (obj != POOL_BOUNDARY)) {
864 AutoreleasePoolEntry *topEntry = (AutoreleasePoolEntry *)next - 1;
865 for (uintptr_t offset = 0; offset < 4; offset++) {
866 AutoreleasePoolEntry *offsetEntry = topEntry - offset;
867 if (offsetEntry <= (AutoreleasePoolEntry*)begin() || *(id *)offsetEntry == POOL_BOUNDARY) {
868 break;
869 }
870 if (offsetEntry->ptr == (uintptr_t)obj && offsetEntry->count < AutoreleasePoolEntry::maxCount) {
871 if (offset > 0) {
872 AutoreleasePoolEntry found = *offsetEntry;
873 memmove(offsetEntry, offsetEntry + 1, offset * sizeof(*offsetEntry));
874 *topEntry = found;
875 }
876 topEntry->count++;
877 ret = (id *)topEntry; // need to reset ret
878 goto done;
879 }
880 }
881 }
882 } else {
883 if (!empty() && (obj != POOL_BOUNDARY)) {
884 AutoreleasePoolEntry *prevEntry = (AutoreleasePoolEntry *)next - 1;
885 if (prevEntry->ptr == (uintptr_t)obj && prevEntry->count < AutoreleasePoolEntry::maxCount) {
886 prevEntry->count++;
887 ret = (id *)prevEntry; // need to reset ret
888 goto done;
889 }
890 }
891 }
892 }
893#endif
894 ret = next; // faster than `return next-1` because of aliasing
8972963c 895 *next++ = obj;
34d5b5e8
A
896#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
897 // Make sure obj fits in the bits available for it
898 ASSERT(((AutoreleasePoolEntry *)ret)->ptr == (uintptr_t)obj);
899#endif
900 done:
8972963c 901 protect();
8070259c 902 return ret;
8972963c
A
903 }
904
905 void releaseAll()
906 {
907 releaseUntil(begin());
908 }
909
910 void releaseUntil(id *stop)
911 {
912 // Not recursive: we don't want to blow out the stack
913 // if a thread accumulates a stupendous amount of garbage
914
915 while (this->next != stop) {
916 // Restart from hotPage() every time, in case -release
917 // autoreleased more objects
918 AutoreleasePoolPage *page = hotPage();
919
920 // fixme I think this `while` can be `if`, but I can't prove it
921 while (page->empty()) {
922 page = page->parent;
923 setHotPage(page);
924 }
925
926 page->unprotect();
34d5b5e8
A
927#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
928 AutoreleasePoolEntry* entry = (AutoreleasePoolEntry*) --page->next;
929
930 // create an obj with the zeroed out top byte and release that
931 id obj = (id)entry->ptr;
932 int count = (int)entry->count; // grab these before memset
933#else
8972963c 934 id obj = *--page->next;
34d5b5e8 935#endif
8972963c
A
936 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
937 page->protect();
938
c1e772c4 939 if (obj != POOL_BOUNDARY) {
34d5b5e8
A
940#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
941 // release count+1 times since it is count of the additional
942 // autoreleases beyond the first one
943 for (int i = 0; i < count + 1; i++) {
944 objc_release(obj);
945 }
946#else
8972963c 947 objc_release(obj);
34d5b5e8 948#endif
8972963c
A
949 }
950 }
951
952 setHotPage(this);
953
31875a97 954#if DEBUG
8972963c
A
955 // we expect any children to be completely empty
956 for (AutoreleasePoolPage *page = child; page; page = page->child) {
1807f628 957 ASSERT(page->empty());
8972963c
A
958 }
959#endif
960 }
961
962 void kill()
963 {
964 // Not recursive: we don't want to blow out the stack
965 // if a thread accumulates a stupendous amount of garbage
966 AutoreleasePoolPage *page = this;
967 while (page->child) page = page->child;
968
969 AutoreleasePoolPage *deathptr;
970 do {
971 deathptr = page;
972 page = page->parent;
973 if (page) {
974 page->unprotect();
7257e56c 975 page->child = nil;
8972963c
A
976 page->protect();
977 }
978 delete deathptr;
979 } while (deathptr != this);
980 }
981
982 static void tls_dealloc(void *p)
983 {
c1e772c4
A
984 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
985 // No objects or pool pages to clean up here.
986 return;
987 }
988
8972963c
A
989 // reinstate TLS value while we work
990 setHotPage((AutoreleasePoolPage *)p);
31875a97
A
991
992 if (AutoreleasePoolPage *page = coldPage()) {
1807f628
A
993 if (!page->empty()) objc_autoreleasePoolPop(page->begin()); // pop all of the pools
994 if (slowpath(DebugMissingPools || DebugPoolAllocation)) {
31875a97
A
995 // pop() killed the pages already
996 } else {
997 page->kill(); // free all of the pages
998 }
999 }
1000
1001 // clear TLS value so TLS destruction doesn't loop
7257e56c 1002 setHotPage(nil);
8972963c
A
1003 }
1004
1005 static AutoreleasePoolPage *pageForPointer(const void *p)
1006 {
1007 return pageForPointer((uintptr_t)p);
1008 }
1009
1010 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
1011 {
1012 AutoreleasePoolPage *result;
1013 uintptr_t offset = p % SIZE;
1014
1807f628 1015 ASSERT(offset >= sizeof(AutoreleasePoolPage));
8972963c
A
1016
1017 result = (AutoreleasePoolPage *)(p - offset);
1018 result->fastcheck();
1019
1020 return result;
1021 }
1022
1023
c1e772c4
A
1024 static inline bool haveEmptyPoolPlaceholder()
1025 {
1026 id *tls = (id *)tls_get_direct(key);
1027 return (tls == EMPTY_POOL_PLACEHOLDER);
1028 }
1029
1030 static inline id* setEmptyPoolPlaceholder()
1031 {
1807f628 1032 ASSERT(tls_get_direct(key) == nil);
c1e772c4
A
1033 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
1034 return EMPTY_POOL_PLACEHOLDER;
1035 }
1036
8972963c
A
1037 static inline AutoreleasePoolPage *hotPage()
1038 {
1039 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
cd5f04f5 1040 tls_get_direct(key);
c1e772c4 1041 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
8972963c
A
1042 if (result) result->fastcheck();
1043 return result;
1044 }
1045
1046 static inline void setHotPage(AutoreleasePoolPage *page)
1047 {
1048 if (page) page->fastcheck();
cd5f04f5 1049 tls_set_direct(key, (void *)page);
8972963c
A
1050 }
1051
1052 static inline AutoreleasePoolPage *coldPage()
1053 {
1054 AutoreleasePoolPage *result = hotPage();
1055 if (result) {
1056 while (result->parent) {
1057 result = result->parent;
1058 result->fastcheck();
1059 }
1060 }
1061 return result;
1062 }
1063
1064
1065 static inline id *autoreleaseFast(id obj)
1066 {
1067 AutoreleasePoolPage *page = hotPage();
1068 if (page && !page->full()) {
1069 return page->add(obj);
8070259c
A
1070 } else if (page) {
1071 return autoreleaseFullPage(obj, page);
8972963c 1072 } else {
8070259c 1073 return autoreleaseNoPage(obj);
8972963c
A
1074 }
1075 }
1076
1077 static __attribute__((noinline))
8070259c 1078 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
8972963c 1079 {
8070259c
A
1080 // The hot page is full.
1081 // Step to the next non-full page, adding a new page if necessary.
1082 // Then add the object to that page.
1807f628
A
1083 ASSERT(page == hotPage());
1084 ASSERT(page->full() || DebugPoolAllocation);
8972963c
A
1085
1086 do {
1087 if (page->child) page = page->child;
1088 else page = new AutoreleasePoolPage(page);
1089 } while (page->full());
1090
1091 setHotPage(page);
1092 return page->add(obj);
1093 }
1094
8070259c
A
1095 static __attribute__((noinline))
1096 id *autoreleaseNoPage(id obj)
1097 {
c1e772c4
A
1098 // "No page" could mean no pool has been pushed
1099 // or an empty placeholder pool has been pushed and has no contents yet
1807f628 1100 ASSERT(!hotPage());
8070259c 1101
c1e772c4
A
1102 bool pushExtraBoundary = false;
1103 if (haveEmptyPoolPlaceholder()) {
1104 // We are pushing a second pool over the empty placeholder pool
1105 // or pushing the first object into the empty placeholder pool.
1106 // Before doing that, push a pool boundary on behalf of the pool
1107 // that is currently represented by the empty placeholder.
1108 pushExtraBoundary = true;
1109 }
1110 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
8070259c
A
1111 // We are pushing an object with no pool in place,
1112 // and no-pool debugging was requested by environment.
c1e772c4 1113 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
8070259c
A
1114 "autoreleased with no pool in place - "
1115 "just leaking - break on "
1116 "objc_autoreleaseNoPool() to debug",
1807f628 1117 objc_thread_self(), (void*)obj, object_getClassName(obj));
8070259c
A
1118 objc_autoreleaseNoPool(obj);
1119 return nil;
1120 }
c1e772c4
A
1121 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
1122 // We are pushing a pool with no pool in place,
1123 // and alloc-per-pool debugging was not requested.
1124 // Install and return the empty pool placeholder.
1125 return setEmptyPoolPlaceholder();
1126 }
1127
1128 // We are pushing an object or a non-placeholder'd pool.
8070259c
A
1129
1130 // Install the first page.
1131 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
1132 setHotPage(page);
c1e772c4
A
1133
1134 // Push a boundary on behalf of the previously-placeholder'd pool.
1135 if (pushExtraBoundary) {
1136 page->add(POOL_BOUNDARY);
8070259c 1137 }
c1e772c4
A
1138
1139 // Push the requested object or pool.
8070259c
A
1140 return page->add(obj);
1141 }
1142
31875a97
A
1143
1144 static __attribute__((noinline))
1145 id *autoreleaseNewPage(id obj)
1146 {
1147 AutoreleasePoolPage *page = hotPage();
1148 if (page) return autoreleaseFullPage(obj, page);
1149 else return autoreleaseNoPage(obj);
1150 }
1151
8972963c
A
1152public:
1153 static inline id autorelease(id obj)
1154 {
34d5b5e8 1155 ASSERT(!obj->isTaggedPointerOrNil());
8972963c 1156 id *dest __unused = autoreleaseFast(obj);
34d5b5e8
A
1157#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1158 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || (id)((AutoreleasePoolEntry *)dest)->ptr == obj);
1159#else
1807f628 1160 ASSERT(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
34d5b5e8 1161#endif
8972963c
A
1162 return obj;
1163 }
1164
1165
1166 static inline void *push()
1167 {
31875a97 1168 id *dest;
1807f628 1169 if (slowpath(DebugPoolAllocation)) {
31875a97 1170 // Each autorelease pool starts on a new pool page.
c1e772c4 1171 dest = autoreleaseNewPage(POOL_BOUNDARY);
31875a97 1172 } else {
c1e772c4 1173 dest = autoreleaseFast(POOL_BOUNDARY);
31875a97 1174 }
1807f628 1175 ASSERT(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
8972963c
A
1176 return dest;
1177 }
1178
1807f628 1179 __attribute__((noinline, cold))
c1e772c4
A
1180 static void badPop(void *token)
1181 {
1182 // Error. For bincompat purposes this is not
1183 // fatal in executables built with old SDKs.
1184
4a109af3 1185 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0, 2_0)) {
c1e772c4
A
1186 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1187 _objc_fatal
1188 ("Invalid or prematurely-freed autorelease pool %p.", token);
1189 }
1190
1191 // Old SDK. Bad pop is warned once.
1192 static bool complained = false;
1193 if (!complained) {
1194 complained = true;
1195 _objc_inform_now_and_on_crash
1196 ("Invalid or prematurely-freed autorelease pool %p. "
1197 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
34d5b5e8
A
1198 "Proceeding anyway because the app is old. Memory errors "
1199 "are likely.",
1200 token);
c1e772c4
A
1201 }
1202 objc_autoreleasePoolInvalid(token);
1203 }
1807f628
A
1204
1205 template<bool allowDebug>
1206 static void
1207 popPage(void *token, AutoreleasePoolPage *page, id *stop)
1208 {
1209 if (allowDebug && PrintPoolHiwat) printHiwat();
1210
1211 page->releaseUntil(stop);
1212
1213 // memory: delete empty children
1214 if (allowDebug && DebugPoolAllocation && page->empty()) {
1215 // special case: delete everything during page-per-pool debugging
1216 AutoreleasePoolPage *parent = page->parent;
1217 page->kill();
1218 setHotPage(parent);
1219 } else if (allowDebug && DebugMissingPools && page->empty() && !page->parent) {
1220 // special case: delete everything for pop(top)
1221 // when debugging missing autorelease pools
1222 page->kill();
1223 setHotPage(nil);
1224 } else if (page->child) {
1225 // hysteresis: keep one empty child if page is more than half full
1226 if (page->lessThanHalfFull()) {
1227 page->child->kill();
1228 }
1229 else if (page->child->child) {
1230 page->child->child->kill();
1231 }
1232 }
1233 }
1234
1235 __attribute__((noinline, cold))
1236 static void
1237 popPageDebug(void *token, AutoreleasePoolPage *page, id *stop)
1238 {
1239 popPage<true>(token, page, stop);
1240 }
1241
1242 static inline void
1243 pop(void *token)
8972963c
A
1244 {
1245 AutoreleasePoolPage *page;
1246 id *stop;
c1e772c4
A
1247 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1248 // Popping the top-level placeholder pool.
1807f628
A
1249 page = hotPage();
1250 if (!page) {
c1e772c4 1251 // Pool was never used. Clear the placeholder.
1807f628 1252 return setHotPage(nil);
c1e772c4 1253 }
1807f628
A
1254 // Pool was used. Pop its contents normally.
1255 // Pool pages remain allocated for re-use as usual.
1256 page = coldPage();
1257 token = page->begin();
1258 } else {
1259 page = pageForPointer(token);
c1e772c4
A
1260 }
1261
31875a97 1262 stop = (id *)token;
c1e772c4
A
1263 if (*stop != POOL_BOUNDARY) {
1264 if (stop == page->begin() && !page->parent) {
1265 // Start of coldest page may correctly not be POOL_BOUNDARY:
1266 // 1. top-level pool is popped, leaving the cold page in place
1267 // 2. an object is autoreleased with no pool
1268 } else {
1269 // Error. For bincompat purposes this is not
1270 // fatal in executables built with old SDKs.
1271 return badPop(token);
1272 }
8972963c
A
1273 }
1274
1807f628
A
1275 if (slowpath(PrintPoolHiwat || DebugPoolAllocation || DebugMissingPools)) {
1276 return popPageDebug(token, page, stop);
8972963c 1277 }
1807f628
A
1278
1279 return popPage<false>(token, page, stop);
8972963c
A
1280 }
1281
1282 static void init()
1283 {
1284 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1285 AutoreleasePoolPage::tls_dealloc);
1807f628 1286 ASSERT(r == 0);
8972963c
A
1287 }
1288
1807f628
A
1289 __attribute__((noinline, cold))
1290 void print()
8972963c
A
1291 {
1292 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1293 full() ? "(full)" : "",
1294 this == hotPage() ? "(hot)" : "",
1295 this == coldPage() ? "(cold)" : "");
1296 check(false);
1297 for (id *p = begin(); p < next; p++) {
c1e772c4 1298 if (*p == POOL_BOUNDARY) {
8972963c
A
1299 _objc_inform("[%p] ################ POOL %p", p, p);
1300 } else {
34d5b5e8
A
1301#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1302 AutoreleasePoolEntry *entry = (AutoreleasePoolEntry *)p;
1303 if (entry->count > 0) {
1304 id obj = (id)entry->ptr;
1305 _objc_inform("[%p] %#16lx %s autorelease count %u",
1306 p, (unsigned long)obj, object_getClassName(obj),
1307 entry->count + 1);
1308 goto done;
1309 }
1310#endif
1311 _objc_inform("[%p] %#16lx %s",
8972963c 1312 p, (unsigned long)*p, object_getClassName(*p));
34d5b5e8 1313 done:;
8972963c
A
1314 }
1315 }
1316 }
1317
1807f628 1318 __attribute__((noinline, cold))
8972963c 1319 static void printAll()
1807f628 1320 {
8972963c 1321 _objc_inform("##############");
1807f628 1322 _objc_inform("AUTORELEASE POOLS for thread %p", objc_thread_self());
8972963c
A
1323
1324 AutoreleasePoolPage *page;
1325 ptrdiff_t objects = 0;
1326 for (page = coldPage(); page; page = page->child) {
1327 objects += page->next - page->begin();
1328 }
1329 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1330
c1e772c4
A
1331 if (haveEmptyPoolPlaceholder()) {
1332 _objc_inform("[%p] ................ PAGE (placeholder)",
1333 EMPTY_POOL_PLACEHOLDER);
1334 _objc_inform("[%p] ################ POOL (placeholder)",
1335 EMPTY_POOL_PLACEHOLDER);
1336 }
1337 else {
1338 for (page = coldPage(); page; page = page->child) {
1339 page->print();
1340 }
8972963c
A
1341 }
1342
1343 _objc_inform("##############");
1344 }
1345
34d5b5e8
A
1346#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1347 __attribute__((noinline, cold))
1348 unsigned sumOfExtraReleases()
1349 {
1350 unsigned sumOfExtraReleases = 0;
1351 for (id *p = begin(); p < next; p++) {
1352 if (*p != POOL_BOUNDARY) {
1353 sumOfExtraReleases += ((AutoreleasePoolEntry *)p)->count;
1354 }
1355 }
1356 return sumOfExtraReleases;
1357 }
1358#endif
1359
1807f628 1360 __attribute__((noinline, cold))
8972963c
A
1361 static void printHiwat()
1362 {
1363 // Check and propagate high water mark
1364 // Ignore high water marks under 256 to suppress noise.
1365 AutoreleasePoolPage *p = hotPage();
1366 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
34d5b5e8
A
1367 if (mark > p->hiwat + 256) {
1368#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1369 unsigned sumOfExtraReleases = 0;
1370#endif
8972963c
A
1371 for( ; p; p = p->parent) {
1372 p->unprotect();
1373 p->hiwat = mark;
1374 p->protect();
34d5b5e8
A
1375
1376#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1377 sumOfExtraReleases += p->sumOfExtraReleases();
1378#endif
8972963c 1379 }
1807f628 1380
8972963c 1381 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1807f628
A
1382 "pending releases for thread %p:",
1383 mark, objc_thread_self());
34d5b5e8
A
1384#if SUPPORT_AUTORELEASEPOOL_DEDUP_PTRS
1385 if (sumOfExtraReleases > 0) {
1386 _objc_inform("POOL HIGHWATER: extra sequential autoreleases of objects: %u",
1387 sumOfExtraReleases);
1388 }
1389#endif
1807f628 1390
8972963c
A
1391 void *stack[128];
1392 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1393 char **sym = backtrace_symbols(stack, count);
1394 for (int i = 0; i < count; i++) {
1395 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1396 }
cd5f04f5 1397 free(sym);
8972963c
A
1398 }
1399 }
1400
c1e772c4 1401#undef POOL_BOUNDARY
8972963c
A
1402};
1403
8070259c
A
1404/***********************************************************************
1405* Slow paths for inline control
1406**********************************************************************/
8972963c 1407
8070259c
A
1408#if SUPPORT_NONPOINTER_ISA
1409
1410NEVER_INLINE id
1411objc_object::rootRetain_overflow(bool tryRetain)
1412{
34d5b5e8 1413 return rootRetain(tryRetain, RRVariant::Full);
8070259c
A
1414}
1415
1416
1807f628 1417NEVER_INLINE uintptr_t
8070259c
A
1418objc_object::rootRelease_underflow(bool performDealloc)
1419{
34d5b5e8 1420 return rootRelease(performDealloc, RRVariant::Full);
8070259c
A
1421}
1422
1423
1424// Slow path of clearDeallocating()
c1e772c4 1425// for objects with nonpointer isa
31875a97
A
1426// that were ever weakly referenced
1427// or whose retain count ever overflowed to the side table.
8070259c 1428NEVER_INLINE void
31875a97 1429objc_object::clearDeallocating_slow()
8972963c 1430{
1807f628 1431 ASSERT(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
8070259c 1432
31875a97
A
1433 SideTable& table = SideTables()[this];
1434 table.lock();
1435 if (isa.weakly_referenced) {
1436 weak_clear_no_lock(&table.weak_table, (id)this);
1437 }
1438 if (isa.has_sidetable_rc) {
1439 table.refcnts.erase(this);
1440 }
1441 table.unlock();
8070259c 1442}
8972963c 1443
8070259c
A
1444#endif
1445
1446__attribute__((noinline,used))
1447id
1448objc_object::rootAutorelease2()
1449{
1807f628 1450 ASSERT(!isTaggedPointer());
8070259c
A
1451 return AutoreleasePoolPage::autorelease((id)this);
1452}
1453
1454
1455BREAKPOINT_FUNCTION(
1456 void objc_overrelease_during_dealloc_error(void)
1457);
1458
1459
1807f628 1460NEVER_INLINE uintptr_t
8070259c
A
1461objc_object::overrelease_error()
1462{
1463 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1464 objc_overrelease_during_dealloc_error();
1807f628 1465 return 0; // allow rootRelease() to tail-call this
8972963c
A
1466}
1467
8070259c
A
1468
1469/***********************************************************************
1470* Retain count operations for side table.
1471**********************************************************************/
1472
1473
31875a97 1474#if DEBUG
8070259c 1475// Used to assert that an object is not present in the side table.
8972963c 1476bool
8070259c 1477objc_object::sidetable_present()
8972963c 1478{
8070259c 1479 bool result = false;
31875a97 1480 SideTable& table = SideTables()[this];
8070259c 1481
31875a97 1482 table.lock();
8070259c 1483
31875a97
A
1484 RefcountMap::iterator it = table.refcnts.find(this);
1485 if (it != table.refcnts.end()) result = true;
8070259c 1486
31875a97 1487 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
8070259c 1488
31875a97 1489 table.unlock();
8070259c
A
1490
1491 return result;
1492}
1493#endif
1494
1495#if SUPPORT_NONPOINTER_ISA
1496
1497void
1498objc_object::sidetable_lock()
1499{
31875a97
A
1500 SideTable& table = SideTables()[this];
1501 table.lock();
8070259c
A
1502}
1503
1504void
1505objc_object::sidetable_unlock()
1506{
31875a97
A
1507 SideTable& table = SideTables()[this];
1508 table.unlock();
8070259c
A
1509}
1510
1511
1512// Move the entire retain count to the side table,
1513// as well as isDeallocating and weaklyReferenced.
1514void
1515objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1516 bool isDeallocating,
1517 bool weaklyReferenced)
1518{
1807f628 1519 ASSERT(!isa.nonpointer); // should already be changed to raw pointer
31875a97 1520 SideTable& table = SideTables()[this];
8070259c 1521
31875a97 1522 size_t& refcntStorage = table.refcnts[this];
8070259c
A
1523 size_t oldRefcnt = refcntStorage;
1524 // not deallocating - that was in the isa
1807f628
A
1525 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1526 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
8070259c
A
1527
1528 uintptr_t carry;
34d5b5e8 1529 size_t refcnt = addc(oldRefcnt, (extra_rc - 1) << SIDE_TABLE_RC_SHIFT, 0, &carry);
8070259c
A
1530 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1531 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1532 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1533
1534 refcntStorage = refcnt;
1535}
1536
1537
1538// Move some retain counts to the side table from the isa field.
1539// Returns true if the object is now pinned.
1540bool
1541objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1542{
1807f628 1543 ASSERT(isa.nonpointer);
31875a97 1544 SideTable& table = SideTables()[this];
8070259c 1545
31875a97 1546 size_t& refcntStorage = table.refcnts[this];
8070259c 1547 size_t oldRefcnt = refcntStorage;
31875a97 1548 // isa-side bits should not be set here
1807f628
A
1549 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1550 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
8070259c
A
1551
1552 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1553
1554 uintptr_t carry;
1555 size_t newRefcnt =
1556 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1557 if (carry) {
1558 refcntStorage =
1559 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1560 return true;
1561 }
1562 else {
1563 refcntStorage = newRefcnt;
1564 return false;
1565 }
1566}
1567
1568
1569// Move some retain counts from the side table to the isa field.
31875a97 1570// Returns the actual count subtracted, which may be less than the request.
34d5b5e8 1571objc_object::SidetableBorrow
8070259c
A
1572objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1573{
1807f628 1574 ASSERT(isa.nonpointer);
31875a97 1575 SideTable& table = SideTables()[this];
8070259c 1576
31875a97
A
1577 RefcountMap::iterator it = table.refcnts.find(this);
1578 if (it == table.refcnts.end() || it->second == 0) {
1579 // Side table retain count is zero. Can't borrow.
34d5b5e8 1580 return { 0, 0 };
8070259c 1581 }
31875a97
A
1582 size_t oldRefcnt = it->second;
1583
1584 // isa-side bits should not be set here
1807f628
A
1585 ASSERT((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1586 ASSERT((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
8070259c
A
1587
1588 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1807f628 1589 ASSERT(oldRefcnt > newRefcnt); // shouldn't underflow
31875a97 1590 it->second = newRefcnt;
34d5b5e8 1591 return { delta_rc, newRefcnt >> SIDE_TABLE_RC_SHIFT };
8070259c
A
1592}
1593
1594
1595size_t
1596objc_object::sidetable_getExtraRC_nolock()
1597{
1807f628 1598 ASSERT(isa.nonpointer);
31875a97
A
1599 SideTable& table = SideTables()[this];
1600 RefcountMap::iterator it = table.refcnts.find(this);
1601 if (it == table.refcnts.end()) return 0;
1602 else return it->second >> SIDE_TABLE_RC_SHIFT;
8070259c 1603}
8972963c 1604
8972963c 1605
34d5b5e8
A
1606void
1607objc_object::sidetable_clearExtraRC_nolock()
1608{
1609 ASSERT(isa.nonpointer);
1610 SideTable& table = SideTables()[this];
1611 RefcountMap::iterator it = table.refcnts.find(this);
1612 table.refcnts.erase(it);
1613}
1614
1615
8070259c
A
1616// SUPPORT_NONPOINTER_ISA
1617#endif
1618
1619
8070259c 1620id
34d5b5e8 1621objc_object::sidetable_retain(bool locked)
8070259c
A
1622{
1623#if SUPPORT_NONPOINTER_ISA
1807f628 1624 ASSERT(!isa.nonpointer);
8070259c 1625#endif
c1e772c4
A
1626 SideTable& table = SideTables()[this];
1627
34d5b5e8 1628 if (!locked) table.lock();
31875a97 1629 size_t& refcntStorage = table.refcnts[this];
8070259c
A
1630 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1631 refcntStorage += SIDE_TABLE_RC_ONE;
1632 }
31875a97 1633 table.unlock();
8070259c
A
1634
1635 return (id)this;
1636}
1637
1638
8070259c
A
1639bool
1640objc_object::sidetable_tryRetain()
1641{
1642#if SUPPORT_NONPOINTER_ISA
1807f628 1643 ASSERT(!isa.nonpointer);
8070259c 1644#endif
31875a97 1645 SideTable& table = SideTables()[this];
8972963c
A
1646
1647 // NO SPINLOCK HERE
1648 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1649 // which already acquired the lock on our behalf.
7257e56c
A
1650
1651 // fixme can't do this efficiently with os_lock_handoff_s
31875a97 1652 // if (table.slock == 0) {
7257e56c
A
1653 // _objc_fatal("Do not call -_tryRetain.");
1654 // }
8972963c
A
1655
1656 bool result = true;
1807f628
A
1657 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_RC_ONE);
1658 auto &refcnt = it.first->second;
1659 if (it.second) {
1660 // there was no entry
1661 } else if (refcnt & SIDE_TABLE_DEALLOCATING) {
8972963c 1662 result = false;
1807f628
A
1663 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1664 refcnt += SIDE_TABLE_RC_ONE;
8972963c
A
1665 }
1666
1667 return result;
1668}
1669
8070259c
A
1670
1671uintptr_t
1672objc_object::sidetable_retainCount()
8972963c 1673{
31875a97 1674 SideTable& table = SideTables()[this];
8070259c
A
1675
1676 size_t refcnt_result = 1;
1677
31875a97
A
1678 table.lock();
1679 RefcountMap::iterator it = table.refcnts.find(this);
1680 if (it != table.refcnts.end()) {
8070259c
A
1681 // this is valid for SIDE_TABLE_RC_PINNED too
1682 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1683 }
31875a97 1684 table.unlock();
8070259c
A
1685 return refcnt_result;
1686}
8972963c 1687
8972963c 1688
8070259c
A
1689bool
1690objc_object::sidetable_isDeallocating()
1691{
31875a97 1692 SideTable& table = SideTables()[this];
8972963c
A
1693
1694 // NO SPINLOCK HERE
1695 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1696 // which already acquired the lock on our behalf.
7257e56c
A
1697
1698
1699 // fixme can't do this efficiently with os_lock_handoff_s
31875a97 1700 // if (table.slock == 0) {
7257e56c
A
1701 // _objc_fatal("Do not call -_isDeallocating.");
1702 // }
8972963c 1703
31875a97
A
1704 RefcountMap::iterator it = table.refcnts.find(this);
1705 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
8972963c
A
1706}
1707
1708
8070259c
A
1709bool
1710objc_object::sidetable_isWeaklyReferenced()
8972963c 1711{
8070259c 1712 bool result = false;
8972963c 1713
31875a97
A
1714 SideTable& table = SideTables()[this];
1715 table.lock();
8070259c 1716
31875a97
A
1717 RefcountMap::iterator it = table.refcnts.find(this);
1718 if (it != table.refcnts.end()) {
8070259c 1719 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
8972963c 1720 }
8070259c 1721
31875a97 1722 table.unlock();
8070259c
A
1723
1724 return result;
1725}
1726
34d5b5e8
A
1727#if OBJC_WEAK_FORMATION_CALLOUT_DEFINED
1728//Clients can dlsym() for this symbol to see if an ObjC supporting
1729//-_setWeaklyReferenced is present
1730OBJC_EXPORT const uintptr_t _objc_has_weak_formation_callout = 0;
1731static_assert(SUPPORT_NONPOINTER_ISA, "Weak formation callout must only be defined when nonpointer isa is supported.");
1732#else
1733static_assert(!SUPPORT_NONPOINTER_ISA, "If weak callout is not present then we must not support nonpointer isas.");
1734#endif
8070259c
A
1735
1736void
1737objc_object::sidetable_setWeaklyReferenced_nolock()
1738{
1739#if SUPPORT_NONPOINTER_ISA
1807f628 1740 ASSERT(!isa.nonpointer);
8070259c 1741#endif
34d5b5e8 1742
31875a97 1743 SideTable& table = SideTables()[this];
34d5b5e8 1744
31875a97 1745 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
8972963c
A
1746}
1747
1748
31875a97
A
1749// rdar://20206767
1750// return uintptr_t instead of bool so that the various raw-isa
1751// -release paths all return zero in eax
31875a97 1752uintptr_t
34d5b5e8 1753objc_object::sidetable_release(bool locked, bool performDealloc)
8972963c 1754{
8070259c 1755#if SUPPORT_NONPOINTER_ISA
1807f628 1756 ASSERT(!isa.nonpointer);
8070259c 1757#endif
c1e772c4
A
1758 SideTable& table = SideTables()[this];
1759
8972963c
A
1760 bool do_dealloc = false;
1761
34d5b5e8 1762 if (!locked) table.lock();
1807f628
A
1763 auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
1764 auto &refcnt = it.first->second;
1765 if (it.second) {
8972963c 1766 do_dealloc = true;
1807f628 1767 } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
7257e56c 1768 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
8972963c 1769 do_dealloc = true;
1807f628
A
1770 refcnt |= SIDE_TABLE_DEALLOCATING;
1771 } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
1772 refcnt -= SIDE_TABLE_RC_ONE;
8972963c 1773 }
31875a97 1774 table.unlock();
8070259c 1775 if (do_dealloc && performDealloc) {
1807f628 1776 ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
8070259c 1777 }
8972963c
A
1778 return do_dealloc;
1779}
1780
8972963c 1781
8070259c
A
1782void
1783objc_object::sidetable_clearDeallocating()
8972963c 1784{
31875a97 1785 SideTable& table = SideTables()[this];
8972963c 1786
8070259c
A
1787 // clear any weak table items
1788 // clear extra retain count and deallocating bit
1789 // (fixme warn or abort if extra retain count == 0 ?)
31875a97
A
1790 table.lock();
1791 RefcountMap::iterator it = table.refcnts.find(this);
1792 if (it != table.refcnts.end()) {
8070259c 1793 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
31875a97 1794 weak_clear_no_lock(&table.weak_table, (id)this);
8070259c 1795 }
31875a97 1796 table.refcnts.erase(it);
8972963c 1797 }
31875a97 1798 table.unlock();
8972963c
A
1799}
1800
8070259c
A
1801
1802/***********************************************************************
1803* Optimized retain/release/autorelease entrypoints
1804**********************************************************************/
1805
1806
1807#if __OBJC2__
1808
1807f628 1809__attribute__((aligned(16), flatten, noinline))
8070259c
A
1810id
1811objc_retain(id obj)
8972963c 1812{
34d5b5e8 1813 if (obj->isTaggedPointerOrNil()) return obj;
8070259c 1814 return obj->retain();
8972963c
A
1815}
1816
cd5f04f5 1817
1807f628 1818__attribute__((aligned(16), flatten, noinline))
8070259c
A
1819void
1820objc_release(id obj)
1821{
34d5b5e8 1822 if (obj->isTaggedPointerOrNil()) return;
8070259c
A
1823 return obj->release();
1824}
1825
1826
1807f628 1827__attribute__((aligned(16), flatten, noinline))
8070259c
A
1828id
1829objc_autorelease(id obj)
1830{
34d5b5e8 1831 if (obj->isTaggedPointerOrNil()) return obj;
8070259c
A
1832 return obj->autorelease();
1833}
1834
1835
1836// OBJC2
1837#else
1838// not OBJC2
1839
1840
1841id objc_retain(id obj) { return [obj retain]; }
1842void objc_release(id obj) { [obj release]; }
1843id objc_autorelease(id obj) { return [obj autorelease]; }
1844
1845
1846#endif
1847
1848
1849/***********************************************************************
1850* Basic operations for root class implementations a.k.a. _objc_root*()
1851**********************************************************************/
1852
1853bool
1854_objc_rootTryRetain(id obj)
1855{
1807f628 1856 ASSERT(obj);
8070259c
A
1857
1858 return obj->rootTryRetain();
1859}
1860
1861bool
1862_objc_rootIsDeallocating(id obj)
1863{
1807f628 1864 ASSERT(obj);
8070259c
A
1865
1866 return obj->rootIsDeallocating();
1867}
1868
1869
1870void
1871objc_clear_deallocating(id obj)
1872{
1807f628 1873 ASSERT(obj);
8070259c
A
1874
1875 if (obj->isTaggedPointer()) return;
1876 obj->clearDeallocating();
1877}
1878
1879
1880bool
1881_objc_rootReleaseWasZero(id obj)
1882{
1807f628 1883 ASSERT(obj);
8070259c
A
1884
1885 return obj->rootReleaseShouldDealloc();
1886}
1887
1888
1807f628 1889NEVER_INLINE id
8070259c
A
1890_objc_rootAutorelease(id obj)
1891{
1807f628 1892 ASSERT(obj);
8070259c
A
1893 return obj->rootAutorelease();
1894}
1895
1896uintptr_t
1897_objc_rootRetainCount(id obj)
1898{
1807f628 1899 ASSERT(obj);
8070259c
A
1900
1901 return obj->rootRetainCount();
1902}
1903
1904
1807f628 1905NEVER_INLINE id
8070259c
A
1906_objc_rootRetain(id obj)
1907{
1807f628 1908 ASSERT(obj);
8070259c
A
1909
1910 return obj->rootRetain();
1911}
1912
1807f628 1913NEVER_INLINE void
8070259c
A
1914_objc_rootRelease(id obj)
1915{
1807f628 1916 ASSERT(obj);
8070259c
A
1917
1918 obj->rootRelease();
1919}
1920
34d5b5e8 1921// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
8070259c
A
1922// shortcutting optimizations.
1923static ALWAYS_INLINE id
1924callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
8972963c 1925{
7257e56c 1926#if __OBJC2__
1807f628 1927 if (slowpath(checkNil && !cls)) return nil;
c1e772c4 1928 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1807f628 1929 return _objc_rootAllocWithZone(cls, nil);
7257e56c
A
1930 }
1931#endif
8070259c
A
1932
1933 // No shortcuts available.
1807f628
A
1934 if (allocWithZone) {
1935 return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
1936 }
1937 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
7257e56c
A
1938}
1939
8070259c
A
1940
1941// Base class implementation of +alloc. cls is not nil.
1942// Calls [cls allocWithZone:nil].
1943id
1944_objc_rootAlloc(Class cls)
1945{
1946 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1947}
1948
1949// Calls [cls alloc].
7257e56c
A
1950id
1951objc_alloc(Class cls)
1952{
8070259c 1953 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
7257e56c
A
1954}
1955
8070259c 1956// Calls [cls allocWithZone:nil].
34d5b5e8 1957id
7257e56c
A
1958objc_allocWithZone(Class cls)
1959{
8070259c 1960 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
8972963c
A
1961}
1962
13ba007e
A
1963// Calls [[cls alloc] init].
1964id
1965objc_alloc_init(Class cls)
1966{
1967 return [callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/) init];
1968}
1969
1807f628
A
1970// Calls [cls new]
1971id
1972objc_opt_new(Class cls)
1973{
1974#if __OBJC2__
1975 if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
34d5b5e8 1976 return [callAlloc(cls, false/*checkNil*/) init];
1807f628
A
1977 }
1978#endif
1979 return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
1980}
1981
1982// Calls [obj self]
1983id
1984objc_opt_self(id obj)
1985{
1986#if __OBJC2__
34d5b5e8 1987 if (fastpath(obj->isTaggedPointerOrNil() || !obj->ISA()->hasCustomCore())) {
1807f628
A
1988 return obj;
1989 }
1990#endif
1991 return ((id(*)(id, SEL))objc_msgSend)(obj, @selector(self));
1992}
1993
1994// Calls [obj class]
1995Class
1996objc_opt_class(id obj)
1997{
1998#if __OBJC2__
1999 if (slowpath(!obj)) return nil;
2000 Class cls = obj->getIsa();
2001 if (fastpath(!cls->hasCustomCore())) {
2002 return cls->isMetaClass() ? obj : cls;
2003 }
2004#endif
2005 return ((Class(*)(id, SEL))objc_msgSend)(obj, @selector(class));
2006}
2007
// Compiler-emitted fast path for [obj isKindOfClass:].
BOOL
objc_opt_isKindOfClass(id obj, Class otherClass)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class isa = obj->getIsa();
    if (fastpath(!isa->hasCustomCore())) {
        // Walk the superclass chain looking for a match.
        for (Class candidate = isa; candidate; candidate = candidate->getSuperclass()) {
            if (candidate == otherClass) return YES;
        }
        return NO;
    }
#endif
    return ((BOOL(*)(id, SEL, Class))objc_msgSend)(obj, @selector(isKindOfClass:), otherClass);
}
2024
// Compiler-emitted fast path for [obj respondsToSelector:].
BOOL
objc_opt_respondsToSelector(id obj, SEL sel)
{
#if __OBJC2__
    if (slowpath(!obj)) return NO;
    Class isa = obj->getIsa();
    if (fastpath(!isa->hasCustomCore())) {
        return class_respondsToSelector_inst(obj, sel, isa);
    }
#endif
    return ((BOOL(*)(id, SEL, SEL))objc_msgSend)(obj, @selector(respondsToSelector:), sel);
}
8070259c 2038
8972963c
A
// Base-class implementation of -dealloc. obj must not be nil.
void
_objc_rootDealloc(id obj)
{
    ASSERT(obj);

    obj->rootDealloc();
}
2046
// GC finalization entry point. GC is unsupported, so reaching
// this is always a fatal error.
void
_objc_rootFinalize(id obj __unused)
{
    ASSERT(obj);
    _objc_fatal("_objc_rootFinalize called with garbage collection off");
}
2053
8070259c
A
2054
// Base-class implementation of -init: identity.
id
_objc_rootInit(id obj)
{
    // In practice, it will be hard to rely on this function.
    // Many classes do not properly chain -init calls.
    return obj;
}
2062
2063
8972963c
A
// Base-class implementation of -zone.
malloc_zone_t *
_objc_rootZone(id obj)
{
    (void)obj;
#if __OBJC2__
    // allocWithZone under __OBJC2__ ignores the zone parameter
    return malloc_default_zone();
#else
    // Legacy runtime: report the zone the object was actually
    // allocated from, falling back to the default zone.
    malloc_zone_t *rval = malloc_zone_from_ptr(obj);
    return rval ? rval : malloc_default_zone();
#endif
}
2076
// Base-class implementation of -hash: the object's address.
uintptr_t
_objc_rootHash(id obj)
{
    return (uintptr_t)obj;
}
2082
8972963c
A
// Push a new autorelease pool; returns an opaque token
// to hand back to objc_autoreleasePoolPop().
void *
objc_autoreleasePoolPush(void)
{
    return AutoreleasePoolPage::push();
}
2088
// Pop an autorelease pool back to the token returned by push.
NEVER_INLINE
void
objc_autoreleasePoolPop(void *ctxt)
{
    AutoreleasePoolPage::pop(ctxt);
}
2095
8972963c 2096
8070259c
A
// Legacy-named wrapper for objc_autoreleasePoolPush().
void *
_objc_autoreleasePoolPush(void)
{
    return objc_autoreleasePoolPush();
}
2102
8070259c
A
// Legacy-named wrapper for objc_autoreleasePoolPop().
void
_objc_autoreleasePoolPop(void *ctxt)
{
    objc_autoreleasePoolPop(ctxt);
}
2108
8070259c
A
// Debugging aid: dump the contents of all autorelease pools.
void
_objc_autoreleasePoolPrint(void)
{
    AutoreleasePoolPage::printAll();
}
2114
31875a97
A
2115
// Same as objc_release but suitable for tail-calling
// if you need the value back and don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_releaseAndReturn(id obj)
{
    objc_release(obj);
    return obj;
}
2125
// Same as objc_retainAutorelease but suitable for tail-calling
// if you don't want to push a frame before this point.
__attribute__((noinline))
static id
objc_retainAutoreleaseAndReturn(id obj)
{
    return objc_retainAutorelease(obj);
}
2134
2135
// Prepare a value at +1 for return through a +0 autoreleasing convention.
// If the caller cooperates with the return-value optimization, the
// autorelease is elided and ownership is handed off directly.
id
objc_autoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;

    // Caller is not participating: fall back to a real autorelease.
    return objc_autorelease(obj);
}
2144
// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
    if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;

    // not objc_autoreleaseReturnValue(objc_retain(obj))
    // because we don't need another optimization attempt
    return objc_retainAutoreleaseAndReturn(obj);
}
2155
// Accept a value returned through a +0 autoreleasing convention for use at +1.
// If the callee performed the handoff, ownership transfers with no
// retain; otherwise take a real retain.
id
objc_retainAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;

    return objc_retain(obj);
}
2164
31875a97
A
// Accept a value returned through a +0 autoreleasing convention for use at +0.
id
objc_unsafeClaimAutoreleasedReturnValue(id obj)
{
    if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;

    // Callee handed us +1: balance it, tail-call friendly.
    return objc_releaseAndReturn(obj);
}
2173
8972963c
A
// Retain then autorelease: net ownership change is zero, but the
// object is guaranteed to survive until the pool drains.
id
objc_retainAutorelease(id obj)
{
    return objc_autorelease(objc_retain(obj));
}
2179
// dispatch_async-compatible trampoline: context is the object to
// deallocate on the main thread.
void
_objc_deallocOnMainThreadHelper(void *context)
{
    id obj = (id)context;
    [obj dealloc];
}
2186
// convert objc_objectptr_t to id, callee must take ownership.
id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert objc_objectptr_t to id, without ownership transfer.
id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }

// convert id to objc_objectptr_t, no ownership transfer.
objc_objectptr_t objc_unretainedPointer(id object) { return object; }
8972963c
A
2195
2196
// One-time initialization of the ARR (automatic retain/release)
// machinery: autorelease pools, side tables, associated objects.
void arr_init(void)
{
    AutoreleasePoolPage::init();
    SideTablesMap.init();
    _objc_associations_init();
}
cd5f04f5 2203
c1e772c4
A
2204
#if SUPPORT_TAGGED_POINTERS

// Placeholder for old debuggers. When they inspect an
// extended tagged pointer object they will see this isa.

@interface __NSUnrecognizedTaggedPointer : NSObject
@end

__attribute__((objc_nonlazy_class))
@implementation __NSUnrecognizedTaggedPointer
// Tagged pointers are immortal: reference counting is a no-op.
-(id) retain { return self; }
-(oneway void) release { }
-(id) autorelease { return self; }
@end

#endif
2221
__attribute__((objc_nonlazy_class))
@implementation NSObject

+ (void)initialize {
}

#pragma mark - Identity

+ (id)self {
    return (id)self;
}

- (id)self {
    return self;
}

+ (Class)class {
    return self;
}

- (Class)class {
    return object_getClass(self);
}

+ (Class)superclass {
    return self->getSuperclass();
}

- (Class)superclass {
    return [self class]->getSuperclass();
}

#pragma mark - Introspection

+ (BOOL)isMemberOfClass:(Class)cls {
    // A class object is a member of its metaclass.
    return self->ISA() == cls;
}

- (BOOL)isMemberOfClass:(Class)cls {
    return [self class] == cls;
}

+ (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = self->ISA(); tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

- (BOOL)isKindOfClass:(Class)cls {
    for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isSubclassOfClass:(Class)cls {
    for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == cls) return YES;
    }
    return NO;
}

+ (BOOL)isAncestorOfObject:(NSObject *)obj {
    for (Class tcls = [obj class]; tcls; tcls = tcls->getSuperclass()) {
        if (tcls == self) return YES;
    }
    return NO;
}

+ (BOOL)instancesRespondToSelector:(SEL)sel {
    return class_respondsToSelector_inst(nil, sel, self);
}

+ (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, self->ISA());
}

- (BOOL)respondsToSelector:(SEL)sel {
    return class_respondsToSelector_inst(self, sel, [self class]);
}

+ (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = self; tcls; tcls = tcls->getSuperclass()) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

- (BOOL)conformsToProtocol:(Protocol *)protocol {
    if (!protocol) return NO;
    for (Class tcls = [self class]; tcls; tcls = tcls->getSuperclass()) {
        if (class_conformsToProtocol(tcls, protocol)) return YES;
    }
    return NO;
}

#pragma mark - Hashing and equality

+ (NSUInteger)hash {
    return _objc_rootHash(self);
}

- (NSUInteger)hash {
    return _objc_rootHash(self);
}

+ (BOOL)isEqual:(id)obj {
    return obj == (id)self;
}

- (BOOL)isEqual:(id)obj {
    return obj == self;
}


+ (BOOL)isFault {
    return NO;
}

- (BOOL)isFault {
    return NO;
}

+ (BOOL)isProxy {
    return NO;
}

- (BOOL)isProxy {
    return NO;
}

#pragma mark - Method lookup and forwarding

+ (IMP)instanceMethodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return class_getMethodImplementation(self, sel);
}

+ (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation((id)self, sel);
}

- (IMP)methodForSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return object_getMethodImplementation(self, sel);
}

+ (BOOL)resolveClassMethod:(SEL)sel {
    return NO;
}

+ (BOOL)resolveInstanceMethod:(SEL)sel {
    return NO;
}

// Replaced by CF (throws an NSException)
+ (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
                class_getName(self), sel_getName(sel), self);
}

// Replaced by CF (throws an NSException)
- (void)doesNotRecognizeSelector:(SEL)sel {
    _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
                object_getClassName(self), sel_getName(sel), self);
}


+ (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
}

+ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
}

- (id)performSelector:(SEL)sel {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL))objc_msgSend)(self, sel);
}

- (id)performSelector:(SEL)sel withObject:(id)obj {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
}

- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
    if (!sel) [self doesNotRecognizeSelector:sel];
    return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
}


// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
+ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("+[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

// Replaced by CF (returns an NSMethodSignature)
- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
    _objc_fatal("-[NSObject methodSignatureForSelector:] "
                "not available without CoreFoundation");
}

+ (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

- (void)forwardInvocation:(NSInvocation *)invocation {
    [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
}

+ (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}

- (id)forwardingTargetForSelector:(SEL)sel {
    return nil;
}


// Replaced by CF (returns an NSString)
+ (NSString *)description {
    return nil;
}

// Replaced by CF (returns an NSString)
- (NSString *)description {
    return nil;
}

+ (NSString *)debugDescription {
    return [self description];
}

- (NSString *)debugDescription {
    return [self description];
}

#pragma mark - Lifecycle and reference counting

+ (id)new {
    return [callAlloc(self, false/*checkNil*/) init];
}

+ (id)retain {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)retain {
    return _objc_rootRetain(self);
}


+ (BOOL)_tryRetain {
    return YES;
}

// Replaced by ObjectAlloc
- (BOOL)_tryRetain {
    return _objc_rootTryRetain(self);
}

+ (BOOL)_isDeallocating {
    return NO;
}

- (BOOL)_isDeallocating {
    return _objc_rootIsDeallocating(self);
}

+ (BOOL)allowsWeakReference {
    return YES;
}

+ (BOOL)retainWeakReference {
    return YES;
}

- (BOOL)allowsWeakReference {
    return ! [self _isDeallocating];
}

- (BOOL)retainWeakReference {
    return [self _tryRetain];
}

+ (oneway void)release {
}

// Replaced by ObjectAlloc
- (oneway void)release {
    _objc_rootRelease(self);
}

+ (id)autorelease {
    return (id)self;
}

// Replaced by ObjectAlloc
- (id)autorelease {
    return _objc_rootAutorelease(self);
}

+ (NSUInteger)retainCount {
    return ULONG_MAX;
}

- (NSUInteger)retainCount {
    return _objc_rootRetainCount(self);
}

+ (id)alloc {
    return _objc_rootAlloc(self);
}

// Replaced by ObjectAlloc
+ (id)allocWithZone:(struct _NSZone *)zone {
    return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}

// Replaced by CF (throws an NSException)
+ (id)init {
    return (id)self;
}

- (id)init {
    return _objc_rootInit(self);
}

// Replaced by CF (throws an NSException)
+ (void)dealloc {
}


// Replaced by NSZombies
- (void)dealloc {
    _objc_rootDealloc(self);
}

// Previously used by GC. Now a placeholder for binary compatibility.
- (void) finalize {
}

+ (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

- (struct _NSZone *)zone {
    return (struct _NSZone *)_objc_rootZone(self);
}

#pragma mark - Copying

+ (id)copy {
    return (id)self;
}

+ (id)copyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)copy {
    return [(id)self copyWithZone:nil];
}

+ (id)mutableCopy {
    return (id)self;
}

+ (id)mutableCopyWithZone:(struct _NSZone *)zone {
    return (id)self;
}

- (id)mutableCopy {
    return [(id)self mutableCopyWithZone:nil];
}

@end
2608
cd5f04f5 2609