1 /*
2 * Copyright (c) 2010-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include "objc-private.h"
25 #include "NSObject.h"
26
27 #include "objc-weak.h"
28 #include "llvm-DenseMap.h"
29 #include "NSObject.h"
30
31 #include <malloc/malloc.h>
32 #include <stdint.h>
33 #include <stdbool.h>
34 #include <mach/mach.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/nlist.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <libkern/OSAtomic.h>
40 #include <Block.h>
41 #include <map>
42 #include <execinfo.h>
43
44 @interface NSInvocation
45 - (SEL)selector;
46 @end
47
48
49 #if TARGET_OS_MAC
50
51 // NSObject used to be in Foundation/CoreFoundation.
52
53 #define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \
54 OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0
55 #define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \
56 SYMBOL_ELSEWHERE_IN_3(sym, vers, n)
57 #define SYMBOL_ELSEWHERE_IN(sym, vers) \
58 SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__)
59
60 #if __OBJC2__
61 # define NSOBJECT_ELSEWHERE_IN(vers) \
62 SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \
63 SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \
64 SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers)
65 #else
66 # define NSOBJECT_ELSEWHERE_IN(vers) \
67 SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
68 #endif
69
70 #if TARGET_OS_IOS
71 NSOBJECT_ELSEWHERE_IN(5.1);
72 NSOBJECT_ELSEWHERE_IN(5.0);
73 NSOBJECT_ELSEWHERE_IN(4.3);
74 NSOBJECT_ELSEWHERE_IN(4.2);
75 NSOBJECT_ELSEWHERE_IN(4.1);
76 NSOBJECT_ELSEWHERE_IN(4.0);
77 NSOBJECT_ELSEWHERE_IN(3.2);
78 NSOBJECT_ELSEWHERE_IN(3.1);
79 NSOBJECT_ELSEWHERE_IN(3.0);
80 NSOBJECT_ELSEWHERE_IN(2.2);
81 NSOBJECT_ELSEWHERE_IN(2.1);
82 NSOBJECT_ELSEWHERE_IN(2.0);
83 #elif TARGET_OS_OSX
84 NSOBJECT_ELSEWHERE_IN(10.7);
85 NSOBJECT_ELSEWHERE_IN(10.6);
86 NSOBJECT_ELSEWHERE_IN(10.5);
87 NSOBJECT_ELSEWHERE_IN(10.4);
88 NSOBJECT_ELSEWHERE_IN(10.3);
89 NSOBJECT_ELSEWHERE_IN(10.2);
90 NSOBJECT_ELSEWHERE_IN(10.1);
91 NSOBJECT_ELSEWHERE_IN(10.0);
92 #else
93 // NSObject has always been in libobjc on these platforms.
94 #endif
95
96 // TARGET_OS_MAC
97 #endif
98
99
100 /***********************************************************************
101 * Weak ivar support
102 **********************************************************************/
103
104 static id defaultBadAllocHandler(Class cls)
105 {
106 _objc_fatal("attempt to allocate object of class '%s' failed",
107 cls->nameForLogging());
108 }
109
110 static id(*badAllocHandler)(Class) = &defaultBadAllocHandler;
111
112 static id callBadAllocHandler(Class cls)
113 {
114 // fixme add re-entrancy protection in case allocation fails inside handler
115 return (*badAllocHandler)(cls);
116 }
117
118 void _objc_setBadAllocHandler(id(*newHandler)(Class))
119 {
120 badAllocHandler = newHandler;
121 }
122
123
124 namespace {
125
126 // The order of these bits is important.
127 #define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
128 #define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
129 #define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit
130 #define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1))
131
132 #define SIDE_TABLE_RC_SHIFT 2
133 #define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1)
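// Illustrative sketch of how a side table refcount word is laid out under
// the defines above (not compiled into the runtime). The low bits are flags,
// the extra retain count is stored shifted left by SIDE_TABLE_RC_SHIFT, and
// the top bit of the word pins the count.
#if 0
// An object with 3 extra retains that is weakly referenced:
//   (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED == 0xD
size_t word      = (3 << SIDE_TABLE_RC_SHIFT) | SIDE_TABLE_WEAKLY_REFERENCED;
size_t extra_rc  = word >> SIDE_TABLE_RC_SHIFT;            // 3
bool   isWeak    = word & SIDE_TABLE_WEAKLY_REFERENCED;    // true
bool   isDealloc = word & SIDE_TABLE_DEALLOCATING;         // false
bool   isPinned  = word & SIDE_TABLE_RC_PINNED;            // false
#endif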
134
135 // RefcountMap disguises its pointers because we
136 // don't want the table to act as a root for `leaks`.
137 typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
138
139 struct SideTable {
140 spinlock_t slock;
141 RefcountMap refcnts;
142 weak_table_t weak_table;
143
144 SideTable() {
145 memset(&weak_table, 0, sizeof(weak_table));
146 }
147
148 ~SideTable() {
149 _objc_fatal("Do not delete SideTable.");
150 }
151
152 void lock() { slock.lock(); }
153 void unlock() { slock.unlock(); }
154
155 // Address-ordered lock discipline for a pair of side tables.
156
157 template<bool HaveOld, bool HaveNew>
158 static void lockTwo(SideTable *lock1, SideTable *lock2);
159 template<bool HaveOld, bool HaveNew>
160 static void unlockTwo(SideTable *lock1, SideTable *lock2);
161 };
162
163
164 template<>
165 void SideTable::lockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
166 spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
167 }
168
169 template<>
170 void SideTable::lockTwo<true, false>(SideTable *lock1, SideTable *) {
171 lock1->lock();
172 }
173
174 template<>
175 void SideTable::lockTwo<false, true>(SideTable *, SideTable *lock2) {
176 lock2->lock();
177 }
178
179 template<>
180 void SideTable::unlockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
181 spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
182 }
183
184 template<>
185 void SideTable::unlockTwo<true, false>(SideTable *lock1, SideTable *) {
186 lock1->unlock();
187 }
188
189 template<>
190 void SideTable::unlockTwo<false, true>(SideTable *, SideTable *lock2) {
191 lock2->unlock();
192 }
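// A minimal sketch of the address-ordered discipline that lockTwo<true,true>
// relies on: take the lower-addressed lock first so two threads locking the
// same pair can never deadlock. The real ordering lives inside
// spinlock_t::lockTwo; this is only an illustration and is not compiled.
#if 0
static void lockPairOrdered(SideTable *a, SideTable *b) {
    if (a == b) { a->lock(); return; }     // same striped bucket
    if (a < b)  { a->lock(); b->lock(); }  // lower address first
    else        { b->lock(); a->lock(); }
}
#endif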
193
194
195
196 // We cannot use a C++ static initializer to initialize SideTables because
197 // libc calls us before our C++ initializers run. We also don't want a global
198 // pointer to this struct because of the extra indirection.
199 // Do it the hard way.
200 alignas(StripedMap<SideTable>) static uint8_t
201 SideTableBuf[sizeof(StripedMap<SideTable>)];
202
203 static void SideTableInit() {
204 new (SideTableBuf) StripedMap<SideTable>();
205 }
206
207 static StripedMap<SideTable>& SideTables() {
208 return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
209 }
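// The same pattern in isolation (illustrative sketch, not compiled): a raw,
// suitably aligned buffer plus placement new gives a singleton that is
// usable before C++ static initializers run and costs no extra pointer load.
// `Widget` is a hypothetical stand-in, not a runtime type.
#if 0
struct Widget { int x; Widget() : x(0) {} };
alignas(Widget) static uint8_t WidgetBuf[sizeof(Widget)];
static void WidgetInit() { new (WidgetBuf) Widget(); }   // called once, early
static Widget& TheWidget() { return *reinterpret_cast<Widget*>(WidgetBuf); }
#endif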
210
211 // anonymous namespace
212 };
213
214
215 //
216 // The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
217 //
218
219 id objc_retainBlock(id x) {
220 return (id)_Block_copy(x);
221 }
222
223 //
224 // The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-)
225 //
226
227 BOOL objc_should_deallocate(id object) {
228 return YES;
229 }
230
231 id
232 objc_retain_autorelease(id obj)
233 {
234 return objc_autorelease(objc_retain(obj));
235 }
236
237
238 void
239 objc_storeStrong(id *location, id obj)
240 {
241 id prev = *location;
242 if (obj == prev) {
243 return;
244 }
245 objc_retain(obj);
246 *location = obj;
247 objc_release(prev);
248 }
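// Roughly what ARC emits for assignments to a __strong variable, shown as an
// illustrative sketch (not compiled; exact codegen is up to the compiler):
#if 0
//   __strong id obj = a;   // objc_storeStrong(&obj, a)    (prev was nil)
//   obj = b;               // objc_storeStrong(&obj, b)    (retains b, releases a)
//   obj = nil;             // objc_storeStrong(&obj, nil)  (releases b)
void example(id a, id b) {
    id obj = nil;
    objc_storeStrong(&obj, a);
    objc_storeStrong(&obj, b);
    objc_storeStrong(&obj, nil);   // leaves obj nil and releases b
}
#endif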
249
250
251 // Update a weak variable.
252 // If HaveOld is true, the variable has an existing value
253 // that needs to be cleaned up. This value might be nil.
254 // If HaveNew is true, there is a new value that needs to be
255 // assigned into the variable. This value might be nil.
256 // If CrashIfDeallocating is true, the process is halted if newObj is
257 // deallocating or newObj's class does not support weak references.
258 // If CrashIfDeallocating is false, nil is stored instead.
259 template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
260 static id
261 storeWeak(id *location, objc_object *newObj)
262 {
263 assert(HaveOld || HaveNew);
264 if (!HaveNew) assert(newObj == nil);
265
266 Class previouslyInitializedClass = nil;
267 id oldObj;
268 SideTable *oldTable;
269 SideTable *newTable;
270
271 // Acquire locks for old and new values.
272 // Order by lock address to prevent lock ordering problems.
273 // Retry if the old value changes underneath us.
274 retry:
275 if (HaveOld) {
276 oldObj = *location;
277 oldTable = &SideTables()[oldObj];
278 } else {
279 oldTable = nil;
280 }
281 if (HaveNew) {
282 newTable = &SideTables()[newObj];
283 } else {
284 newTable = nil;
285 }
286
287 SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);
288
289 if (HaveOld && *location != oldObj) {
290 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
291 goto retry;
292 }
293
294 // Prevent a deadlock between the weak reference machinery
295 // and the +initialize machinery by ensuring that no
296 // weakly-referenced object has an un-+initialized isa.
297 if (HaveNew && newObj) {
298 Class cls = newObj->getIsa();
299 if (cls != previouslyInitializedClass &&
300 !((objc_class *)cls)->isInitialized())
301 {
302 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
303 _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
304
305 // If this class is finished with +initialize then we're good.
306 // If this class is still running +initialize on this thread
307 // (i.e. +initialize called storeWeak on an instance of itself)
308 // then we may proceed but it will appear initializing and
309 // not yet initialized to the check above.
310 // Instead set previouslyInitializedClass to recognize it on retry.
311 previouslyInitializedClass = cls;
312
313 goto retry;
314 }
315 }
316
317 // Clean up old value, if any.
318 if (HaveOld) {
319 weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
320 }
321
322 // Assign new value, if any.
323 if (HaveNew) {
324 newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
325 (id)newObj, location,
326 CrashIfDeallocating);
327 // weak_register_no_lock returns nil if weak store should be rejected
328
329 // Set is-weakly-referenced bit in refcount table.
330 if (newObj && !newObj->isTaggedPointer()) {
331 newObj->setWeaklyReferenced_nolock();
332 }
333
334 // Do not set *location anywhere else. That would introduce a race.
335 *location = (id)newObj;
336 }
337 else {
338 // No new value. The storage is not changed.
339 }
340
341 SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
342
343 return (id)newObj;
344 }
345
346
347 /**
348 * This function stores a new value into a __weak variable. It would
349 * be used anywhere a __weak variable is the target of an assignment.
350 *
351 * @param location The address of the weak pointer itself
352 * @param newObj The new object this weak ptr should now point to
353 *
354 * @return \e newObj
355 */
356 id
357 objc_storeWeak(id *location, id newObj)
358 {
359 return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
360 (location, (objc_object *)newObj);
361 }
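// Illustrative use (not compiled): assignments to a __weak variable lower to
// the calls below, which unregister the old referent and register the new
// one under the locks taken by storeWeak.
#if 0
// Source:                        // Lowering (conceptually):
//   __weak id weakRef = obj;     //   objc_initWeak(&weakRef, obj);
//   weakRef = newObj;            //   objc_storeWeak(&weakRef, newObj);
//   (weakRef leaves scope)       //   objc_destroyWeak(&weakRef);
void example(id obj, id newObj) {
    id weakRef;                        // raw storage for the weak reference
    objc_initWeak(&weakRef, obj);
    objc_storeWeak(&weakRef, newObj);
    objc_destroyWeak(&weakRef);
}
#endif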
362
363
364 /**
365 * This function stores a new value into a __weak variable.
366 * If the new object is deallocating or the new object's class
367 * does not support weak references, stores nil instead.
368 *
369 * @param location The address of the weak pointer itself
370 * @param newObj The new object this weak ptr should now point to
371 *
372 * @return The value stored (either the new object or nil)
373 */
374 id
375 objc_storeWeakOrNil(id *location, id newObj)
376 {
377 return storeWeak<true/*old*/, true/*new*/, false/*crash*/>
378 (location, (objc_object *)newObj);
379 }
380
381
382 /**
383 * Initialize a fresh weak pointer to some object location.
384 * It would be used for code like:
385 *
386 * (The nil case)
387 * __weak id weakPtr;
388 * (The non-nil case)
389 * NSObject *o = ...;
390 * __weak id weakPtr = o;
391 *
392 * This function IS NOT thread-safe with respect to concurrent
393 * modifications to the weak variable. (Concurrent weak clear is safe.)
394 *
395 * @param location Address of __weak ptr.
396 * @param newObj Object ptr.
397 */
398 id
399 objc_initWeak(id *location, id newObj)
400 {
401 if (!newObj) {
402 *location = nil;
403 return nil;
404 }
405
406 return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
407 (location, (objc_object*)newObj);
408 }
409
410 id
411 objc_initWeakOrNil(id *location, id newObj)
412 {
413 if (!newObj) {
414 *location = nil;
415 return nil;
416 }
417
418 return storeWeak<false/*old*/, true/*new*/, false/*crash*/>
419 (location, (objc_object*)newObj);
420 }
421
422
423 /**
424 * Destroys the relationship between a weak pointer
425 * and the object it is referencing in the internal weak
426 * table. If the weak pointer is not referencing anything,
427 * there is no need to edit the weak table.
428 *
429 * This function IS NOT thread-safe with respect to concurrent
430 * modifications to the weak variable. (Concurrent weak clear is safe.)
431 *
432 * @param location The weak pointer address.
433 */
434 void
435 objc_destroyWeak(id *location)
436 {
437 (void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
438 (location, nil);
439 }
440
441
442 /*
443 Once upon a time we eagerly cleared *location if we saw the object
444 was deallocating. This confuses code like NSPointerFunctions which
445 tries to pre-flight the raw storage and assumes if the storage is
446 zero then the weak system is done interfering. That is false: the
447 weak system is still going to check and clear the storage later.
448 This can cause objc_weak_error complaints and crashes.
449 So we now don't touch the storage until deallocation completes.
450 */
451
452 id
453 objc_loadWeakRetained(id *location)
454 {
455 id obj;
456 id result;
457 Class cls;
458
459 SideTable *table;
460
461 retry:
462 // fixme std::atomic this load
463 obj = *location;
464 if (!obj) return nil;
465 if (obj->isTaggedPointer()) return obj;
466
467 table = &SideTables()[obj];
468
469 table->lock();
470 if (*location != obj) {
471 table->unlock();
472 goto retry;
473 }
474
475 result = obj;
476
477 cls = obj->ISA();
478 if (! cls->hasCustomRR()) {
479 // Fast case. We know +initialize is complete because
480 // default-RR can never be set before then.
481 assert(cls->isInitialized());
482 if (! obj->rootTryRetain()) {
483 result = nil;
484 }
485 }
486 else {
487 // Slow case. We must check for +initialize and call it outside
488 // the lock if necessary in order to avoid deadlocks.
489 if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) {
490 BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
491 class_getMethodImplementation(cls, SEL_retainWeakReference);
492 if ((IMP)tryRetain == _objc_msgForward) {
493 result = nil;
494 }
495 else if (! (*tryRetain)(obj, SEL_retainWeakReference)) {
496 result = nil;
497 }
498 }
499 else {
500 table->unlock();
501 _class_initialize(cls);
502 goto retry;
503 }
504 }
505
506 table->unlock();
507 return result;
508 }
509
510 /**
511 * This loads the object referenced by a weak pointer and returns it, after
512 * retaining and autoreleasing the object to ensure that it stays alive
513 * long enough for the caller to use it. This function would be used
514 * anywhere a __weak variable is used in an expression.
515 *
516 * @param location The weak pointer address
517 *
518 * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
519 */
520 id
521 objc_loadWeak(id *location)
522 {
523 if (!*location) return nil;
524 return objc_autorelease(objc_loadWeakRetained(location));
525 }
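// Illustrative use (not compiled): reading a __weak variable inside an
// expression goes through objc_loadWeakRetained (or objc_loadWeak), so the
// object stays alive for the duration of the use even if the last strong
// reference disappears on another thread.
#if 0
void example(id *weakRef) {            // weakRef: address of a __weak variable
    id strongRef = objc_loadWeakRetained(weakRef);   // +1 reference, or nil
    if (strongRef) {
        // safe to message strongRef here
    }
    objc_release(strongRef);           // balance the +1; objc_release(nil) is a no-op
}
#endif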
526
527
528 /**
529 * This function copies a weak pointer from one location to another,
530 * when the destination doesn't already contain a weak pointer. It
531 * would be used for code like:
532 *
533 * __weak id src = ...;
534 * __weak id dst = src;
535 *
536 * This function IS NOT thread-safe with respect to concurrent
537 * modifications to the destination variable. (Concurrent weak clear is safe.)
538 *
539 * @param dst The destination variable.
540 * @param src The source variable.
541 */
542 void
543 objc_copyWeak(id *dst, id *src)
544 {
545 id obj = objc_loadWeakRetained(src);
546 objc_initWeak(dst, obj);
547 objc_release(obj);
548 }
549
550 /**
551 * Move a weak pointer from one location to another.
552 * Before the move, the destination must be uninitialized.
553 * After the move, the source is nil.
554 *
555 * This function IS NOT thread-safe with respect to concurrent
556 * modifications to either weak variable. (Concurrent weak clear is safe.)
557 *
558 */
559 void
560 objc_moveWeak(id *dst, id *src)
561 {
562 objc_copyWeak(dst, src);
563 objc_destroyWeak(src);
564 *src = nil;
565 }
566
567
568 /***********************************************************************
569 Autorelease pool implementation
570
571 A thread's autorelease pool is a stack of pointers.
572 Each pointer is either an object to release, or POOL_BOUNDARY which is
573 an autorelease pool boundary.
574 A pool token is a pointer to the POOL_BOUNDARY for that pool. When
575 the pool is popped, every object hotter than that boundary is released.
576 The stack is divided into a doubly-linked list of pages. Pages are added
577 and deleted as necessary.
578 Thread-local storage points to the hot page, where newly autoreleased
579 objects are stored.
580 **********************************************************************/
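// A sketch of the token protocol described above (illustrative, not
// compiled): each push returns a token that points at its POOL_BOUNDARY
// slot, and popping with that token releases every object pushed after it.
// An @autoreleasepool block compiles down to this push/pop pair.
#if 0
void example(id a, id b) {
    void *outer = objc_autoreleasePoolPush();   // pushes a POOL_BOUNDARY
    objc_autorelease(a);
    void *inner = objc_autoreleasePoolPush();   // second boundary, same stack
    objc_autorelease(b);
    objc_autoreleasePoolPop(inner);             // releases b only
    objc_autoreleasePoolPop(outer);             // releases a
}
#endif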
581
582 // Set this to 1 to mprotect() autorelease pool contents
583 #define PROTECT_AUTORELEASEPOOL 0
584
585 // Set this to 1 to validate the entire autorelease pool header all the time
586 // (i.e. use check() instead of fastcheck() everywhere)
587 #define CHECK_AUTORELEASEPOOL (DEBUG)
588
589 BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
590 BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token));
591
592 namespace {
593
594 struct magic_t {
595 static const uint32_t M0 = 0xA1A1A1A1;
596 # define M1 "AUTORELEASE!"
597 static const size_t M1_len = 12;
598 uint32_t m[4];
599
600 magic_t() {
601 assert(M1_len == strlen(M1));
602 assert(M1_len == 3 * sizeof(m[1]));
603
604 m[0] = M0;
605 strncpy((char *)&m[1], M1, M1_len);
606 }
607
608 ~magic_t() {
609 m[0] = m[1] = m[2] = m[3] = 0;
610 }
611
612 bool check() const {
613 return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len));
614 }
615
616 bool fastcheck() const {
617 #if CHECK_AUTORELEASEPOOL
618 return check();
619 #else
620 return (m[0] == M0);
621 #endif
622 }
623
624 # undef M1
625 };
626
627
628 class AutoreleasePoolPage
629 {
630 // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is
631 // pushed and it has never contained any objects. This saves memory
632 // when the top level (i.e. libdispatch) pushes and pops pools but
633 // never uses them.
634 # define EMPTY_POOL_PLACEHOLDER ((id*)1)
635
636 # define POOL_BOUNDARY nil
637 static pthread_key_t const key = AUTORELEASE_POOL_KEY;
638 static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
639 static size_t const SIZE =
640 #if PROTECT_AUTORELEASEPOOL
641 PAGE_MAX_SIZE; // must be multiple of vm page size
642 #else
643 PAGE_MAX_SIZE; // size and alignment, power of 2
644 #endif
645 static size_t const COUNT = SIZE / sizeof(id);
646
647 magic_t const magic;
648 id *next;
649 pthread_t const thread;
650 AutoreleasePoolPage * const parent;
651 AutoreleasePoolPage *child;
652 uint32_t const depth;
653 uint32_t hiwat;
654
655 // SIZE-sizeof(*this) bytes of contents follow
656
657 static void * operator new(size_t size) {
658 return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE);
659 }
660 static void operator delete(void * p) {
661 return free(p);
662 }
663
664 inline void protect() {
665 #if PROTECT_AUTORELEASEPOOL
666 mprotect(this, SIZE, PROT_READ);
667 check();
668 #endif
669 }
670
671 inline void unprotect() {
672 #if PROTECT_AUTORELEASEPOOL
673 check();
674 mprotect(this, SIZE, PROT_READ | PROT_WRITE);
675 #endif
676 }
677
678 AutoreleasePoolPage(AutoreleasePoolPage *newParent)
679 : magic(), next(begin()), thread(pthread_self()),
680 parent(newParent), child(nil),
681 depth(parent ? 1+parent->depth : 0),
682 hiwat(parent ? parent->hiwat : 0)
683 {
684 if (parent) {
685 parent->check();
686 assert(!parent->child);
687 parent->unprotect();
688 parent->child = this;
689 parent->protect();
690 }
691 protect();
692 }
693
694 ~AutoreleasePoolPage()
695 {
696 check();
697 unprotect();
698 assert(empty());
699
700 // Not recursive: we don't want to blow out the stack
701 // if a thread accumulates a stupendous amount of garbage
702 assert(!child);
703 }
704
705
706 void busted(bool die = true)
707 {
708 magic_t right;
709 (die ? _objc_fatal : _objc_inform)
710 ("autorelease pool page %p corrupted\n"
711 " magic 0x%08x 0x%08x 0x%08x 0x%08x\n"
712 " should be 0x%08x 0x%08x 0x%08x 0x%08x\n"
713 " pthread %p\n"
714 " should be %p\n",
715 this,
716 magic.m[0], magic.m[1], magic.m[2], magic.m[3],
717 right.m[0], right.m[1], right.m[2], right.m[3],
718 this->thread, pthread_self());
719 }
720
721 void check(bool die = true)
722 {
723 if (!magic.check() || !pthread_equal(thread, pthread_self())) {
724 busted(die);
725 }
726 }
727
728 void fastcheck(bool die = true)
729 {
730 #if CHECK_AUTORELEASEPOOL
731 check(die);
732 #else
733 if (! magic.fastcheck()) {
734 busted(die);
735 }
736 #endif
737 }
738
739
740 id * begin() {
741 return (id *) ((uint8_t *)this+sizeof(*this));
742 }
743
744 id * end() {
745 return (id *) ((uint8_t *)this+SIZE);
746 }
747
748 bool empty() {
749 return next == begin();
750 }
751
752 bool full() {
753 return next == end();
754 }
755
756 bool lessThanHalfFull() {
757 return (next - begin() < (end() - begin()) / 2);
758 }
759
760 id *add(id obj)
761 {
762 assert(!full());
763 unprotect();
764 id *ret = next; // faster than `return next-1` because of aliasing
765 *next++ = obj;
766 protect();
767 return ret;
768 }
769
770 void releaseAll()
771 {
772 releaseUntil(begin());
773 }
774
775 void releaseUntil(id *stop)
776 {
777 // Not recursive: we don't want to blow out the stack
778 // if a thread accumulates a stupendous amount of garbage
779
780 while (this->next != stop) {
781 // Restart from hotPage() every time, in case -release
782 // autoreleased more objects
783 AutoreleasePoolPage *page = hotPage();
784
785 // fixme I think this `while` can be `if`, but I can't prove it
786 while (page->empty()) {
787 page = page->parent;
788 setHotPage(page);
789 }
790
791 page->unprotect();
792 id obj = *--page->next;
793 memset((void*)page->next, SCRIBBLE, sizeof(*page->next));
794 page->protect();
795
796 if (obj != POOL_BOUNDARY) {
797 objc_release(obj);
798 }
799 }
800
801 setHotPage(this);
802
803 #if DEBUG
804 // we expect any children to be completely empty
805 for (AutoreleasePoolPage *page = child; page; page = page->child) {
806 assert(page->empty());
807 }
808 #endif
809 }
810
811 void kill()
812 {
813 // Not recursive: we don't want to blow out the stack
814 // if a thread accumulates a stupendous amount of garbage
815 AutoreleasePoolPage *page = this;
816 while (page->child) page = page->child;
817
818 AutoreleasePoolPage *deathptr;
819 do {
820 deathptr = page;
821 page = page->parent;
822 if (page) {
823 page->unprotect();
824 page->child = nil;
825 page->protect();
826 }
827 delete deathptr;
828 } while (deathptr != this);
829 }
830
831 static void tls_dealloc(void *p)
832 {
833 if (p == (void*)EMPTY_POOL_PLACEHOLDER) {
834 // No objects or pool pages to clean up here.
835 return;
836 }
837
838 // reinstate TLS value while we work
839 setHotPage((AutoreleasePoolPage *)p);
840
841 if (AutoreleasePoolPage *page = coldPage()) {
842 if (!page->empty()) pop(page->begin()); // pop all of the pools
843 if (DebugMissingPools || DebugPoolAllocation) {
844 // pop() killed the pages already
845 } else {
846 page->kill(); // free all of the pages
847 }
848 }
849
850 // clear TLS value so TLS destruction doesn't loop
851 setHotPage(nil);
852 }
853
854 static AutoreleasePoolPage *pageForPointer(const void *p)
855 {
856 return pageForPointer((uintptr_t)p);
857 }
858
859 static AutoreleasePoolPage *pageForPointer(uintptr_t p)
860 {
861 AutoreleasePoolPage *result;
862 uintptr_t offset = p % SIZE;
863
864 assert(offset >= sizeof(AutoreleasePoolPage));
865
866 result = (AutoreleasePoolPage *)(p - offset);
867 result->fastcheck();
868
869 return result;
870 }
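// Worked example of the arithmetic above (illustrative only). Pages are
// allocated SIZE-aligned by operator new (malloc_zone_memalign), so any
// interior pointer's page base is the pointer rounded down to a multiple of
// SIZE. With SIZE == 4096 purely for the sake of the example:
//   p        = 0x10234567ff8
//   offset   = p % 4096   = 0xff8
//   pagebase = p - offset = 0x10234567000   (equivalently p & ~(SIZE-1))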
871
872
873 static inline bool haveEmptyPoolPlaceholder()
874 {
875 id *tls = (id *)tls_get_direct(key);
876 return (tls == EMPTY_POOL_PLACEHOLDER);
877 }
878
879 static inline id* setEmptyPoolPlaceholder()
880 {
881 assert(tls_get_direct(key) == nil);
882 tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER);
883 return EMPTY_POOL_PLACEHOLDER;
884 }
885
886 static inline AutoreleasePoolPage *hotPage()
887 {
888 AutoreleasePoolPage *result = (AutoreleasePoolPage *)
889 tls_get_direct(key);
890 if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil;
891 if (result) result->fastcheck();
892 return result;
893 }
894
895 static inline void setHotPage(AutoreleasePoolPage *page)
896 {
897 if (page) page->fastcheck();
898 tls_set_direct(key, (void *)page);
899 }
900
901 static inline AutoreleasePoolPage *coldPage()
902 {
903 AutoreleasePoolPage *result = hotPage();
904 if (result) {
905 while (result->parent) {
906 result = result->parent;
907 result->fastcheck();
908 }
909 }
910 return result;
911 }
912
913
914 static inline id *autoreleaseFast(id obj)
915 {
916 AutoreleasePoolPage *page = hotPage();
917 if (page && !page->full()) {
918 return page->add(obj);
919 } else if (page) {
920 return autoreleaseFullPage(obj, page);
921 } else {
922 return autoreleaseNoPage(obj);
923 }
924 }
925
926 static __attribute__((noinline))
927 id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page)
928 {
929 // The hot page is full.
930 // Step to the next non-full page, adding a new page if necessary.
931 // Then add the object to that page.
932 assert(page == hotPage());
933 assert(page->full() || DebugPoolAllocation);
934
935 do {
936 if (page->child) page = page->child;
937 else page = new AutoreleasePoolPage(page);
938 } while (page->full());
939
940 setHotPage(page);
941 return page->add(obj);
942 }
943
944 static __attribute__((noinline))
945 id *autoreleaseNoPage(id obj)
946 {
947 // "No page" could mean no pool has been pushed
948 // or an empty placeholder pool has been pushed and has no contents yet
949 assert(!hotPage());
950
951 bool pushExtraBoundary = false;
952 if (haveEmptyPoolPlaceholder()) {
953 // We are pushing a second pool over the empty placeholder pool
954 // or pushing the first object into the empty placeholder pool.
955 // Before doing that, push a pool boundary on behalf of the pool
956 // that is currently represented by the empty placeholder.
957 pushExtraBoundary = true;
958 }
959 else if (obj != POOL_BOUNDARY && DebugMissingPools) {
960 // We are pushing an object with no pool in place,
961 // and no-pool debugging was requested by environment.
962 _objc_inform("MISSING POOLS: (%p) Object %p of class %s "
963 "autoreleased with no pool in place - "
964 "just leaking - break on "
965 "objc_autoreleaseNoPool() to debug",
966 pthread_self(), (void*)obj, object_getClassName(obj));
967 objc_autoreleaseNoPool(obj);
968 return nil;
969 }
970 else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) {
971 // We are pushing a pool with no pool in place,
972 // and alloc-per-pool debugging was not requested.
973 // Install and return the empty pool placeholder.
974 return setEmptyPoolPlaceholder();
975 }
976
977 // We are pushing an object or a non-placeholder'd pool.
978
979 // Install the first page.
980 AutoreleasePoolPage *page = new AutoreleasePoolPage(nil);
981 setHotPage(page);
982
983 // Push a boundary on behalf of the previously-placeholder'd pool.
984 if (pushExtraBoundary) {
985 page->add(POOL_BOUNDARY);
986 }
987
988 // Push the requested object or pool.
989 return page->add(obj);
990 }
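// Illustrative trace of the placeholder behavior handled above (not
// compiled): the first push with no pool installs only EMPTY_POOL_PLACEHOLDER
// in TLS; the first autorelease then materializes a real page and pushes a
// boundary on behalf of the placeholder'd pool before the object itself.
#if 0
void example(id obj) {
    void *token = objc_autoreleasePoolPush();  // TLS = EMPTY_POOL_PLACEHOLDER
    objc_autorelease(obj);                     // new page holds [POOL_BOUNDARY, obj]
    objc_autoreleasePoolPop(token);            // releases obj and clears the pool
}
#endif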
991
992
993 static __attribute__((noinline))
994 id *autoreleaseNewPage(id obj)
995 {
996 AutoreleasePoolPage *page = hotPage();
997 if (page) return autoreleaseFullPage(obj, page);
998 else return autoreleaseNoPage(obj);
999 }
1000
1001 public:
1002 static inline id autorelease(id obj)
1003 {
1004 assert(obj);
1005 assert(!obj->isTaggedPointer());
1006 id *dest __unused = autoreleaseFast(obj);
1007 assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj);
1008 return obj;
1009 }
1010
1011
1012 static inline void *push()
1013 {
1014 id *dest;
1015 if (DebugPoolAllocation) {
1016 // Each autorelease pool starts on a new pool page.
1017 dest = autoreleaseNewPage(POOL_BOUNDARY);
1018 } else {
1019 dest = autoreleaseFast(POOL_BOUNDARY);
1020 }
1021 assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY);
1022 return dest;
1023 }
1024
1025 static void badPop(void *token)
1026 {
1027 // Error. For bincompat purposes this is not
1028 // fatal in executables built with old SDKs.
1029
1030 if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0)) {
1031 // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal.
1032 _objc_fatal
1033 ("Invalid or prematurely-freed autorelease pool %p.", token);
1034 }
1035
1036 // Old SDK. Bad pop is warned once.
1037 static bool complained = false;
1038 if (!complained) {
1039 complained = true;
1040 _objc_inform_now_and_on_crash
1041 ("Invalid or prematurely-freed autorelease pool %p. "
1042 "Set a breakpoint on objc_autoreleasePoolInvalid to debug. "
1043 "Proceeding anyway because the app is old "
1044 "(SDK version " SDK_FORMAT "). Memory errors are likely.",
1045 token, FORMAT_SDK(sdkVersion()));
1046 }
1047 objc_autoreleasePoolInvalid(token);
1048 }
1049
1050 static inline void pop(void *token)
1051 {
1052 AutoreleasePoolPage *page;
1053 id *stop;
1054
1055 if (token == (void*)EMPTY_POOL_PLACEHOLDER) {
1056 // Popping the top-level placeholder pool.
1057 if (hotPage()) {
1058 // Pool was used. Pop its contents normally.
1059 // Pool pages remain allocated for re-use as usual.
1060 pop(coldPage()->begin());
1061 } else {
1062 // Pool was never used. Clear the placeholder.
1063 setHotPage(nil);
1064 }
1065 return;
1066 }
1067
1068 page = pageForPointer(token);
1069 stop = (id *)token;
1070 if (*stop != POOL_BOUNDARY) {
1071 if (stop == page->begin() && !page->parent) {
1072 // Start of coldest page may correctly not be POOL_BOUNDARY:
1073 // 1. top-level pool is popped, leaving the cold page in place
1074 // 2. an object is autoreleased with no pool
1075 } else {
1076 // Error. For bincompat purposes this is not
1077 // fatal in executables built with old SDKs.
1078 return badPop(token);
1079 }
1080 }
1081
1082 if (PrintPoolHiwat) printHiwat();
1083
1084 page->releaseUntil(stop);
1085
1086 // memory: delete empty children
1087 if (DebugPoolAllocation && page->empty()) {
1088 // special case: delete everything during page-per-pool debugging
1089 AutoreleasePoolPage *parent = page->parent;
1090 page->kill();
1091 setHotPage(parent);
1092 } else if (DebugMissingPools && page->empty() && !page->parent) {
1093 // special case: delete everything for pop(top)
1094 // when debugging missing autorelease pools
1095 page->kill();
1096 setHotPage(nil);
1097 }
1098 else if (page->child) {
1099 // hysteresis: keep one empty child if page is more than half full
1100 if (page->lessThanHalfFull()) {
1101 page->child->kill();
1102 }
1103 else if (page->child->child) {
1104 page->child->child->kill();
1105 }
1106 }
1107 }
1108
1109 static void init()
1110 {
1111 int r __unused = pthread_key_init_np(AutoreleasePoolPage::key,
1112 AutoreleasePoolPage::tls_dealloc);
1113 assert(r == 0);
1114 }
1115
1116 void print()
1117 {
1118 _objc_inform("[%p] ................ PAGE %s %s %s", this,
1119 full() ? "(full)" : "",
1120 this == hotPage() ? "(hot)" : "",
1121 this == coldPage() ? "(cold)" : "");
1122 check(false);
1123 for (id *p = begin(); p < next; p++) {
1124 if (*p == POOL_BOUNDARY) {
1125 _objc_inform("[%p] ################ POOL %p", p, p);
1126 } else {
1127 _objc_inform("[%p] %#16lx %s",
1128 p, (unsigned long)*p, object_getClassName(*p));
1129 }
1130 }
1131 }
1132
1133 static void printAll()
1134 {
1135 _objc_inform("##############");
1136 _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self());
1137
1138 AutoreleasePoolPage *page;
1139 ptrdiff_t objects = 0;
1140 for (page = coldPage(); page; page = page->child) {
1141 objects += page->next - page->begin();
1142 }
1143 _objc_inform("%llu releases pending.", (unsigned long long)objects);
1144
1145 if (haveEmptyPoolPlaceholder()) {
1146 _objc_inform("[%p] ................ PAGE (placeholder)",
1147 EMPTY_POOL_PLACEHOLDER);
1148 _objc_inform("[%p] ################ POOL (placeholder)",
1149 EMPTY_POOL_PLACEHOLDER);
1150 }
1151 else {
1152 for (page = coldPage(); page; page = page->child) {
1153 page->print();
1154 }
1155 }
1156
1157 _objc_inform("##############");
1158 }
1159
1160 static void printHiwat()
1161 {
1162 // Check and propagate high water mark
1163 // Ignore high water marks under 256 to suppress noise.
1164 AutoreleasePoolPage *p = hotPage();
1165 uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin());
1166 if (mark > p->hiwat && mark > 256) {
1167 for( ; p; p = p->parent) {
1168 p->unprotect();
1169 p->hiwat = mark;
1170 p->protect();
1171 }
1172
1173 _objc_inform("POOL HIGHWATER: new high water mark of %u "
1174 "pending releases for thread %p:",
1175 mark, pthread_self());
1176
1177 void *stack[128];
1178 int count = backtrace(stack, sizeof(stack)/sizeof(stack[0]));
1179 char **sym = backtrace_symbols(stack, count);
1180 for (int i = 0; i < count; i++) {
1181 _objc_inform("POOL HIGHWATER: %s", sym[i]);
1182 }
1183 free(sym);
1184 }
1185 }
1186
1187 #undef POOL_BOUNDARY
1188 };
1189
1190 // anonymous namespace
1191 };
1192
1193
1194 /***********************************************************************
1195 * Slow paths for inline control
1196 **********************************************************************/
1197
1198 #if SUPPORT_NONPOINTER_ISA
1199
1200 NEVER_INLINE id
1201 objc_object::rootRetain_overflow(bool tryRetain)
1202 {
1203 return rootRetain(tryRetain, true);
1204 }
1205
1206
1207 NEVER_INLINE bool
1208 objc_object::rootRelease_underflow(bool performDealloc)
1209 {
1210 return rootRelease(performDealloc, true);
1211 }
1212
1213
1214 // Slow path of clearDeallocating()
1215 // for objects with nonpointer isa
1216 // that were ever weakly referenced
1217 // or whose retain count ever overflowed to the side table.
1218 NEVER_INLINE void
1219 objc_object::clearDeallocating_slow()
1220 {
1221 assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc));
1222
1223 SideTable& table = SideTables()[this];
1224 table.lock();
1225 if (isa.weakly_referenced) {
1226 weak_clear_no_lock(&table.weak_table, (id)this);
1227 }
1228 if (isa.has_sidetable_rc) {
1229 table.refcnts.erase(this);
1230 }
1231 table.unlock();
1232 }
1233
1234 #endif
1235
1236 __attribute__((noinline,used))
1237 id
1238 objc_object::rootAutorelease2()
1239 {
1240 assert(!isTaggedPointer());
1241 return AutoreleasePoolPage::autorelease((id)this);
1242 }
1243
1244
1245 BREAKPOINT_FUNCTION(
1246 void objc_overrelease_during_dealloc_error(void)
1247 );
1248
1249
1250 NEVER_INLINE
1251 bool
1252 objc_object::overrelease_error()
1253 {
1254 _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this);
1255 objc_overrelease_during_dealloc_error();
1256 return false; // allow rootRelease() to tail-call this
1257 }
1258
1259
1260 /***********************************************************************
1261 * Retain count operations for side table.
1262 **********************************************************************/
1263
1264
1265 #if DEBUG
1266 // Used to assert that an object is not present in the side table.
1267 bool
1268 objc_object::sidetable_present()
1269 {
1270 bool result = false;
1271 SideTable& table = SideTables()[this];
1272
1273 table.lock();
1274
1275 RefcountMap::iterator it = table.refcnts.find(this);
1276 if (it != table.refcnts.end()) result = true;
1277
1278 if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
1279
1280 table.unlock();
1281
1282 return result;
1283 }
1284 #endif
1285
1286 #if SUPPORT_NONPOINTER_ISA
1287
1288 void
1289 objc_object::sidetable_lock()
1290 {
1291 SideTable& table = SideTables()[this];
1292 table.lock();
1293 }
1294
1295 void
1296 objc_object::sidetable_unlock()
1297 {
1298 SideTable& table = SideTables()[this];
1299 table.unlock();
1300 }
1301
1302
1303 // Move the entire retain count to the side table,
1304 // as well as isDeallocating and weaklyReferenced.
1305 void
1306 objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc,
1307 bool isDeallocating,
1308 bool weaklyReferenced)
1309 {
1310 assert(!isa.nonpointer); // should already be changed to raw pointer
1311 SideTable& table = SideTables()[this];
1312
1313 size_t& refcntStorage = table.refcnts[this];
1314 size_t oldRefcnt = refcntStorage;
1315 // not deallocating - that was in the isa
1316 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1317 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1318
1319 uintptr_t carry;
1320 size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1321 if (carry) refcnt = SIDE_TABLE_RC_PINNED;
1322 if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
1323 if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
1324
1325 refcntStorage = refcnt;
1326 }
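// Worked example of the arithmetic above (illustrative only). Moving an
// extra_rc of 5 for a deallocating, weakly referenced object into an empty
// side table slot:
//   refcnt  = addc(0, 5 << SIDE_TABLE_RC_SHIFT, 0, &carry)   // 0x14, no carry
//   refcnt |= SIDE_TABLE_DEALLOCATING                        // 0x16
//   refcnt |= SIDE_TABLE_WEAKLY_REFERENCED                    // 0x17
// If the shifted count overflowed the word, carry would be set and the stored
// value would be SIDE_TABLE_RC_PINNED instead, pinning the count at maximum.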
1327
1328
1329 // Move some retain counts to the side table from the isa field.
1330 // Returns true if the object is now pinned.
1331 bool
1332 objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
1333 {
1334 assert(isa.nonpointer);
1335 SideTable& table = SideTables()[this];
1336
1337 size_t& refcntStorage = table.refcnts[this];
1338 size_t oldRefcnt = refcntStorage;
1339 // isa-side bits should not be set here
1340 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1341 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1342
1343 if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
1344
1345 uintptr_t carry;
1346 size_t newRefcnt =
1347 addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
1348 if (carry) {
1349 refcntStorage =
1350 SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK);
1351 return true;
1352 }
1353 else {
1354 refcntStorage = newRefcnt;
1355 return false;
1356 }
1357 }
1358
1359
1360 // Move some retain counts from the side table to the isa field.
1361 // Returns the actual count subtracted, which may be less than the request.
1362 size_t
1363 objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
1364 {
1365 assert(isa.nonpointer);
1366 SideTable& table = SideTables()[this];
1367
1368 RefcountMap::iterator it = table.refcnts.find(this);
1369 if (it == table.refcnts.end() || it->second == 0) {
1370 // Side table retain count is zero. Can't borrow.
1371 return 0;
1372 }
1373 size_t oldRefcnt = it->second;
1374
1375 // isa-side bits should not be set here
1376 assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
1377 assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
1378
1379 size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
1380 assert(oldRefcnt > newRefcnt); // shouldn't underflow
1381 it->second = newRefcnt;
1382 return delta_rc;
1383 }
1384
1385
1386 size_t
1387 objc_object::sidetable_getExtraRC_nolock()
1388 {
1389 assert(isa.nonpointer);
1390 SideTable& table = SideTables()[this];
1391 RefcountMap::iterator it = table.refcnts.find(this);
1392 if (it == table.refcnts.end()) return 0;
1393 else return it->second >> SIDE_TABLE_RC_SHIFT;
1394 }
1395
1396
1397 // SUPPORT_NONPOINTER_ISA
1398 #endif
1399
1400
1401 id
1402 objc_object::sidetable_retain()
1403 {
1404 #if SUPPORT_NONPOINTER_ISA
1405 assert(!isa.nonpointer);
1406 #endif
1407 SideTable& table = SideTables()[this];
1408
1409 table.lock();
1410 size_t& refcntStorage = table.refcnts[this];
1411 if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
1412 refcntStorage += SIDE_TABLE_RC_ONE;
1413 }
1414 table.unlock();
1415
1416 return (id)this;
1417 }
1418
1419
1420 bool
1421 objc_object::sidetable_tryRetain()
1422 {
1423 #if SUPPORT_NONPOINTER_ISA
1424 assert(!isa.nonpointer);
1425 #endif
1426 SideTable& table = SideTables()[this];
1427
1428 // NO SPINLOCK HERE
1429 // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
1430 // which already acquired the lock on our behalf.
1431
1432 // fixme can't do this efficiently with os_lock_handoff_s
1433 // if (table.slock == 0) {
1434 // _objc_fatal("Do not call -_tryRetain.");
1435 // }
1436
1437 bool result = true;
1438 RefcountMap::iterator it = table.refcnts.find(this);
1439 if (it == table.refcnts.end()) {
1440 table.refcnts[this] = SIDE_TABLE_RC_ONE;
1441 } else if (it->second & SIDE_TABLE_DEALLOCATING) {
1442 result = false;
1443 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1444 it->second += SIDE_TABLE_RC_ONE;
1445 }
1446
1447 return result;
1448 }
1449
1450
1451 uintptr_t
1452 objc_object::sidetable_retainCount()
1453 {
1454 SideTable& table = SideTables()[this];
1455
1456 size_t refcnt_result = 1;
1457
1458 table.lock();
1459 RefcountMap::iterator it = table.refcnts.find(this);
1460 if (it != table.refcnts.end()) {
1461 // this is valid for SIDE_TABLE_RC_PINNED too
1462 refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
1463 }
1464 table.unlock();
1465 return refcnt_result;
1466 }
1467
1468
1469 bool
1470 objc_object::sidetable_isDeallocating()
1471 {
1472 SideTable& table = SideTables()[this];
1473
1474 // NO SPINLOCK HERE
1475 // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
1476 // which already acquired the lock on our behalf.
1477
1478
1479 // fixme can't do this efficiently with os_lock_handoff_s
1480 // if (table.slock == 0) {
1481 // _objc_fatal("Do not call -_isDeallocating.");
1482 // }
1483
1484 RefcountMap::iterator it = table.refcnts.find(this);
1485 return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
1486 }
1487
1488
1489 bool
1490 objc_object::sidetable_isWeaklyReferenced()
1491 {
1492 bool result = false;
1493
1494 SideTable& table = SideTables()[this];
1495 table.lock();
1496
1497 RefcountMap::iterator it = table.refcnts.find(this);
1498 if (it != table.refcnts.end()) {
1499 result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
1500 }
1501
1502 table.unlock();
1503
1504 return result;
1505 }
1506
1507
1508 void
1509 objc_object::sidetable_setWeaklyReferenced_nolock()
1510 {
1511 #if SUPPORT_NONPOINTER_ISA
1512 assert(!isa.nonpointer);
1513 #endif
1514
1515 SideTable& table = SideTables()[this];
1516
1517 table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
1518 }
1519
1520
1521 // rdar://20206767
1522 // return uintptr_t instead of bool so that the various raw-isa
1523 // -release paths all return zero in eax
1524 uintptr_t
1525 objc_object::sidetable_release(bool performDealloc)
1526 {
1527 #if SUPPORT_NONPOINTER_ISA
1528 assert(!isa.nonpointer);
1529 #endif
1530 SideTable& table = SideTables()[this];
1531
1532 bool do_dealloc = false;
1533
1534 table.lock();
1535 RefcountMap::iterator it = table.refcnts.find(this);
1536 if (it == table.refcnts.end()) {
1537 do_dealloc = true;
1538 table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
1539 } else if (it->second < SIDE_TABLE_DEALLOCATING) {
1540 // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
1541 do_dealloc = true;
1542 it->second |= SIDE_TABLE_DEALLOCATING;
1543 } else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
1544 it->second -= SIDE_TABLE_RC_ONE;
1545 }
1546 table.unlock();
1547 if (do_dealloc && performDealloc) {
1548 ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
1549 }
1550 return do_dealloc;
1551 }
1552
1553
1554 void
1555 objc_object::sidetable_clearDeallocating()
1556 {
1557 SideTable& table = SideTables()[this];
1558
1559 // clear any weak table items
1560 // clear extra retain count and deallocating bit
1561 // (fixme warn or abort if extra retain count == 0 ?)
1562 table.lock();
1563 RefcountMap::iterator it = table.refcnts.find(this);
1564 if (it != table.refcnts.end()) {
1565 if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
1566 weak_clear_no_lock(&table.weak_table, (id)this);
1567 }
1568 table.refcnts.erase(it);
1569 }
1570 table.unlock();
1571 }
1572
1573
1574 /***********************************************************************
1575 * Optimized retain/release/autorelease entrypoints
1576 **********************************************************************/
1577
1578
1579 #if __OBJC2__
1580
1581 __attribute__((aligned(16)))
1582 id
1583 objc_retain(id obj)
1584 {
1585 if (!obj) return obj;
1586 if (obj->isTaggedPointer()) return obj;
1587 return obj->retain();
1588 }
1589
1590
1591 __attribute__((aligned(16)))
1592 void
1593 objc_release(id obj)
1594 {
1595 if (!obj) return;
1596 if (obj->isTaggedPointer()) return;
1597 return obj->release();
1598 }
1599
1600
1601 __attribute__((aligned(16)))
1602 id
1603 objc_autorelease(id obj)
1604 {
1605 if (!obj) return obj;
1606 if (obj->isTaggedPointer()) return obj;
1607 return obj->autorelease();
1608 }
1609
1610
1611 // OBJC2
1612 #else
1613 // not OBJC2
1614
1615
1616 id objc_retain(id obj) { return [obj retain]; }
1617 void objc_release(id obj) { [obj release]; }
1618 id objc_autorelease(id obj) { return [obj autorelease]; }
1619
1620
1621 #endif
1622
1623
1624 /***********************************************************************
1625 * Basic operations for root class implementations a.k.a. _objc_root*()
1626 **********************************************************************/
1627
1628 bool
1629 _objc_rootTryRetain(id obj)
1630 {
1631 assert(obj);
1632
1633 return obj->rootTryRetain();
1634 }
1635
1636 bool
1637 _objc_rootIsDeallocating(id obj)
1638 {
1639 assert(obj);
1640
1641 return obj->rootIsDeallocating();
1642 }
1643
1644
1645 void
1646 objc_clear_deallocating(id obj)
1647 {
1648 assert(obj);
1649
1650 if (obj->isTaggedPointer()) return;
1651 obj->clearDeallocating();
1652 }
1653
1654
1655 bool
1656 _objc_rootReleaseWasZero(id obj)
1657 {
1658 assert(obj);
1659
1660 return obj->rootReleaseShouldDealloc();
1661 }
1662
1663
1664 id
1665 _objc_rootAutorelease(id obj)
1666 {
1667 assert(obj);
1668 return obj->rootAutorelease();
1669 }
1670
1671 uintptr_t
1672 _objc_rootRetainCount(id obj)
1673 {
1674 assert(obj);
1675
1676 return obj->rootRetainCount();
1677 }
1678
1679
1680 id
1681 _objc_rootRetain(id obj)
1682 {
1683 assert(obj);
1684
1685 return obj->rootRetain();
1686 }
1687
1688 void
1689 _objc_rootRelease(id obj)
1690 {
1691 assert(obj);
1692
1693 obj->rootRelease();
1694 }
1695
1696
1697 id
1698 _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone)
1699 {
1700 id obj;
1701
1702 #if __OBJC2__
1703 // allocWithZone under __OBJC2__ ignores the zone parameter
1704 (void)zone;
1705 obj = class_createInstance(cls, 0);
1706 #else
1707 if (!zone) {
1708 obj = class_createInstance(cls, 0);
1709 }
1710 else {
1711 obj = class_createInstanceFromZone(cls, 0, zone);
1712 }
1713 #endif
1714
1715 if (slowpath(!obj)) obj = callBadAllocHandler(cls);
1716 return obj;
1717 }
1718
1719
1720 // Call [cls alloc] or [cls allocWithZone:nil], with appropriate
1721 // shortcutting optimizations.
1722 static ALWAYS_INLINE id
1723 callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
1724 {
1725 if (slowpath(checkNil && !cls)) return nil;
1726
1727 #if __OBJC2__
1728 if (fastpath(!cls->ISA()->hasCustomAWZ())) {
1729 // No alloc/allocWithZone implementation. Go straight to the allocator.
1730 // fixme store hasCustomAWZ in the non-meta class and
1731 // add it to canAllocFast's summary
1732 if (fastpath(cls->canAllocFast())) {
1733 // No ctors, raw isa, etc. Go straight to the metal.
1734 bool dtor = cls->hasCxxDtor();
1735 id obj = (id)calloc(1, cls->bits.fastInstanceSize());
1736 if (slowpath(!obj)) return callBadAllocHandler(cls);
1737 obj->initInstanceIsa(cls, dtor);
1738 return obj;
1739 }
1740 else {
1741 // Has ctor or raw isa or something. Use the slower path.
1742 id obj = class_createInstance(cls, 0);
1743 if (slowpath(!obj)) return callBadAllocHandler(cls);
1744 return obj;
1745 }
1746 }
1747 #endif
1748
1749 // No shortcuts available.
1750 if (allocWithZone) return [cls allocWithZone:nil];
1751 return [cls alloc];
1752 }
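// Illustrative sketch of how the three paths above line up with an ordinary
// +alloc send, assuming a class with no custom +alloc/+allocWithZone:
// override (whether the compiler calls objc_alloc directly depends on the
// toolchain):
//   [MyClass alloc]
//     -> objc_alloc(cls)
//     -> callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/)
//        - no custom AWZ and canAllocFast()   -> calloc + initInstanceIsa
//        - no custom AWZ, !canAllocFast()     -> class_createInstance(cls, 0)
//        - custom AWZ somewhere in hierarchy  -> full [cls alloc] message send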
1753
1754
1755 // Base class implementation of +alloc. cls is not nil.
1756 // Calls [cls allocWithZone:nil].
1757 id
1758 _objc_rootAlloc(Class cls)
1759 {
1760 return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/);
1761 }
1762
1763 // Calls [cls alloc].
1764 id
1765 objc_alloc(Class cls)
1766 {
1767 return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
1768 }
1769
1770 // Calls [cls allocWithZone:nil].
1771 id
1772 objc_allocWithZone(Class cls)
1773 {
1774 return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/);
1775 }
1776
1777
1778 void
1779 _objc_rootDealloc(id obj)
1780 {
1781 assert(obj);
1782
1783 obj->rootDealloc();
1784 }
1785
1786 void
1787 _objc_rootFinalize(id obj __unused)
1788 {
1789 assert(obj);
1790 _objc_fatal("_objc_rootFinalize called with garbage collection off");
1791 }
1792
1793
1794 id
1795 _objc_rootInit(id obj)
1796 {
1797 // In practice, it will be hard to rely on this function.
1798 // Many classes do not properly chain -init calls.
1799 return obj;
1800 }
1801
1802
1803 malloc_zone_t *
1804 _objc_rootZone(id obj)
1805 {
1806 (void)obj;
1807 #if __OBJC2__
1808 // allocWithZone under __OBJC2__ ignores the zone parameter
1809 return malloc_default_zone();
1810 #else
1811 malloc_zone_t *rval = malloc_zone_from_ptr(obj);
1812 return rval ? rval : malloc_default_zone();
1813 #endif
1814 }
1815
1816 uintptr_t
1817 _objc_rootHash(id obj)
1818 {
1819 return (uintptr_t)obj;
1820 }
1821
1822 void *
1823 objc_autoreleasePoolPush(void)
1824 {
1825 return AutoreleasePoolPage::push();
1826 }
1827
1828 void
1829 objc_autoreleasePoolPop(void *ctxt)
1830 {
1831 AutoreleasePoolPage::pop(ctxt);
1832 }
1833
1834
1835 void *
1836 _objc_autoreleasePoolPush(void)
1837 {
1838 return objc_autoreleasePoolPush();
1839 }
1840
1841 void
1842 _objc_autoreleasePoolPop(void *ctxt)
1843 {
1844 objc_autoreleasePoolPop(ctxt);
1845 }
1846
1847 void
1848 _objc_autoreleasePoolPrint(void)
1849 {
1850 AutoreleasePoolPage::printAll();
1851 }
1852
1853
1854 // Same as objc_release but suitable for tail-calling
1855 // if you need the value back and don't want to push a frame before this point.
1856 __attribute__((noinline))
1857 static id
1858 objc_releaseAndReturn(id obj)
1859 {
1860 objc_release(obj);
1861 return obj;
1862 }
1863
1864 // Same as objc_retainAutorelease but suitable for tail-calling
1865 // if you don't want to push a frame before this point.
1866 __attribute__((noinline))
1867 static id
1868 objc_retainAutoreleaseAndReturn(id obj)
1869 {
1870 return objc_retainAutorelease(obj);
1871 }
1872
1873
1874 // Prepare a value at +1 for return through a +0 autoreleasing convention.
1875 id
1876 objc_autoreleaseReturnValue(id obj)
1877 {
1878 if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
1879
1880 return objc_autorelease(obj);
1881 }
1882
1883 // Prepare a value at +0 for return through a +0 autoreleasing convention.
1884 id
1885 objc_retainAutoreleaseReturnValue(id obj)
1886 {
1887 if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
1888
1889 // not objc_autoreleaseReturnValue(objc_retain(obj))
1890 // because we don't need another optimization attempt
1891 return objc_retainAutoreleaseAndReturn(obj);
1892 }
1893
1894 // Accept a value returned through a +0 autoreleasing convention for use at +1.
1895 id
1896 objc_retainAutoreleasedReturnValue(id obj)
1897 {
1898 if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
1899
1900 return objc_retain(obj);
1901 }
1902
1903 // Accept a value returned through a +0 autoreleasing convention for use at +0.
1904 id
1905 objc_unsafeClaimAutoreleasedReturnValue(id obj)
1906 {
1907 if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
1908
1909 return objc_releaseAndReturn(obj);
1910 }
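// Illustrative sketch of how ARC pairs these entry points (not compiled;
// SomeClass is a hypothetical class). The callee hands back a +1 value
// through objc_autoreleaseReturnValue; if the caller immediately claims it
// with objc_retainAutoreleasedReturnValue, the runtime's handshake lets both
// sides skip the autorelease/retain pair.
#if 0
id makeObject(void) {                       // callee, +0 return convention
    id obj = [SomeClass new];               // +1
    return objc_autoreleaseReturnValue(obj);
}
void caller(void) {
    id obj = objc_retainAutoreleasedReturnValue(makeObject());  // use at +1
    // ... use obj ...
    objc_release(obj);
}
#endif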
1911
1912 id
1913 objc_retainAutorelease(id obj)
1914 {
1915 return objc_autorelease(objc_retain(obj));
1916 }
1917
1918 void
1919 _objc_deallocOnMainThreadHelper(void *context)
1920 {
1921 id obj = (id)context;
1922 [obj dealloc];
1923 }
1924
1925 // convert objc_objectptr_t to id, callee must take ownership.
1926 id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1927
1928 // convert objc_objectptr_t to id, without ownership transfer.
1929 id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; }
1930
1931 // convert id to objc_objectptr_t, no ownership transfer.
1932 objc_objectptr_t objc_unretainedPointer(id object) { return object; }
1933
1934
1935 void arr_init(void)
1936 {
1937 AutoreleasePoolPage::init();
1938 SideTableInit();
1939 }
1940
1941
1942 #if SUPPORT_TAGGED_POINTERS
1943
1944 // Placeholder for old debuggers. When they inspect an
1945 // extended tagged pointer object they will see this isa.
1946
1947 @interface __NSUnrecognizedTaggedPointer : NSObject
1948 @end
1949
1950 @implementation __NSUnrecognizedTaggedPointer
1951 +(void) load { }
1952 -(id) retain { return self; }
1953 -(oneway void) release { }
1954 -(id) autorelease { return self; }
1955 @end
1956
1957 #endif
1958
1959
1960 @implementation NSObject
1961
1962 + (void)load {
1963 }
1964
1965 + (void)initialize {
1966 }
1967
1968 + (id)self {
1969 return (id)self;
1970 }
1971
1972 - (id)self {
1973 return self;
1974 }
1975
1976 + (Class)class {
1977 return self;
1978 }
1979
1980 - (Class)class {
1981 return object_getClass(self);
1982 }
1983
1984 + (Class)superclass {
1985 return self->superclass;
1986 }
1987
1988 - (Class)superclass {
1989 return [self class]->superclass;
1990 }
1991
1992 + (BOOL)isMemberOfClass:(Class)cls {
1993 return object_getClass((id)self) == cls;
1994 }
1995
1996 - (BOOL)isMemberOfClass:(Class)cls {
1997 return [self class] == cls;
1998 }
1999
2000 + (BOOL)isKindOfClass:(Class)cls {
2001 for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
2002 if (tcls == cls) return YES;
2003 }
2004 return NO;
2005 }
2006
2007 - (BOOL)isKindOfClass:(Class)cls {
2008 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2009 if (tcls == cls) return YES;
2010 }
2011 return NO;
2012 }
2013
2014 + (BOOL)isSubclassOfClass:(Class)cls {
2015 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2016 if (tcls == cls) return YES;
2017 }
2018 return NO;
2019 }
2020
2021 + (BOOL)isAncestorOfObject:(NSObject *)obj {
2022 for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
2023 if (tcls == self) return YES;
2024 }
2025 return NO;
2026 }
2027
2028 + (BOOL)instancesRespondToSelector:(SEL)sel {
2029 if (!sel) return NO;
2030 return class_respondsToSelector(self, sel);
2031 }
2032
2033 + (BOOL)respondsToSelector:(SEL)sel {
2034 if (!sel) return NO;
2035 return class_respondsToSelector_inst(object_getClass(self), sel, self);
2036 }
2037
2038 - (BOOL)respondsToSelector:(SEL)sel {
2039 if (!sel) return NO;
2040 return class_respondsToSelector_inst([self class], sel, self);
2041 }
2042
2043 + (BOOL)conformsToProtocol:(Protocol *)protocol {
2044 if (!protocol) return NO;
2045 for (Class tcls = self; tcls; tcls = tcls->superclass) {
2046 if (class_conformsToProtocol(tcls, protocol)) return YES;
2047 }
2048 return NO;
2049 }
2050
2051 - (BOOL)conformsToProtocol:(Protocol *)protocol {
2052 if (!protocol) return NO;
2053 for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
2054 if (class_conformsToProtocol(tcls, protocol)) return YES;
2055 }
2056 return NO;
2057 }
2058
2059 + (NSUInteger)hash {
2060 return _objc_rootHash(self);
2061 }
2062
2063 - (NSUInteger)hash {
2064 return _objc_rootHash(self);
2065 }
2066
2067 + (BOOL)isEqual:(id)obj {
2068 return obj == (id)self;
2069 }
2070
2071 - (BOOL)isEqual:(id)obj {
2072 return obj == self;
2073 }
2074
2075
2076 + (BOOL)isFault {
2077 return NO;
2078 }
2079
2080 - (BOOL)isFault {
2081 return NO;
2082 }
2083
2084 + (BOOL)isProxy {
2085 return NO;
2086 }
2087
2088 - (BOOL)isProxy {
2089 return NO;
2090 }
2091
2092
2093 + (IMP)instanceMethodForSelector:(SEL)sel {
2094 if (!sel) [self doesNotRecognizeSelector:sel];
2095 return class_getMethodImplementation(self, sel);
2096 }
2097
2098 + (IMP)methodForSelector:(SEL)sel {
2099 if (!sel) [self doesNotRecognizeSelector:sel];
2100 return object_getMethodImplementation((id)self, sel);
2101 }
2102
2103 - (IMP)methodForSelector:(SEL)sel {
2104 if (!sel) [self doesNotRecognizeSelector:sel];
2105 return object_getMethodImplementation(self, sel);
2106 }
2107
2108 + (BOOL)resolveClassMethod:(SEL)sel {
2109 return NO;
2110 }
2111
2112 + (BOOL)resolveInstanceMethod:(SEL)sel {
2113 return NO;
2114 }
2115
2116 // Replaced by CF (throws an NSException)
2117 + (void)doesNotRecognizeSelector:(SEL)sel {
2118 _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p",
2119 class_getName(self), sel_getName(sel), self);
2120 }
2121
2122 // Replaced by CF (throws an NSException)
2123 - (void)doesNotRecognizeSelector:(SEL)sel {
2124 _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p",
2125 object_getClassName(self), sel_getName(sel), self);
2126 }
2127
2128
2129 + (id)performSelector:(SEL)sel {
2130 if (!sel) [self doesNotRecognizeSelector:sel];
2131 return ((id(*)(id, SEL))objc_msgSend)((id)self, sel);
2132 }
2133
2134 + (id)performSelector:(SEL)sel withObject:(id)obj {
2135 if (!sel) [self doesNotRecognizeSelector:sel];
2136 return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj);
2137 }
2138
2139 + (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2140 if (!sel) [self doesNotRecognizeSelector:sel];
2141 return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2);
2142 }
2143
2144 - (id)performSelector:(SEL)sel {
2145 if (!sel) [self doesNotRecognizeSelector:sel];
2146 return ((id(*)(id, SEL))objc_msgSend)(self, sel);
2147 }
2148
2149 - (id)performSelector:(SEL)sel withObject:(id)obj {
2150 if (!sel) [self doesNotRecognizeSelector:sel];
2151 return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj);
2152 }
2153
2154 - (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 {
2155 if (!sel) [self doesNotRecognizeSelector:sel];
2156 return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2);
2157 }
2158
2159
2160 // Replaced by CF (returns an NSMethodSignature)
2161 + (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel {
2162 _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] "
2163 "not available without CoreFoundation");
2164 }
2165
2166 // Replaced by CF (returns an NSMethodSignature)
2167 + (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2168 _objc_fatal("+[NSObject methodSignatureForSelector:] "
2169 "not available without CoreFoundation");
2170 }
2171
2172 // Replaced by CF (returns an NSMethodSignature)
2173 - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel {
2174 _objc_fatal("-[NSObject methodSignatureForSelector:] "
2175 "not available without CoreFoundation");
2176 }
2177
2178 + (void)forwardInvocation:(NSInvocation *)invocation {
2179 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2180 }
2181
2182 - (void)forwardInvocation:(NSInvocation *)invocation {
2183 [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)];
2184 }
2185
2186 + (id)forwardingTargetForSelector:(SEL)sel {
2187 return nil;
2188 }
2189
2190 - (id)forwardingTargetForSelector:(SEL)sel {
2191 return nil;
2192 }
2193
2194
2195 // Replaced by CF (returns an NSString)
2196 + (NSString *)description {
2197 return nil;
2198 }
2199
2200 // Replaced by CF (returns an NSString)
2201 - (NSString *)description {
2202 return nil;
2203 }
2204
2205 + (NSString *)debugDescription {
2206 return [self description];
2207 }
2208
2209 - (NSString *)debugDescription {
2210 return [self description];
2211 }
2212
2213
2214 + (id)new {
2215 return [callAlloc(self, false/*checkNil*/) init];
2216 }
2217
2218 + (id)retain {
2219 return (id)self;
2220 }
2221
2222 // Replaced by ObjectAlloc
2223 - (id)retain {
2224 return ((id)self)->rootRetain();
2225 }
2226
2227
2228 + (BOOL)_tryRetain {
2229 return YES;
2230 }
2231
2232 // Replaced by ObjectAlloc
2233 - (BOOL)_tryRetain {
2234 return ((id)self)->rootTryRetain();
2235 }
2236
2237 + (BOOL)_isDeallocating {
2238 return NO;
2239 }
2240
2241 - (BOOL)_isDeallocating {
2242 return ((id)self)->rootIsDeallocating();
2243 }
2244
2245 + (BOOL)allowsWeakReference {
2246 return YES;
2247 }
2248
2249 + (BOOL)retainWeakReference {
2250 return YES;
2251 }
2252
2253 - (BOOL)allowsWeakReference {
2254 return ! [self _isDeallocating];
2255 }
2256
2257 - (BOOL)retainWeakReference {
2258 return [self _tryRetain];
2259 }
2260
2261 + (oneway void)release {
2262 }
2263
2264 // Replaced by ObjectAlloc
2265 - (oneway void)release {
2266 ((id)self)->rootRelease();
2267 }
2268
2269 + (id)autorelease {
2270 return (id)self;
2271 }
2272
2273 // Replaced by ObjectAlloc
2274 - (id)autorelease {
2275 return ((id)self)->rootAutorelease();
2276 }
2277
2278 + (NSUInteger)retainCount {
2279 return ULONG_MAX;
2280 }
2281
2282 - (NSUInteger)retainCount {
2283 return ((id)self)->rootRetainCount();
2284 }
2285
2286 + (id)alloc {
2287 return _objc_rootAlloc(self);
2288 }
2289
2290 // Replaced by ObjectAlloc
2291 + (id)allocWithZone:(struct _NSZone *)zone {
2292 return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
2293 }
2294
2295 // Replaced by CF (throws an NSException)
2296 + (id)init {
2297 return (id)self;
2298 }
2299
2300 - (id)init {
2301 return _objc_rootInit(self);
2302 }
2303
2304 // Replaced by CF (throws an NSException)
2305 + (void)dealloc {
2306 }
2307
2308
2309 // Replaced by NSZombies
2310 - (void)dealloc {
2311 _objc_rootDealloc(self);
2312 }
2313
2314 // Previously used by GC. Now a placeholder for binary compatibility.
2315 - (void) finalize {
2316 }
2317
2318 + (struct _NSZone *)zone {
2319 return (struct _NSZone *)_objc_rootZone(self);
2320 }
2321
2322 - (struct _NSZone *)zone {
2323 return (struct _NSZone *)_objc_rootZone(self);
2324 }
2325
2326 + (id)copy {
2327 return (id)self;
2328 }
2329
2330 + (id)copyWithZone:(struct _NSZone *)zone {
2331 return (id)self;
2332 }
2333
2334 - (id)copy {
2335 return [(id)self copyWithZone:nil];
2336 }
2337
2338 + (id)mutableCopy {
2339 return (id)self;
2340 }
2341
2342 + (id)mutableCopyWithZone:(struct _NSZone *)zone {
2343 return (id)self;
2344 }
2345
2346 - (id)mutableCopy {
2347 return [(id)self mutableCopyWithZone:nil];
2348 }
2349
2350 @end
2351
2352