// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <dlfcn.h>

#if __has_include(<os/assumes.h>)
#include <os/assumes.h>
#else
#include <assert.h>
#endif

#ifndef os_assumes
#define os_assumes(_x) _x
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif
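
// Note the argument-order difference these macros hide:
// OSAtomicCompareAndSwap*(old, new, ptr) maps onto
// __sync_bool_compare_and_swap(ptr, old, new).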

/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/

static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}

// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}

// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
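
/*
 * Worked example of the encoding used by the latching routines above
 * (illustrative note, not part of the original source): the refcount is
 * stored in BLOCK_REFCOUNT_MASK in units of 2, and the low bit is
 * BLOCK_DEALLOCATING, so
 *
 *     flags & BLOCK_REFCOUNT_MASK == 2   =>  logical refcount 1
 *     flags & BLOCK_REFCOUNT_MASK == 4   =>  logical refcount 2
 *
 * and a refcount latched at BLOCK_REFCOUNT_MASK itself never moves again.
 * latching_decr_int_should_deallocate turns {refcount 1, not deallocating}
 * (value 2) into {refcount 0, deallocating} (value 1) with a single CAS by
 * subtracting 1 instead of 2, which is why that special case exists.
 */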

/***********************
GC support stub routines
************************/
#pragma mark GC Support Routines

static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    (void)initialCountIsOne;
    (void)isObject;
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
}

static void _Block_release_object_default(const void *ptr) {
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

static void _Block_destructInstance_default(const void *aBlock) {
}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;

/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                     void (*setHasRefcount)(const void *, const bool),
                     void (*gc_assign)(void *, void **),
                     void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}

// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
    _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
}

// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
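
/*
 * Usage sketch (illustrative, not from the original source): a client such as
 * CoreFoundation registers its callbacks once at startup. Block_callbacks_RR
 * comes from Block_private.h; the callback names here are hypothetical.
 *
 *     static void my_retain(const void *obj);   // hypothetical
 *     static void my_release(const void *obj);  // hypothetical
 *     static void my_destruct(const void *obj); // hypothetical
 *
 *     static const Block_callbacks_RR callbacks = {
 *         sizeof(Block_callbacks_RR), my_retain, my_release, my_destruct
 *     };
 *     _Block_use_RR2(&callbacks);
 */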

/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/

static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}
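
/*
 * Layout note (illustrative, not from the original source): the descriptor
 * structs sit back to back in memory, and which trailing parts exist depends
 * on the flags, which is why the accessors above step forward by sizeof():
 *
 *     struct Block_descriptor_1   always present
 *     struct Block_descriptor_2   only if BLOCK_HAS_COPY_DISPOSE (copy/dispose)
 *     struct Block_descriptor_3   only if BLOCK_HAS_SIGNATURE (signature/layout)
 */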

static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#pragma mark Copy/Release support

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block. Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);   // XXX not needed
        if (wantsOne) {
            flags |= BLOCK_IS_GC | 2;
        }
        else {
            flags |= BLOCK_IS_GC;
        }
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}

// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment its refcount.
// Otherwise we need to copy it and update the stack forwarding pointer.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits. Blast 'em using _Block_memmove in case they're __strong
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1,
                           src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}

static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    os_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}
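
/*
 * Illustrative sketch (not from the original source) of the forwarding dance
 * the two routines above maintain, for a __block variable captured by a
 * copied Block:
 *
 *     __block int x = 0;            // compiler emits a Block_byref whose
 *                                   // forwarding pointer refers to itself
 *     void (^b)(void) = ^{ x++; };  // b captures the byref, not x directly
 *     void (^h)(void) = Block_copy(b);
 *     // _Block_byref_assign_copy moved the byref to the heap and patched the
 *     // stack copy's forwarding pointer, so the stack frame and the heap
 *     // Block now share one x, reached through ->forwarding on both sides.
 *     Block_release(h);             // _Block_byref_release drops the heap copy
 */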

/************************************************************
 *
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}
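
/*
 * Usage sketch (illustrative, not from the original source): the Block_copy()
 * and Block_release() macros in Block.h wrap these entry points.
 *
 *     int i = 10;
 *     void (^stackBlock)(void) = ^{ printf("%d\n", i); };
 *     void (^heapBlock)(void) = Block_copy(stackBlock); // calls _Block_copy
 *     heapBlock();
 *     Block_release(heapBlock);                         // calls _Block_release
 */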

// API entry point to release a copied Block
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock
        || (aBlock->flags & BLOCK_IS_GLOBAL)
        || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
        ) return;
    if (aBlock->flags & BLOCK_IS_GC) {
        if (latching_decr_int_now_zero(&aBlock->flags)) {
            // Tell GC we no longer have our own refcounts. GC will decr its refcount
            // and unless someone has done a CFRetain or marked it uncollectable it will
            // now be subject to GC reclamation.
            _Block_setHasRefcount(aBlock, false);
        }
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (latching_decr_int_should_deallocate(&aBlock->flags)) {
            _Block_call_dispose_helper(aBlock);
            _Block_destructInstance(aBlock);
            _Block_deallocator(aBlock);
        }
    }
}

bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}

// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}

/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal. Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}

size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}
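
// Note (illustrative, not from the original source): BLOCK_USE_STRET records
// that the block's invoke function returns its result through a hidden
// struct-return pointer argument, so callers that forward invocations must
// choose the struct-return calling convention when both bits are set.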

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}
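
/*
 * Example (illustrative, not from the original source): the returned string
 * is an Objective-C type encoding; for a void (^)(void) block on a 64-bit
 * platform it is typically "v8@?0" - void return, 8 bytes of arguments, and
 * the block itself ("@?") at offset 0.
 */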

const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#pragma mark Compiler SPI entry points

/*******************************************************

Entry points used by the compiler - the real API!

A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and always 128 ORed in, for the following set of possibilities:
    __block id                   128+3       (0x83)
    __block (^Block)             128+7       (0x87)
    __weak __block id            128+3+16    (0x93)
    __weak __block (^Block)      128+7+16    (0x97)

********************************************************/
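
/*
 * Illustrative sketch (hypothetical compiler output, not from the original
 * source) of the helpers described above, for a Block capturing an
 * Objective-C object `obj` and a __block variable `counter`:
 *
 *     static void __copy_helper(struct my_block *dst, struct my_block *src) {
 *         _Block_object_assign(&dst->obj, src->obj,
 *                              BLOCK_FIELD_IS_OBJECT);          // flags == 3
 *         _Block_object_assign(&dst->counter, src->counter,
 *                              BLOCK_FIELD_IS_BYREF);           // flags == 8
 *     }
 *     static void __dispose_helper(struct my_block *src) {
 *         _Block_object_dispose(src->obj, BLOCK_FIELD_IS_OBJECT);
 *         _Block_object_dispose(src->counter, BLOCK_FIELD_IS_BYREF);
 *     }
 */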

// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
        // copy the onstack __block container to the heap
        __block ... x;
        __weak __block ... x;
        [^{ x; } copy];
        ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
        // copy the actual field held in the __block container
        __block id object;
        __block void (^object)(void);
        [^{ object; } copy];
        ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        /*******
        // copy the actual field held in the __block container
        __weak __block id object;
        __weak __block void (^object)(void);
        [^{ object; } copy];
        ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents.
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}