// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
#include "Block_private.h"

#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#if __has_include(<os/assumes.h>)
#include <os/assumes.h>
#else
#include <assert.h>
#define os_assumes(_x) _x
#define os_assert(_x) assert(_x)
#endif
#if defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange((long volatile *)dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif
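
// Both shims have compare-and-swap semantics: atomically store _New into
// *_Ptr only if *_Ptr still equals _Old, and report whether the swap took
// place. The latching helpers below use them in the usual retry pattern:
//
//     int32_t old_value = *where;
//     ... compute new_value from old_value ...
//     if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) return ...;
//     // otherwise another thread raced us; reload and retry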
/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2

static bool isGC = false;
/*******************************************************************************
Internal Utilities
********************************************************************************/
static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}
static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}
// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}
// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
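
// A worked example of the latching scheme above, inferred from the constants
// used in this file ("| 2 // logical refcount 1" when copying a block, "| 4
// // logical 2" for byrefs): the count lives in the BLOCK_REFCOUNT_MASK bits
// of the flags word and moves in steps of 2, leaving the low bit for
// BLOCK_DEALLOCATING. A stored value of 2 is a logical refcount of 1, 4 is a
// logical 2, and so on. Once the field saturates at BLOCK_REFCOUNT_MASK it
// "latches": increments and decrements become no-ops and the object is
// deliberately leaked rather than allowed to overflow or underflow.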
/***********************
GC support stub routines
************************/

#pragma mark GC Support Routines

static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}
static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}
static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}
static void _Block_do_nothing(const void *aBlock) { }
static void _Block_retain_object_default(const void *ptr) {
}
static void _Block_release_object_default(const void *ptr) {
}
static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !defined(_WIN32)
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}
static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}
static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}
static void _Block_destructInstance_default(const void *aBlock) {}
/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;
/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/
// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC;  // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                     void (*setHasRefcount)(const void *, const bool),
                     void (*gc_assign)(void *, void **),
                     void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}
// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
    _Block_destructInstance = (void (*)(const void *))dlsym(RTLD_DEFAULT, "objc_destructInstance");
}
// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
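
// The descriptor is a sequence of structs; as the accessors below show, the
// later entries exist only when the corresponding flag bit is set, so callers
// must skip over the earlier ones in order:
//
//     struct Block_descriptor_1   (always present)
//     struct Block_descriptor_2   (iff BLOCK_HAS_COPY_DISPOSE: copy/dispose helpers)
//     struct Block_descriptor_3   (iff BLOCK_HAS_SIGNATURE: signature/layout)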
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}
static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}
static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}
static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}
static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}
/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#pragma mark Copy/Release support
// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block. Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne.
        // This allows the copy helper routines to make non-refcounted block copies under GC.
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
        // mark as GC
        if (wantsOne) {
            flags |= BLOCK_IS_GC | 2;
        }
        else {
            flags |= BLOCK_IS_GC;
        }
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
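
/*******
 Illustrative non-GC lifecycle sketch (not part of the runtime; Block_copy and
 Block_release are the Block.h macros wrapping _Block_copy/_Block_release):

     void (^stack_block)(void) = ^{ ... };                 // on the stack, no refcount
     void (^heap_block)(void) = Block_copy(stack_block);   // malloc'd copy, logical refcount 1
     Block_copy(heap_block);                               // same pointer, refcount bumped
     Block_release(heap_block);                            // refcount back to 1
     Block_release(heap_block);                            // dispose helper runs, memory freed
 ********/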
// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer.
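
/*******
 Illustrative sketch (not part of the runtime; field names approximate the
 compiler-generated byref container): for

     __block int x = 0;
     void (^b)(void) = ^{ x++; };

 the compiler rewrites every access to x, inside and outside the block, as
 roughly x.__forwarding->x. Before any copy, the forwarding pointer points at
 the stack container itself; _Block_byref_assign_copy moves the container to
 the heap and repoints the stack container's forwarding pointer there, so
 both the stack frame and all copied blocks observe the same storage.
 ********/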
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest.
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits. Blast 'em using _Block_memmove in case they're __strong.
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1, src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    os_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}
/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}
497 void _Block_release(const void *arg
) {
498 struct Block_layout
*aBlock
= (struct Block_layout
*)arg
;
500 || (aBlock
->flags
& BLOCK_IS_GLOBAL
)
501 || ((aBlock
->flags
& (BLOCK_IS_GC
|BLOCK_NEEDS_FREE
)) == 0)
503 if (aBlock
->flags
& BLOCK_IS_GC
) {
504 if (latching_decr_int_now_zero(&aBlock
->flags
)) {
505 // Tell GC we no longer have our own refcounts. GC will decr its refcount
506 // and unless someone has done a CFRetain or marked it uncollectable it will
507 // now be subject to GC reclamation.
508 _Block_setHasRefcount(aBlock
, false);
511 else if (aBlock
->flags
& BLOCK_NEEDS_FREE
) {
512 if (latching_decr_int_should_deallocate(&aBlock
->flags
)) {
513 _Block_call_dispose_helper(aBlock
);
514 _Block_destructInstance(aBlock
);
515 _Block_deallocator(aBlock
);
bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}
bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}
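
/*******
 Illustrative sketch (assumed caller, not part of this file): a weak-reference
 load in an ObjC-style runtime could use these two entry points roughly as

     void *load_weak_block(void **location) {   // hypothetical helper
         void *block = *location;
         if (!block) return NULL;
         // refuse to resurrect a block whose deallocation has already begun
         return _Block_tryRetain(block) ? block : NULL;
     }
 ********/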
// Old compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}
/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal. Called from NSAutoBlock only under GC.
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}
size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}
bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}
// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}
const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}
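
// Note (hedged): when present, the signature is an Objective-C type-encoding
// string for the block's invoke function; a block taking no arguments and
// returning void typically encodes as "v8@?0" on 64-bit targets (void return,
// with the block literal itself passed as the hidden first argument).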
const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}
const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}
#pragma mark Compiler SPI entry points

/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) OR'ed in as appropriate and 128 always OR'd in, for the following set of possibilities:
    __block id                   128+3       (0x83)
    __block (^Block)             128+7       (0x87)
    __weak __block id            128+3+16    (0x93)
    __weak __block (^Block)      128+7+16    (0x97)

********************************************************/
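
/*******
 Illustrative sketch (hand-written approximation, not actual compiler output;
 the struct, field, and helper names are invented): for

     id obj = ...;
     void (^b)(void) = ^{ use(obj); };

 the compiler would synthesize helpers along the lines of

     static void __b_copy_helper(struct my_block *dst, struct my_block *src) {
         _Block_object_assign(&dst->obj, src->obj, BLOCK_FIELD_IS_OBJECT);  // flags == 3
     }
     static void __b_dispose_helper(struct my_block *src) {
         _Block_object_dispose(src->obj, BLOCK_FIELD_IS_OBJECT);            // flags == 3
     }

 and store them in the block's Block_descriptor_2.
 ********/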
//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
         // copy the onstack __block container to the heap
         __block ... x;
         __weak __block ... x;
         [^{ x; } copy];
         ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
         // copy the actual field held in the __block container
         __block id object;
         __block void (^object)(void);
         [^{ object; } copy];
         ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        /*******
         // copy the actual field held in the __block container
         __weak __block id object;
         __weak __block void (^object)(void);
         [^{ object; } copy];
         ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}
// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents.
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}