/*
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */
13 #include "Block_private.h"
17 #include <os/assumes.h>
21 #include <libkern/Block_private.h>
22 #include <libkern/OSRuntime.h>
24 #define malloc(s) kern_os_malloc((s))
25 #define free(a) kern_os_free((a))
32 #define os_assumes(_x) (_x)
35 #define os_assert(_x) assert(_x)
39 #define _CRT_SECURE_NO_WARNINGS 1
// Windows shim: emulate Darwin's OSAtomicCompareAndSwapLong on top of the
// Interlocked API. Returns true if *dst was equal to oldl and was swapped.
// fixme barrier is overkill -- see objc-os.h
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    long prev = InterlockedCompareExchange(dst, newl, oldl);
    return prev == oldl;
}
// Windows shim: emulate Darwin's OSAtomicCompareAndSwapInt on top of the
// Interlocked API. Returns true if *dst was equal to oldi and was swapped.
// fixme barrier is overkill -- see objc-os.h
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    int prev = InterlockedCompareExchange(dst, newi, oldi);
    return prev == oldi;
}
// Map the OSAtomic CAS calls onto the GCC/Clang __sync builtin.
// NOTE: the argument orders differ -- the OSAtomic form is (old, new, ptr),
// the builtin is (ptr, old, new); the macros reorder accordingly.
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
/*******************************************************************************
Internal Utilities
********************************************************************************/
64 static int32_t latching_incr_int(volatile int32_t *where
) {
66 int32_t old_value
= *where
;
67 if ((old_value
& BLOCK_REFCOUNT_MASK
) == BLOCK_REFCOUNT_MASK
) {
68 return BLOCK_REFCOUNT_MASK
;
70 if (OSAtomicCompareAndSwapInt(old_value
, old_value
+2, where
)) {
76 static bool latching_incr_int_not_deallocating(volatile int32_t *where
) {
78 int32_t old_value
= *where
;
79 if (old_value
& BLOCK_DEALLOCATING
) {
80 // if deallocating we can't do this
83 if ((old_value
& BLOCK_REFCOUNT_MASK
) == BLOCK_REFCOUNT_MASK
) {
84 // if latched, we're leaking this block, and we succeed
87 if (OSAtomicCompareAndSwapInt(old_value
, old_value
+2, where
)) {
88 // otherwise, we must store a new retained value without the deallocating bit set
95 // return should_deallocate?
96 static bool latching_decr_int_should_deallocate(volatile int32_t *where
) {
98 int32_t old_value
= *where
;
99 if ((old_value
& BLOCK_REFCOUNT_MASK
) == BLOCK_REFCOUNT_MASK
) {
100 return false; // latched high
102 if ((old_value
& BLOCK_REFCOUNT_MASK
) == 0) {
103 return false; // underflow, latch low
105 int32_t new_value
= old_value
- 2;
107 if ((old_value
& (BLOCK_REFCOUNT_MASK
|BLOCK_DEALLOCATING
)) == 2) {
108 new_value
= old_value
- 1;
111 if (OSAtomicCompareAndSwapInt(old_value
, new_value
, where
)) {
118 /**************************************************************************
119 Framework callback functions and their default implementations.
120 ***************************************************************************/
122 #pragma mark Framework Callback Routines
// No-op defaults for the object memory-management callbacks; the real
// implementations are installed by the ObjC runtime / CoreFoundation
// via _Block_use_RR2() below.
static void _Block_retain_object_default(const void *ptr __unused) { }
static void _Block_release_object_default(const void *ptr __unused) { }
static void _Block_destructInstance_default(const void *aBlock __unused) {}
// Callback slots used when copying/releasing object fields captured by a
// block and when a block instance is destroyed. Initialized to the no-op
// defaults above; overwritten by _Block_use_RR2().
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;
136 /**************************************************************************
137 Callback registration from ObjC runtime and CoreFoundation
138 ***************************************************************************/
140 void _Block_use_RR2(const Block_callbacks_RR
*callbacks
) {
141 _Block_retain_object
= callbacks
->retain
;
142 _Block_release_object
= callbacks
->release
;
143 _Block_destructInstance
= callbacks
->destructInstance
;
146 /****************************************************************************
147 Accessors for block descriptor fields
148 *****************************************************************************/
150 static struct Block_descriptor_1
* _Block_descriptor_1(struct Block_layout
*aBlock
)
152 return aBlock
->descriptor
;
156 static struct Block_descriptor_2
* _Block_descriptor_2(struct Block_layout
*aBlock
)
158 if (! (aBlock
->flags
& BLOCK_HAS_COPY_DISPOSE
)) return NULL
;
159 uint8_t *desc
= (uint8_t *)aBlock
->descriptor
;
160 desc
+= sizeof(struct Block_descriptor_1
);
161 return __IGNORE_WCASTALIGN((struct Block_descriptor_2
*)desc
);
164 static struct Block_descriptor_3
* _Block_descriptor_3(struct Block_layout
*aBlock
)
166 if (! (aBlock
->flags
& BLOCK_HAS_SIGNATURE
)) return NULL
;
167 uint8_t *desc
= (uint8_t *)aBlock
->descriptor
;
168 desc
+= sizeof(struct Block_descriptor_1
);
169 if (aBlock
->flags
& BLOCK_HAS_COPY_DISPOSE
) {
170 desc
+= sizeof(struct Block_descriptor_2
);
172 return __IGNORE_WCASTALIGN((struct Block_descriptor_3
*)desc
);
175 static void _Block_call_copy_helper(void *result
, struct Block_layout
*aBlock
)
177 struct Block_descriptor_2
*desc
= _Block_descriptor_2(aBlock
);
180 (*desc
->copy
)(result
, aBlock
); // do fixup
183 static void _Block_call_dispose_helper(struct Block_layout
*aBlock
)
185 struct Block_descriptor_2
*desc
= _Block_descriptor_2(aBlock
);
188 (*desc
->dispose
)(aBlock
);
191 /*******************************************************************************
192 Internal Support routines for copying
193 ********************************************************************************/
196 #pragma mark Copy/Release support
199 // Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
200 void *_Block_copy(const void *arg
) {
201 struct Block_layout
*aBlock
;
203 if (!arg
) return NULL
;
205 // The following would be better done as a switch statement
206 aBlock
= (struct Block_layout
*)arg
;
207 if (aBlock
->flags
& BLOCK_NEEDS_FREE
) {
209 latching_incr_int(&aBlock
->flags
);
212 else if (aBlock
->flags
& BLOCK_IS_GLOBAL
) {
216 // Its a stack block. Make a copy.
217 struct Block_layout
*result
= (typeof(result
)) malloc(aBlock
->descriptor
->size
);
218 if (!result
) return NULL
;
219 memmove(result
, aBlock
, aBlock
->descriptor
->size
); // bitcopy first
220 #if __has_feature(ptrauth_calls)
221 // Resign the invoke pointer as it uses address authentication.
222 result
->invoke
= aBlock
->invoke
;
225 result
->flags
&= ~(BLOCK_REFCOUNT_MASK
|BLOCK_DEALLOCATING
); // XXX not needed
226 result
->flags
|= BLOCK_NEEDS_FREE
| 2; // logical refcount 1
227 _Block_call_copy_helper(result
, aBlock
);
228 // Set isa last so memory analysis tools see a fully-initialized object.
229 result
->isa
= _NSConcreteMallocBlock
;
235 // Runtime entry points for maintaining the sharing knowledge of byref data blocks.
237 // A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
238 // Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
239 // We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it.
240 // Otherwise we need to copy it and update the stack forwarding pointer
241 static struct Block_byref
*_Block_byref_copy(const void *arg
) {
242 struct Block_byref
*src
= (struct Block_byref
*)arg
;
244 if ((src
->forwarding
->flags
& BLOCK_REFCOUNT_MASK
) == 0) {
245 // src points to stack
246 struct Block_byref
*copy
= (struct Block_byref
*)malloc(src
->size
);
248 // byref value 4 is logical refcount of 2: one for caller, one for stack
249 copy
->flags
= src
->flags
| BLOCK_BYREF_NEEDS_FREE
| 4;
250 copy
->forwarding
= copy
; // patch heap copy to point to itself
251 src
->forwarding
= copy
; // patch stack to point to heap copy
252 copy
->size
= src
->size
;
254 if (src
->flags
& BLOCK_BYREF_HAS_COPY_DISPOSE
) {
255 // Trust copy helper to copy everything of interest
256 // If more than one field shows up in a byref block this is wrong XXX
257 struct Block_byref_2
*src2
= (struct Block_byref_2
*)(src
+1);
258 struct Block_byref_2
*copy2
= (struct Block_byref_2
*)(copy
+1);
259 copy2
->byref_keep
= src2
->byref_keep
;
260 copy2
->byref_destroy
= src2
->byref_destroy
;
262 if (src
->flags
& BLOCK_BYREF_LAYOUT_EXTENDED
) {
263 struct Block_byref_3
*src3
= (struct Block_byref_3
*)(src2
+1);
264 struct Block_byref_3
*copy3
= (struct Block_byref_3
*)(copy2
+1);
265 copy3
->layout
= src3
->layout
;
268 (*src2
->byref_keep
)(copy
, src
);
272 // This copy includes Block_byref_3, if any.
273 memmove(copy
+1, src
+1, src
->size
- sizeof(*src
));
276 // already copied to heap
277 else if ((src
->forwarding
->flags
& BLOCK_BYREF_NEEDS_FREE
) == BLOCK_BYREF_NEEDS_FREE
) {
278 latching_incr_int(&src
->forwarding
->flags
);
281 return src
->forwarding
;
284 static void _Block_byref_release(const void *arg
) {
285 struct Block_byref
*byref
= (struct Block_byref
*)arg
;
287 // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
288 byref
= byref
->forwarding
;
290 if (byref
->flags
& BLOCK_BYREF_NEEDS_FREE
) {
291 __assert_only
int32_t refcount
= byref
->flags
& BLOCK_REFCOUNT_MASK
;
293 if (latching_decr_int_should_deallocate(&byref
->flags
)) {
294 if (byref
->flags
& BLOCK_BYREF_HAS_COPY_DISPOSE
) {
295 struct Block_byref_2
*byref2
= (struct Block_byref_2
*)(byref
+1);
296 (*byref2
->byref_destroy
)(byref
);
304 /************************************************************
307 * _Block_copy, _Block_release, and (old) _Block_destroy
309 ***********************************************************/
316 // API entry point to release a copied Block
317 void _Block_release(const void *arg
) {
318 struct Block_layout
*aBlock
= (struct Block_layout
*)arg
;
320 if (aBlock
->flags
& BLOCK_IS_GLOBAL
) return;
321 if (! (aBlock
->flags
& BLOCK_NEEDS_FREE
)) return;
323 if (latching_decr_int_should_deallocate(&aBlock
->flags
)) {
324 _Block_call_dispose_helper(aBlock
);
325 _Block_destructInstance(aBlock
);
330 bool _Block_tryRetain(const void *arg
) {
331 struct Block_layout
*aBlock
= (struct Block_layout
*)arg
;
332 return latching_incr_int_not_deallocating(&aBlock
->flags
);
335 bool _Block_isDeallocating(const void *arg
) {
336 struct Block_layout
*aBlock
= (struct Block_layout
*)arg
;
337 return (aBlock
->flags
& BLOCK_DEALLOCATING
) != 0;
341 /************************************************************
343 * SPI used by other layers
345 ***********************************************************/
347 size_t Block_size(void *aBlock
) {
348 return ((struct Block_layout
*)aBlock
)->descriptor
->size
;
351 bool _Block_use_stret(void *aBlock
) {
352 struct Block_layout
*layout
= (struct Block_layout
*)aBlock
;
354 int requiredFlags
= BLOCK_HAS_SIGNATURE
| BLOCK_USE_STRET
;
355 return (layout
->flags
& requiredFlags
) == requiredFlags
;
// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) != NULL;
}
363 const char * _Block_signature(void *aBlock
)
365 struct Block_descriptor_3
*desc3
= _Block_descriptor_3((struct Block_layout
*)aBlock
);
366 if (!desc3
) return NULL
;
368 return desc3
->signature
;
371 const char * _Block_layout(void *aBlock
)
373 // Don't return extended layout to callers expecting old GC layout
374 struct Block_layout
*layout
= (struct Block_layout
*)aBlock
;
375 if (layout
->flags
& BLOCK_HAS_EXTENDED_LAYOUT
) return NULL
;
377 struct Block_descriptor_3
*desc3
= _Block_descriptor_3((struct Block_layout
*)aBlock
);
378 if (!desc3
) return NULL
;
380 return desc3
->layout
;
383 const char * _Block_extended_layout(void *aBlock
)
385 // Don't return old GC layout to callers expecting extended layout
386 struct Block_layout
*layout
= (struct Block_layout
*)aBlock
;
387 if (! (layout
->flags
& BLOCK_HAS_EXTENDED_LAYOUT
)) return NULL
;
389 struct Block_descriptor_3
*desc3
= _Block_descriptor_3((struct Block_layout
*)aBlock
);
390 if (!desc3
) return NULL
;
392 // Return empty string (all non-object bytes) instead of NULL
393 // so callers can distinguish "empty layout" from "no layout".
394 if (!desc3
->layout
) return "";
395 else return desc3
->layout
;
399 #pragma mark Compiler SPI entry points
403 /*******************************************************
405 Entry points used by the compiler - the real API!
408 A Block can reference four different kinds of things that require help when the Block is copied to the heap.
409 1) C++ stack based objects
410 2) References to Objective-C objects
414 In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
416 The flags parameter of _Block_object_assign and _Block_object_dispose is set to
417 * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
418 * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
419 * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
420 If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16)
422 So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
424 When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
426 So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
427 __block id 128+3 (0x83)
428 __block (^Block) 128+7 (0x87)
429 __weak __block id 128+3+16 (0x93)
430 __weak __block (^Block) 128+7+16 (0x97)
433 ********************************************************/
436 // When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
437 // to do the assignment.
439 void _Block_object_assign(void *destArg
, const void *object
, const int flags
) {
440 const void **dest
= (const void **)destArg
;
441 switch (os_assumes(flags
& BLOCK_ALL_COPY_DISPOSE_FLAGS
)) {
442 case BLOCK_FIELD_IS_OBJECT
:
448 _Block_retain_object(object
);
452 case BLOCK_FIELD_IS_BLOCK
:
454 void (^object)(void) = ...;
458 *dest
= _Block_copy(object
);
461 case BLOCK_FIELD_IS_BYREF
| BLOCK_FIELD_IS_WEAK
:
462 case BLOCK_FIELD_IS_BYREF
:
464 // copy the onstack __block container to the heap
465 // Note this __weak is old GC-weak/MRC-unretained.
466 // ARC-style __weak is handled by the copy helper directly.
468 __weak __block ... x;
472 *dest
= _Block_byref_copy(object
);
475 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_OBJECT
:
476 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_BLOCK
:
478 // copy the actual field held in the __block container
479 // Note this is MRC unretained __block only.
480 // ARC retained __block is handled by the copy helper directly.
482 __block void (^object)(void);
489 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_OBJECT
| BLOCK_FIELD_IS_WEAK
:
490 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_BLOCK
| BLOCK_FIELD_IS_WEAK
:
492 // copy the actual field held in the __block container
493 // Note this __weak is old GC-weak/MRC-unretained.
494 // ARC-style __weak is handled by the copy helper directly.
495 __weak __block id object;
496 __weak __block void (^object)(void);
508 // When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
509 // to help dispose of the contents
510 void _Block_object_dispose(const void *object
, const int flags
) {
511 switch (os_assumes(flags
& BLOCK_ALL_COPY_DISPOSE_FLAGS
)) {
512 case BLOCK_FIELD_IS_BYREF
| BLOCK_FIELD_IS_WEAK
:
513 case BLOCK_FIELD_IS_BYREF
:
514 // get rid of the __block data structure held in a Block
515 _Block_byref_release(object
);
517 case BLOCK_FIELD_IS_BLOCK
:
518 _Block_release(object
);
520 case BLOCK_FIELD_IS_OBJECT
:
521 _Block_release_object(object
);
523 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_OBJECT
:
524 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_BLOCK
:
525 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_OBJECT
| BLOCK_FIELD_IS_WEAK
:
526 case BLOCK_BYREF_CALLER
| BLOCK_FIELD_IS_BLOCK
| BLOCK_FIELD_IS_WEAK
:
// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
// Keep one mutable, attribute((used)) global so this library always emits a
// __DATA segment and cannot be stripped of it by the linker.
__attribute__((used))
static int let_there_be_data = 42;