// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if HAVE_OBJC
#define __USE_GNU
#include <dlfcn.h>
#endif
#if __has_include(<os/assumes.h>)
#include <os/assumes.h>
#else
#include <assert.h>
#endif
#ifndef os_assumes
#define os_assumes(_x) _x
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif

/***********************
Globals
************************/

#if HAVE_OBJC
static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
#endif
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4; // logical refcount of 2: one for the caller, one for the stack

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/

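// Note on the reference-count encoding (a reading aid, not part of the
// original sources): the count lives in the low bits of the 32-bit flags
// word, masked by BLOCK_REFCOUNT_MASK. It is kept in increments of 2 so
// that bit 0 stays free for BLOCK_DEALLOCATING; a stored value of 2 means
// a logical refcount of 1. A count of all ones (== BLOCK_REFCOUNT_MASK)
// is a "latch": once saturated, the object is deliberately leaked rather
// than risking overflow, and further increments and decrements are no-ops.
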
static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            // count is dropping from logical 1 to 0 and the deallocating bit
            // is not yet set: subtract 1 instead of 2, which leaves count 0
            // with BLOCK_DEALLOCATING set, and tell the caller to deallocate
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}

// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}
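
// Worked example (a reading aid, not from the original sources): for a heap
// block whose flags hold BLOCK_NEEDS_FREE | 2 (logical refcount 1):
//
//   latching_incr_int(&flags);                   // count bits 2 -> 4 (logical 2)
//   latching_decr_int_should_deallocate(&flags); // 4 -> 2, returns false
//   latching_decr_int_should_deallocate(&flags); // 2 -> 0 + BLOCK_DEALLOCATING,
//                                                // returns true: caller frees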


/***********************
GC support stub routines
************************/
#if !TARGET_OS_WIN32
#pragma mark GC Support Routines
#endif


static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    (void)initialCountIsOne;
    (void)isObject;
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
    (void)ptr;
    (void)hasRefcount;
}

#if HAVE_OBJC
static void _Block_do_nothing(const void *aBlock) { }
#endif

static void _Block_retain_object_default(const void *ptr) {
    (void)ptr;
}

static void _Block_release_object_default(const void *ptr) {
    (void)ptr;
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

#if HAVE_OBJC
static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}
#endif

static void _Block_destructInstance_default(const void *aBlock) {
    (void)aBlock;
}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;


#if HAVE_OBJC
/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Public SPI
// Called from objc-auto to turn on GC.
// version 3: 4 args, but changed 1st arg
void _Block_use_GC(void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                   void (*setHasRefcount)(const void *, const bool),
                   void (*gc_assign)(void *, void **),
                   void (*gc_assign_weak)(const void *, void *),
                   void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC; // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

// transitional
void _Block_use_GC5(void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
BLOCK_EXPORT
void _Block_use_RR(void (*retain)(const void *),
                   void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
    _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
}
#endif // HAVE_OBJC

// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
BLOCK_EXPORT
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}

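// Illustrative use (a sketch; assumes only the Block_callbacks_RR fields
// that the function above actually reads, with hypothetical helper names):
//
//   static Block_callbacks_RR callbacks;
//   callbacks.retain = my_retain;
//   callbacks.release = my_release;
//   callbacks.destructInstance = my_destruct;
//   _Block_use_RR2(&callbacks);
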
/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
#if 0
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
#endif
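
// Note on descriptor layout (a reading aid; assumes the struct definitions
// in Block_private.h): the optional descriptor parts are not separate
// allocations. Block_descriptor_2 (copy/dispose helpers) and
// Block_descriptor_3 (signature/layout strings) are emitted immediately
// after Block_descriptor_1, and are present only when the corresponding
// flag bits are set:
//
//   Block_descriptor_1   reserved, size        always present
//   Block_descriptor_2   copy, dispose         iff BLOCK_HAS_COPY_DISPOSE
//   Block_descriptor_3   signature, layout     iff BLOCK_HAS_SIGNATURE
//
// That is why the accessors below walk forward by sizeof() from the
// descriptor pointer instead of dereferencing named fields.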

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}

static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block. Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne.
        // This allows the copy helper routines to make non-refcounted block copies under GC.
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 2;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}



// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ; // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest.
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits. Blast 'em using _Block_memmove in case they're __strong.
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1,
                           src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
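
// Illustrative sketch (an assumption about compiler output, not code in this
// file): for "__block int x;" the compiler emits roughly
//
//   struct _x_byref {
//       void *isa;                   // NULL while on the stack
//       struct _x_byref *forwarding; // initially points at itself
//       int32_t flags;               // e.g. 0, or BLOCK_BYREF_HAS_COPY_DISPOSE
//       uint32_t size;               // sizeof(struct _x_byref)
//       int x;                       // the variable itself
//   };
//
// and every read or write of x goes through x.forwarding->x, which is why
// patching src->forwarding above keeps the stack and heap copies in agreement.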

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    os_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif

BLOCK_EXPORT
void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}
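
// Illustrative use (a reading aid, not part of this file): copying a stack
// block moves it to the heap with a logical refcount of 1; copying the heap
// block again just bumps the count, and each _Block_release drops it.
//
//   void (^stackBlock)(void) = ^{ /* captures */ };
//   void *heapBlock = _Block_copy(stackBlock); // flags: BLOCK_NEEDS_FREE | 2
//   _Block_release(heapBlock);                 // dispose helper runs, memory freed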


// API entry point to release a copied Block
BLOCK_EXPORT
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock
        || (aBlock->flags & BLOCK_IS_GLOBAL)
        || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
        ) return;
    if (aBlock->flags & BLOCK_IS_GC) {
        if (latching_decr_int_now_zero(&aBlock->flags)) {
            // Tell GC we no longer have our own refcounts. GC will decr its refcount
            // and unless someone has done a CFRetain or marked it uncollectable it will
            // now be subject to GC reclamation.
            _Block_setHasRefcount(aBlock, false);
        }
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (latching_decr_int_should_deallocate(&aBlock->flags)) {
            _Block_call_dispose_helper(aBlock);
            _Block_destructInstance(aBlock);
            _Block_deallocator(aBlock);
        }
    }
}

BLOCK_EXPORT
bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

BLOCK_EXPORT
bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}

// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal. Called from NSAutoBlock only under GC
BLOCK_EXPORT
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}


// SPI
BLOCK_EXPORT
size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

BLOCK_EXPORT
bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
BLOCK_EXPORT
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

BLOCK_EXPORT
const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}
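
// Aside (an assumption, not from the original sources): the returned string
// is an Objective-C @encode-style type signature. For a "void (^)(void)"
// block it is typically "v8@?0" on 64-bit targets: 'v' return type, total
// argument frame of 8 bytes, and '@?' (the block literal itself) at offset 0.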

BLOCK_EXPORT
const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

BLOCK_EXPORT
const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and always 128 ORed in, for the following set of possibilities:
    __block id                  128+3       (0x83)
    __block (^Block)            128+7       (0x87)
    __weak __block id           128+3+16    (0x93)
    __weak __block (^Block)     128+7+16    (0x97)


********************************************************/
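
// Illustrative sketch (an assumption about compiler-synthesized code, not
// part of this file): for a block that captures an Objective-C object,
//
//     id obj = ...;
//     void (^b)(void) = ^{ use(obj); };
//
// the compiler emits copy/dispose helpers morally equivalent to the
// following (struct, field, and helper names are hypothetical):
//
//     static void __b_copy_helper(struct my_block *dst, struct my_block *src) {
//         _Block_object_assign(&dst->obj, src->obj, BLOCK_FIELD_IS_OBJECT); // flags == 3
//     }
//     static void __b_dispose_helper(struct my_block *src) {
//         _Block_object_dispose(src->obj, BLOCK_FIELD_IS_OBJECT);
//     }
//
// and stores their addresses in the block's Block_descriptor_2.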

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
BLOCK_EXPORT
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
        // copy the onstack __block container to the heap
        __block ... x;
        __weak __block ... x;
        [^{ x; } copy];
        ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
        // copy the actual field held in the __block container
        __block id object;
        __block void (^object)(void);
        [^{ object; } copy];
        ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        /*******
        // copy the actual field held in the __block container
        __weak __block id object;
        __weak __block void (^object)(void);
        [^{ object; } copy];
        ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents.
// Used initially only for __attribute__((NSObject)) marked pointers.
BLOCK_EXPORT
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}