// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define __USE_GNU
#include <dlfcn.h>
#if __has_include(<os/assumes.h>)
#include <os/assumes.h>
#else
#include <assert.h>
#endif
#ifndef os_assumes
#define os_assumes(_x) _x
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif

/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4;  // logical 2

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/


static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}
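
// Editorial illustration (not part of the original source): the low bit of
// the flags word is BLOCK_DEALLOCATING, so logical refcounts are stored
// doubled in the bits covered by BLOCK_REFCOUNT_MASK. A minimal sketch of
// the encoding as exercised by the latching helpers in this file:
#if 0
static void refcount_encoding_sketch(void) {
    volatile int32_t flags = 0;
    latching_incr_int(&flags);   // logical refcount 1: flags == 2
    latching_incr_int(&flags);   // logical refcount 2: flags == 4
    (void)latching_decr_int_should_deallocate(&flags);  // false: flags back to 2
    (void)latching_decr_int_should_deallocate(&flags);  // true: flags == 1,
                                                        // i.e. BLOCK_DEALLOCATING set
}
#endif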

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}

// hit zero?
static bool latching_decr_int_now_zero(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return (new_value & BLOCK_REFCOUNT_MASK) == 0;
        }
    }
}


/***********************
GC support stub routines
************************/
#if !TARGET_OS_WIN32
#pragma mark GC Support Routines
#endif



static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
}

static void _Block_release_object_default(const void *ptr) {
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

static void _Block_destructInstance_default(const void *aBlock) {}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Public SPI
// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_BYREF_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

// transitional
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                     void (*setHasRefcount)(const void *, const bool),
                     void (*gc_assign)(void *, void **),
                     void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
BLOCK_EXPORT
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
    _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
}

// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
// without defining a new entry point.
BLOCK_EXPORT
void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
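
// Editorial sketch (not in the original source) of how a caller might adopt
// _Block_use_RR2. Block_callbacks_RR is declared in Block_private.h; the
// my_* callbacks and the assumption that its first member is a size field
// are illustrative only:
#if 0
static void my_retain(const void *obj) { /* e.g. retain obj */ }
static void my_release(const void *obj) { /* e.g. release obj */ }
static void my_destructInstance(const void *obj) { /* run instance destructors */ }

static void adopt_rr_callbacks(void) {
    static const Block_callbacks_RR callbacks = {
        sizeof(Block_callbacks_RR),   // versioned-by-size, per the comment above
        my_retain,
        my_release,
        my_destructInstance
    };
    _Block_use_RR2(&callbacks);
}
#endif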

/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
#if 0
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
#endif

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return (struct Block_descriptor_2 *)desc;
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return (struct Block_descriptor_3 *)desc;
}
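
// Editorial note: the optional descriptor parts sit contiguously after
// Block_descriptor_1, which is why the accessors above just step past each
// part that the flags word says is present:
//
//     Block_descriptor_1   always present            (reserved, size)
//     Block_descriptor_2   BLOCK_HAS_COPY_DISPOSE    (copy, dispose helpers)
//     Block_descriptor_3   BLOCK_HAS_SIGNATURE       (signature, layout)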

static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return ((struct Block_descriptor_3 *)desc)->layout != NULL;
}

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block. Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2;  // logical refcount 1
        result->isa = _NSConcreteMallocBlock;
        _Block_call_copy_helper(result, aBlock);
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        int32_t flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 2;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        _Block_call_copy_helper(result, aBlock);
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
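
// Editorial usage sketch (not in the original source) of the copy semantics
// above, via the public Block_copy()/Block_release() macros from <Block.h>:
#if 0
static void copy_semantics_sketch(void) {
    int captured = 42;
    int (^stack_block)(void) = ^{ return captured; };
    // First copy really copies: malloc + bitcopy + copy helper, refcount 1.
    int (^heap_block)(void) = Block_copy(stack_block);
    // Copying an already-heap block just bumps the latched refcount.
    int (^heap_block2)(void) = Block_copy(heap_block);
    Block_release(heap_block2);
    Block_release(heap_block);  // refcount hits zero: dispose helper, then free
}
#endif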




// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // just bits. Blast 'em using _Block_memmove in case they're __strong
            // This copy includes Block_byref_3, if any.
            _Block_memmove(copy+1, src+1,
                           src->size - sizeof(struct Block_byref));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
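
// Editorial sketch (not in the original source) of the forwarding-pointer
// dance that _Block_byref_assign_copy implements:
#if 0
static void byref_forwarding_sketch(void) {
    __block int x = 0;               // starts life in a stack Block_byref
    void (^b)(void) = ^{ x++; };     // compiler reads/writes x via x.forwarding
    void (^heap)(void) = Block_copy(b);  // byref moves to the heap; the stack
                                         // copy's forwarding now points there
    x = 10;      // still correct: the write goes through the forwarding pointer
    heap();      // increments the same heap storage; x is now 11
    Block_release(heap);
}
#endif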

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;
    int32_t refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = byref->flags & BLOCK_REFCOUNT_MASK;
    os_assert(refcount);
    if (latching_decr_int_should_deallocate(&byref->flags)) {
        if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
            (*byref2->byref_destroy)(byref);
        }
        _Block_deallocator((struct Block_layout *)byref);
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif

BLOCK_EXPORT
void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, true);
}


// API entry point to release a copied Block
BLOCK_EXPORT
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock
        || (aBlock->flags & BLOCK_IS_GLOBAL)
        || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
        ) return;
    if (aBlock->flags & BLOCK_IS_GC) {
        if (latching_decr_int_now_zero(&aBlock->flags)) {
            // Tell GC we no longer have our own refcounts. GC will decr its refcount
            // and unless someone has done a CFRetain or marked it uncollectable it will
            // now be subject to GC reclamation.
            _Block_setHasRefcount(aBlock, false);
        }
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (latching_decr_int_should_deallocate(&aBlock->flags)) {
            _Block_call_dispose_helper(aBlock);
            _Block_destructInstance(aBlock);
            _Block_deallocator(aBlock);
        }
    }
}

BLOCK_EXPORT
bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

BLOCK_EXPORT
bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}

// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal. Called from NSAutoBlock only under GC
BLOCK_EXPORT
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, false);
}


// SPI
BLOCK_EXPORT
size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

BLOCK_EXPORT
bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
BLOCK_EXPORT
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

BLOCK_EXPORT
const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}

BLOCK_EXPORT
const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

BLOCK_EXPORT
const char * _Block_extended_layout(void *aBlock)
{
    // Don't return GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
    if (!desc3) return NULL;

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and always 128 ORed in, for the following set of possibilities:
    __block id                   128+3      (0x83)
    __block (^Block)             128+7      (0x87)
    __weak __block id            128+3+16   (0x93)
    __weak __block (^Block)      128+7+16   (0x97)


********************************************************/
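
// Editorial sketch (not in the original source) of what the synthesized
// helpers look like for a Block capturing a single Objective-C object; the
// my_* names are hypothetical and the capture is typed void * to stay in C:
#if 0
// for:   id obj = ...;   void (^b)(void) = ^{ use(obj); };
struct my_block_literal {
    struct Block_layout layout;
    void *obj;                  // captured object, laid out after the header
};

static void my_copy_helper(struct my_block_literal *dst, struct my_block_literal *src) {
    // flags == BLOCK_FIELD_IS_OBJECT (3), per the table above
    _Block_object_assign(&dst->obj, src->obj, BLOCK_FIELD_IS_OBJECT);
}

static void my_dispose_helper(struct my_block_literal *src) {
    _Block_object_dispose(src->obj, BLOCK_FIELD_IS_OBJECT);
}
#endif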

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
BLOCK_EXPORT
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
            id object = ...;
            [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
            void (^object)(void) = ...;
            [^{ object; } copy];
        ********/

        _Block_assign(_Block_copy_internal(object, false), destAddr);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
            // copy the onstack __block container to the heap
            __block ... x;
            __weak __block ... x;
            [^{ x; } copy];
        ********/

        _Block_byref_assign_copy(destAddr, object, flags);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
            // copy the actual field held in the __block container
            __block id object;
            __block void (^object)(void);
            [^{ object; } copy];
        ********/

        // under manual retain release __block object/block variables are dangling
        _Block_assign((void *)object, destAddr);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        /*******
            // copy the actual field held in the __block container
            __weak __block id object;
            __weak __block void (^object)(void);
            [^{ object; } copy];
        ********/

        _Block_assign_weak(object, destAddr);
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
BLOCK_EXPORT
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_destroy(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}