/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>

#else /* !KERNEL */
#define TARGET_OS_WIN32 0

#include <libkern/Block_private.h>
__BEGIN_DECLS
#include <kern/kalloc.h>
__END_DECLS

// The kernel has no libc malloc/free; these shims map them onto the
// default kalloc heap (zero-filled, willing to wait) so the copy paths
// below can stay identical to the userspace implementation.
static inline void *
malloc(size_t size)
{
    if (size == 0) {
        return NULL;
    }
    return kheap_alloc_tag_bt(KHEAP_DEFAULT, size,
        (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN);
}

static inline void
free(void *addr)
{
    kheap_free_addr(KHEAP_DEFAULT, addr);
}

#endif /* KERNEL */

#include <machine/atomic.h>
#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool
OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return original == oldl;
}

static __inline bool
OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return original == oldi;
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#endif


/*******************************************************************************
 * Internal Utilities
 ********************************************************************************/

static int32_t
latching_incr_int(volatile int32_t *where)
{
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
            return old_value + 2;
        }
    }
}
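
// Note on the encoding (restating what the code in this file relies on):
// the reference count lives in the low bits of `flags` under
// BLOCK_REFCOUNT_MASK and is kept in units of 2, so the `| 2` in
// _Block_copy below means a logical refcount of 1; the lowest bit is
// reserved for BLOCK_DEALLOCATING. Once the field saturates at
// BLOCK_REFCOUNT_MASK it latches there and the object is deliberately
// leaked rather than allowed to overflow.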

static bool
latching_incr_int_not_deallocating(volatile int32_t *where)
{
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool
latching_decr_int_should_deallocate(volatile int32_t *where)
{
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}
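
// Worked example (annotation, not in the original source): with a logical
// refcount of 1, the masked bits equal exactly 2, so the code subtracts 1
// instead of 2. That single atomic step clears the count and sets
// BLOCK_DEALLOCATING, and the function returns true so the caller runs the
// dispose helpers and frees the memory.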


/**************************************************************************
 * Framework callback functions and their default implementations.
 ***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif
#if KERNEL
static inline void
_Block_retain_object(const void *ptr __unused)
{
}

static inline void
_Block_release_object(const void *ptr __unused)
{
}

static inline void
_Block_destructInstance(const void *aBlock __unused)
{
}

#else

static void
_Block_retain_object_default(const void *ptr __unused)
{
}

static void
_Block_release_object_default(const void *ptr __unused)
{
}

static void
_Block_destructInstance_default(const void *aBlock __unused)
{
}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
 * Callback registration from ObjC runtime and CoreFoundation
 ***************************************************************************/

void
_Block_use_RR2(const Block_callbacks_RR *callbacks)
{
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
#endif // !KERNEL

/****************************************************************************
 * Accessors for block descriptor fields
 *****************************************************************************/

template <class T>
static T *
unwrap_relative_pointer(int32_t &offset)
{
    if (offset == 0) {
        return nullptr;
    }

    uintptr_t base = (uintptr_t)&offset;
    uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset;
    uintptr_t pointer = base + extendedOffset;
    return (T *)pointer;
}
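
// Illustrative example (annotation, not in the original source): small
// descriptors store 32-bit self-relative offsets rather than full pointers.
// If a descriptor's `signature` field sits at address 0x1000 and holds the
// sign-extended value 0x80, unwrap_relative_pointer resolves it to
// 0x1000 + 0x80 = 0x1080, the address of the signature string itself.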

#if 0
static struct Block_descriptor_2 *
_Block_descriptor_2(struct Block_layout *aBlock)
{
    uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
    desc += sizeof(struct Block_descriptor_1);
    return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}
#endif

// Descriptor 3 (signature/layout) follows descriptor 1 and, when the block
// has copy/dispose helpers, descriptor 2 as well.
static struct Block_descriptor_3 *
_Block_descriptor_3(struct Block_layout *aBlock)
{
    uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}

static void
_Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    if (auto *pFn = _Block_get_copy_function(aBlock)) {
        pFn(result, aBlock);
    }
}

static void
_Block_call_dispose_helper(struct Block_layout *aBlock)
{
    if (auto *pFn = _Block_get_dispose_function(aBlock)) {
        pFn(aBlock);
    }
}

/*******************************************************************************
 * Internal Support routines for copying
 ********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
void *
_Block_copy(const void *arg)
{
    struct Block_layout *aBlock;

    if (!arg) {
        return NULL;
    }

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    } else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    } else {
        // It's a stack block. Make a copy.
        size_t size = Block_size(aBlock);
        struct Block_layout *result = (struct Block_layout *)malloc(size);
        if (!result) {
            return NULL;
        }
        memmove(result, aBlock, size); // bitcopy first
#if __has_feature(ptrauth_calls)
        // Resign the invoke pointer as it uses address authentication.
        result->invoke = aBlock->invoke;

#if __has_feature(ptrauth_signed_block_descriptors)
        // The descriptor pointer is signed with a discriminator blended from
        // its own address, so authenticate it against the source location
        // and re-sign it for the destination.
        uintptr_t oldDesc =
            ptrauth_blend_discriminator(
                &aBlock->descriptor, _Block_descriptor_ptrauth_discriminator);
        uintptr_t newDesc =
            ptrauth_blend_discriminator(
                &result->descriptor, _Block_descriptor_ptrauth_discriminator);

        result->descriptor =
            ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc,
            ptrauth_key_asda, newDesc);
#endif
#endif

        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
        _Block_call_copy_helper(result, aBlock);
        // Set isa last so memory analysis tools see a fully-initialized object.
        result->isa = _NSConcreteMallocBlock;
        return result;
    }
}
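
// Illustrative usage (not part of the original source; `stackBlock` is a
// hypothetical stack-allocated block literal) showing the cases above:
#if 0
void *heap1 = _Block_copy(stackBlock); // real copy: allocate + copy helper
void *heap2 = _Block_copy(heap1);      // same pointer back, refcount bumped
_Block_release(heap2);                 // drops the logical refcount back to 1
_Block_release(heap1);                 // dispose helper runs, memory freed
#endif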


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static struct Block_byref *
_Block_byref_copy(const void *arg)
{
    struct Block_byref *src = (struct Block_byref *)arg;

    if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        struct Block_byref *copy = (struct Block_byref *)malloc(src->size);
        copy->isa = NULL;
        // byref value 4 is logical refcount of 2: one for caller, one for stack
        copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
        copy->forwarding = copy; // patch heap copy to point to itself
        src->forwarding = copy; // patch stack to point to heap copy
        copy->size = src->size;

        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest.
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2 + 1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        } else {
            // Bitwise copy.
            // This copy includes Block_byref_3, if any.
            memmove(copy + 1, src + 1, src->size - sizeof(*src));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }

    return src->forwarding;
}
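
// Illustrative sketch (not part of the original source): roughly what the
// compiler emits for "__block int x;". Every access goes through the
// forwarding pointer, which _Block_byref_copy above repoints at the heap
// copy the first time the enclosing block is copied.
#if 0
struct _byref_x {
    void *isa;
    struct _byref_x *forwarding; // initially points back at this stack copy
    volatile int32_t flags;
    uint32_t size;
    int x;                       // the __block variable itself
};
// "x = 10" then compiles to something like: byref.forwarding->x = 10;
#endif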

static void
_Block_byref_release(const void *arg)
{
    struct Block_byref *byref = (struct Block_byref *)arg;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
        __assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
        os_assert(refcount);
        if (latching_decr_int_should_deallocate(&byref->flags)) {
            if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
                struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1);
                (*byref2->byref_destroy)(byref);
            }
            free(byref);
        }
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void
_Block_release(const void *arg)
{
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock) {
        return;
    }
    if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return;
    }
    if (!(aBlock->flags & BLOCK_NEEDS_FREE)) {
        return;
    }

    if (latching_decr_int_should_deallocate(&aBlock->flags)) {
        _Block_call_dispose_helper(aBlock);
        _Block_destructInstance(aBlock);
        free(aBlock);
    }
}

bool
_Block_tryRetain(const void *arg)
{
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool
_Block_isDeallocating(const void *arg)
{
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t
Block_size(void *aBlock)
{
    auto *layout = (Block_layout *)aBlock;
    void *desc = _Block_get_descriptor(layout);
    if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
        return ((Block_descriptor_small *)desc)->size;
    }
    return ((Block_descriptor_1 *)desc)->size;
}

bool
_Block_use_stret(void *aBlock)
{
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool
_Block_has_signature(void *aBlock)
{
    return _Block_signature(aBlock) ? true : false;
}

const char *
_Block_signature(void *aBlock)
{
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (!(layout->flags & BLOCK_HAS_SIGNATURE)) {
        return nullptr;
    }

    if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
        auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
        return unwrap_relative_pointer<const char>(bds->signature);
    }

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
    return desc3->signature;
}

const char *
_Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting old GC layout
    Block_layout *layout = (Block_layout *)aBlock;
    if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
        !(layout->flags & BLOCK_HAS_SIGNATURE)) {
        return nullptr;
    }

    if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
        auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
        return unwrap_relative_pointer<const char>(bds->layout);
    }

    Block_descriptor_3 *desc = _Block_descriptor_3(layout);
    return desc->layout;
}

const char *
_Block_extended_layout(void *aBlock)
{
    // Don't return old GC layout to callers expecting extended layout
    Block_layout *layout = (Block_layout *)aBlock;
    if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
        !(layout->flags & BLOCK_HAS_SIGNATURE)) {
        return nullptr;
    }

    const char *extLayout;
    if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
        auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
        if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) {
            extLayout = (const char *)(uintptr_t)bds->layout;
        } else {
            extLayout = unwrap_relative_pointer<const char>(bds->layout);
        }
    } else {
        Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
        extLayout = desc3->layout;
    }

    // Return empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!extLayout) {
        extLayout = "";
    }
    return extLayout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************
 *
 * Entry points used by the compiler - the real API!
 *
 *
 * A Block can reference four different kinds of things that require help when the Block is copied to the heap.
 * 1) C++ stack based objects
 * 2) References to Objective-C objects
 * 3) Other Blocks
 * 4) __block variables
 *
 * In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release,
 * called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for
 * C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.
 * The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose
 * for the rest.
 *
 * The flags parameter of _Block_object_assign and _Block_object_dispose is set to
 * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
 * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
 * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
 * If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).
 *
 * So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
 *
 * When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler
 * also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper
 * (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!)
 * and the dispose helper will do the destructor. And similarly the helpers will call into the same two
 * support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128)
 * bit of information supplied.
 *
 * So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively,
 * with BLOCK_FIELD_IS_WEAK (16) or'ed in as appropriate and always 128 or'd in, for the following set of
 * possibilities:
 *    __block id                   128+3      (0x83)
 *    __block (^Block)             128+7      (0x87)
 *    __weak __block id            128+3+16   (0x93)
 *    __weak __block (^Block)      128+7+16   (0x97)
 *
 *
 ********************************************************/
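
// Illustrative sketch (not part of the original source; all names are
// hypothetical): the shape of the compiler-synthesized helpers for a block
// literal capturing a single object pointer, i.e. the flag value 3
// (BLOCK_FIELD_IS_OBJECT) case described above.
#if 0
struct my_block_literal {
    struct Block_layout layout;
    void *captured;              // the captured object
};

static void
my_block_copy_helper(void *dst, void *src)
{
    struct my_block_literal *d = (struct my_block_literal *)dst;
    struct my_block_literal *s = (struct my_block_literal *)src;
    // retains the object (via the registered callback) and stores it
    _Block_object_assign(&d->captured, s->captured, BLOCK_FIELD_IS_OBJECT);
}

static void
my_block_dispose_helper(void *src)
{
    struct my_block_literal *s = (struct my_block_literal *)src;
    _Block_object_dispose(s->captured, BLOCK_FIELD_IS_OBJECT);
}
#endif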

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void
_Block_object_assign(void *destArg, const void *object, const int flags)
{
    const void **dest = (const void **)destArg;
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
    case BLOCK_FIELD_IS_OBJECT:
        /*******
         *  id object = ...;
         *  [^{ object; } copy];
         ********/

        _Block_retain_object(object);
        *dest = object;
        break;

    case BLOCK_FIELD_IS_BLOCK:
        /*******
         *  void (^object)(void) = ...;
         *  [^{ object; } copy];
         ********/

        *dest = _Block_copy(object);
        break;

    case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
    case BLOCK_FIELD_IS_BYREF:
        /*******
         *  // copy the onstack __block container to the heap
         *  // Note this __weak is old GC-weak/MRC-unretained.
         *  // ARC-style __weak is handled by the copy helper directly.
         *  __block ... x;
         *  __weak __block ... x;
         *  [^{ x; } copy];
         ********/

        *dest = _Block_byref_copy(object);
        break;

    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
         *  // copy the actual field held in the __block container
         *  // Note this is MRC unretained __block only.
         *  // ARC retained __block is handled by the copy helper directly.
         *  __block id object;
         *  __block void (^object)(void);
         *  [^{ object; } copy];
         ********/

        *dest = object;
        break;

    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        /*******
         *  // copy the actual field held in the __block container
         *  // Note this __weak is old GC-weak/MRC-unretained.
         *  // ARC-style __weak is handled by the copy helper directly.
         *  __weak __block id object;
         *  __weak __block void (^object)(void);
         *  [^{ object; } copy];
         ********/

        *dest = object;
        break;

    default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents.
void
_Block_object_dispose(const void *object, const int flags)
{
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
    case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
    case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
    case BLOCK_FIELD_IS_BLOCK:
        _Block_release(object);
        break;
    case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
    case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        // MRC-unretained __block contents were never retained on
        // assignment (see _Block_object_assign), so nothing to release
        break;
    default:
        break;
    }
}


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;