/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>

#else /* !KERNEL */

#include <libkern/Block_private.h>
#include <libkern/OSRuntime.h>

#define malloc(s) kern_os_malloc((s))
#define free(a) kern_os_free((a))

#endif /* KERNEL */

#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
#endif


/*******************************************************************************
Internal Utilities
********************************************************************************/

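// The latching helpers below maintain the reference count that lives in the
// low bits of a block's flags word. A sketch of the encoding, as implied by
// the arithmetic here (the authoritative bit definitions are in
// Block_private.h): the count occupies BLOCK_REFCOUNT_MASK and moves in
// units of 2 so that bit 0 (BLOCK_DEALLOCATING) stays clear while the block
// is alive; a count that reaches BLOCK_REFCOUNT_MASK is "latched" and never
// decremented again, so an overflowed refcount leaks rather than over-frees.
#if 0
// Hypothetical illustration (not part of this file) of the encoding:
//   retain:        flags += 2
//   release:       flags -= 2, or flags -= 1 on the final release, which
//                  zeroes the count and sets BLOCK_DEALLOCATING in one step
static int32_t example_logical_refcount(int32_t flags) {
    return (flags & BLOCK_REFCOUNT_MASK) >> 1; // e.g. field value 2 -> logical 1
}
#endif
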
static int32_t latching_incr_int(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            return old_value+2;
        }
    }
}

static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if (old_value & BLOCK_DEALLOCATING) {
            // if deallocating we can't do this
            return false;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            // if latched, we're leaking this block, and we succeed
            return true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
            // otherwise, we must store a new retained value without the deallocating bit set
            return true;
        }
    }
}


// return should_deallocate?
static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
    while (1) {
        int32_t old_value = *where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return false; // latched high
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return false; // underflow, latch low
        }
        int32_t new_value = old_value - 2;
        bool result = false;
        if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
            new_value = old_value - 1;
            result = true;
        }
        if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
            return result;
        }
    }
}


/**************************************************************************
Framework callback functions and their default implementations.
***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif

static void _Block_retain_object_default(const void *ptr __unused) { }

static void _Block_release_object_default(const void *ptr __unused) { }

static void _Block_destructInstance_default(const void *aBlock __unused) {}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
Callback registration from ObjC runtime and CoreFoundation
***************************************************************************/

void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
    _Block_retain_object = callbacks->retain;
    _Block_release_object = callbacks->release;
    _Block_destructInstance = callbacks->destructInstance;
}
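
#if 0
// Sketch of how a higher layer (such as the ObjC runtime) might register
// its callbacks through _Block_use_RR2(). Block_callbacks_RR is declared in
// Block_private.h; the callback function names here are hypothetical.
static void my_retain(const void *obj)      { /* retain obj */ }
static void my_release(const void *obj)     { /* release obj */ }
static void my_destruct(const void *aBlock) { /* tear down instance state */ }

static void register_callbacks_example(void) {
    static const Block_callbacks_RR callbacks = {
        sizeof(Block_callbacks_RR), // size field, used for versioning
        my_retain,
        my_release,
        my_destruct
    };
    _Block_use_RR2(&callbacks);
}
#endif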

/****************************************************************************
Accessors for block descriptor fields
*****************************************************************************/
#if 0
static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
{
    return aBlock->descriptor;
}
#endif

static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}

static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
{
    if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
    uint8_t *desc = (uint8_t *)aBlock->descriptor;
    desc += sizeof(struct Block_descriptor_1);
    if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
        desc += sizeof(struct Block_descriptor_2);
    }
    return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}
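
// The three descriptor structs are laid out back to back in memory;
// _Block_descriptor_2/_3 above recover their addresses by stepping past the
// parts the flags say are present. As a sketch (the authoritative
// definitions live in Block_private.h):
//
//   struct Block_descriptor_1 { uintptr_t reserved; uintptr_t size; };         // always present
//   struct Block_descriptor_2 { copy and dispose helpers };                    // iff BLOCK_HAS_COPY_DISPOSE
//   struct Block_descriptor_3 { const char *signature; const char *layout; };  // iff BLOCK_HAS_SIGNATURE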

static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->copy)(result, aBlock); // do fixup
}

static void _Block_call_dispose_helper(struct Block_layout *aBlock)
{
    struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
    if (!desc) return;

    (*desc->dispose)(aBlock);
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
void *_Block_copy(const void *arg) {
    struct Block_layout *aBlock;

    if (!arg) return NULL;

    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }
    else {
        // It's a stack block. Make a copy.
        struct Block_layout *result = (typeof(result)) malloc(aBlock->descriptor->size);
        if (!result) return NULL;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
#if __has_feature(ptrauth_calls)
        // Re-sign the invoke pointer, since it uses address authentication.
        result->invoke = aBlock->invoke;
#endif
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
        _Block_call_copy_helper(result, aBlock);
        // Set isa last so memory analysis tools see a fully-initialized object.
        result->isa = _NSConcreteMallocBlock;
        return result;
    }
}
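
#if 0
// Hypothetical caller sketch (not part of this file): Block_copy() and
// Block_release() from <Block.h> are the public wrappers around
// _Block_copy()/_Block_release().
#include <Block.h>
static int copy_release_example(void) {
    int captured = 7;
    int (^stackBlock)(void) = ^{ return captured; };
    int (^heapBlock)(void) = Block_copy(stackBlock); // heap copy, logical refcount 1
    int value = heapBlock();                         // usable beyond stackBlock's scope
    Block_release(heapBlock);                        // freed once the count reaches 0
    return value;
}
#endif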


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask if the byref pointer that we know about has already been copied to the heap, and, if so, increment and return it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static struct Block_byref *_Block_byref_copy(const void *arg) {
    struct Block_byref *src = (struct Block_byref *)arg;

    if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        // src points to stack
        struct Block_byref *copy = (struct Block_byref *)malloc(src->size);
        copy->isa = NULL;
        // byref value 4 is logical refcount of 2: one for caller, one for stack
        copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
        copy->forwarding = copy; // patch heap copy to point to itself
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;

        if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest.
            // If more than one field shows up in a byref block this is wrong XXX
            struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
            struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
            copy2->byref_keep = src2->byref_keep;
            copy2->byref_destroy = src2->byref_destroy;

            if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
                struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
                struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1);
                copy3->layout = src3->layout;
            }

            (*src2->byref_keep)(copy, src);
        }
        else {
            // Bitwise copy.
            // This copy includes Block_byref_3, if any.
            memmove(copy+1, src+1, src->size - sizeof(*src));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }

    return src->forwarding;
}
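
#if 0
// Illustrative sketch (hypothetical, not compiler output) of the structure
// behind `__block int x = 10;`. The header fields mirror struct Block_byref
// in Block_private.h; the trailing field is the variable's storage, always
// accessed through the forwarding pointer so stack and heap users agree.
struct example_byref {
    void *isa;
    struct example_byref *forwarding; // points to self, or to the heap copy
    volatile int32_t flags;
    uint32_t size;
    int x;                            // the __block variable itself
};
#endif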

static void _Block_byref_release(const void *arg) {
    struct Block_byref *byref = (struct Block_byref *)arg;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    byref = byref->forwarding;

    if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
        __assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
        os_assert(refcount);
        if (latching_decr_int_should_deallocate(&byref->flags)) {
            if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
                struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
                (*byref2->byref_destroy)(byref);
            }
            free(byref);
        }
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void _Block_release(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    if (!aBlock) return;
    if (aBlock->flags & BLOCK_IS_GLOBAL) return;
    if (! (aBlock->flags & BLOCK_NEEDS_FREE)) return;

    if (latching_decr_int_should_deallocate(&aBlock->flags)) {
        _Block_call_dispose_helper(aBlock);
        _Block_destructInstance(aBlock);
        free(aBlock);
    }
}

bool _Block_tryRetain(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool _Block_isDeallocating(const void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}
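
#if 0
// Sketch of the retain-if-not-deallocating pattern these two entry points
// support (hypothetical caller, e.g. weak-reference machinery): take a
// strong reference only if the block has not already begun deallocating.
static void *try_load_example(struct Block_layout *candidate) {
    if (candidate && _Block_tryRetain(candidate)) {
        return candidate; // caller now owns a +1 reference
    }
    return NULL;          // too late; the block is being torn down
}
#endif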


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t Block_size(void *aBlock) {
    return ((struct Block_layout *)aBlock)->descriptor->size;
}

bool _Block_use_stret(void *aBlock) {
    struct Block_layout *layout = (struct Block_layout *)aBlock;

    int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
    return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool _Block_has_signature(void *aBlock) {
    return _Block_signature(aBlock) ? true : false;
}

const char * _Block_signature(void *aBlock)
{
    struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock);
    if (!desc3) return NULL;

    return desc3->signature;
}

const char * _Block_layout(void *aBlock)
{
    // Don't return extended layout to callers expecting old GC layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock);
    if (!desc3) return NULL;

    return desc3->layout;
}

const char * _Block_extended_layout(void *aBlock)
{
    // Don't return old GC layout to callers expecting extended layout
    struct Block_layout *layout = (struct Block_layout *)aBlock;
    if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;

    struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock);
    if (!desc3) return NULL;

    // Return an empty string (all non-object bytes) instead of NULL
    // so callers can distinguish "empty layout" from "no layout".
    if (!desc3->layout) return "";
    else return desc3->layout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack-based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack-based objects, and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is itself a C++ object, an Objective-C object, or another Block, the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. the "byref" copy helper) will run the C++ copy constructor (not a const one, though!) and the dispose helper will run the destructor. And similarly the helpers call into the same two support functions, with the same values for objects and Blocks, plus the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and 128 always ORed in, for the following set of possibilities:
    __block id                   128+3       (0x83)
    __block (^Block)             128+7       (0x87)
    __weak __block id            128+3+16    (0x93)
    __weak __block (^Block)      128+7+16    (0x97)


********************************************************/
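
#if 0
// Conceptual sketch (hypothetical names, not actual compiler output) of the
// helpers synthesized for a Block that captures an Objective-C object:
//
//     id obj = ...;
//     void (^b)(void) = ^{ use(obj); };
//
// Per the contract above, the copy helper calls _Block_object_assign with
// flags == 3 (BLOCK_FIELD_IS_OBJECT) and the dispose helper mirrors it.
struct example_block {
    void *isa;
    volatile int32_t flags;
    int32_t reserved;
    void (*invoke)(struct example_block *);
    struct Block_descriptor_1 *descriptor;
    void *obj; // captured object, retained when the Block is copied
};

static void example_copy_helper(struct example_block *dst, struct example_block *src) {
    _Block_object_assign(&dst->obj, src->obj, BLOCK_FIELD_IS_OBJECT);
}

static void example_dispose_helper(struct example_block *src) {
    _Block_object_dispose(src->obj, BLOCK_FIELD_IS_OBJECT);
}
#endif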

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void _Block_object_assign(void *destArg, const void *object, const int flags) {
    const void **dest = (const void **)destArg;
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_OBJECT:
        /*******
        id object = ...;
        [^{ object; } copy];
        ********/

        _Block_retain_object(object);
        *dest = object;
        break;

      case BLOCK_FIELD_IS_BLOCK:
        /*******
        void (^object)(void) = ...;
        [^{ object; } copy];
        ********/

        *dest = _Block_copy(object);
        break;

      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        /*******
        // copy the onstack __block container to the heap
        // Note this __weak is old GC-weak/MRC-unretained.
        // ARC-style __weak is handled by the copy helper directly.
        __block ... x;
        __weak __block ... x;
        [^{ x; } copy];
        ********/

        *dest = _Block_byref_copy(object);
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
        /*******
        // copy the actual field held in the __block container
        // Note this is MRC unretained __block only.
        // ARC retained __block is handled by the copy helper directly.
        __block id object;
        __block void (^object)(void);
        [^{ object; } copy];
        ********/

        *dest = object;
        break;

      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        /*******
        // copy the actual field held in the __block container
        // Note this __weak is old GC-weak/MRC-unretained.
        // ARC-style __weak is handled by the copy helper directly.
        __weak __block id object;
        __weak __block void (^object)(void);
        [^{ object; } copy];
        ********/

        *dest = object;
        break;

      default:
        break;
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
void _Block_object_dispose(const void *object, const int flags) {
    switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
      case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
      case BLOCK_FIELD_IS_BYREF:
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
        break;
      case BLOCK_FIELD_IS_BLOCK:
        _Block_release(object);
        break;
      case BLOCK_FIELD_IS_OBJECT:
        _Block_release_object(object);
        break;
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
      case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
        break;
      default:
        break;
    }
}


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;

#undef malloc
#undef free