/*
 * Copyright (c) 2010 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
 * objc-block-trampolines.mm
 * Author: b.bum
 *
 **********************************************************************/

/***********************************************************************
 * Imports.
 **********************************************************************/
#include "objc-private.h"
#include "runtime.h"

#include <Block.h>
#include <Block_private.h>
#include <mach/mach.h>
#include <objc/objc-block-trampolines.h>

// fixme C++ compilers don't implement memory_order_consume efficiently.
// Use memory_order_relaxed and cross our fingers.
#define MEMORY_ORDER_CONSUME std::memory_order_relaxed

// 8 bytes of text and data per trampoline on all architectures.
#define SLOT_SIZE 8

// The trampolines are defined in assembly files in libobjc-trampolines.dylib.
// We can't link to libobjc-trampolines.dylib directly because
// for security reasons it isn't in the dyld shared cache.

// Trampoline addresses are lazily looked up.
// All of them are hidden behind a single atomic pointer for lock-free init.
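// (See TrampolinePointerWrapper::Initialize() below: racing initializers are
// resolved with a compare-and-swap, and the loser deletes its copy.)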

#ifdef __PTRAUTH_INTRINSICS__
# define TrampolinePtrauth __ptrauth(ptrauth_key_function_pointer, 1, 0x3af1)
#else
# define TrampolinePtrauth
#endif
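// When pointer authentication is in use, the __ptrauth qualifier above keeps
// the stored pointer signed while it sits in memory (address-discriminated,
// with extra discriminator 0x3af1); the compiler signs on store and
// authenticates on load automatically.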

class TrampolinePointerWrapper {
    struct TrampolinePointers {
        class TrampolineAddress {
            const void * TrampolinePtrauth storage;

        public:
            TrampolineAddress(void *dylib, const char *name) {
#define PREFIX "_objc_blockTrampoline"
                char symbol[strlen(PREFIX) + strlen(name) + 1];
                strcpy(symbol, PREFIX);
                strcat(symbol, name);
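                // e.g. name "Impl" builds the symbol "_objc_blockTrampolineImpl"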
                // dlsym() from a text segment returns a signed pointer.
                // Authenticate it manually and let the compiler re-sign it.
                storage = ptrauth_auth_data(dlsym(dylib, symbol),
                                            ptrauth_key_function_pointer, 0);
                if (!storage) {
                    _objc_fatal("couldn't dlsym %s", symbol);
                }
            }

            uintptr_t address() {
                return (uintptr_t)(void*)storage;
            }
        };

        TrampolineAddress impl;   // trampoline header code
        TrampolineAddress start;  // first trampoline
#if DEBUG
        // These symbols are only used in assertions.
        // fixme might be able to move the assertions to libobjc-trampolines itself
        TrampolineAddress last;   // start of the last trampoline
        // We don't use the address after the last trampoline because that
        // address might be in a different section, and then dlsym() would not
        // sign it as a function pointer.
# if SUPPORT_STRET
        TrampolineAddress impl_stret;
        TrampolineAddress start_stret;
        TrampolineAddress last_stret;
# endif
#endif

        uintptr_t textSegment;
        uintptr_t textSegmentSize;

        void check() {
#if DEBUG
            assert(impl.address() == textSegment + PAGE_MAX_SIZE);
            assert(impl.address() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
            assert(impl.address() + PAGE_MAX_SIZE ==
                   last.address() + SLOT_SIZE);
            assert(last.address() + SLOT_SIZE < textSegment + textSegmentSize);
            assert((last.address() - start.address()) % SLOT_SIZE == 0);
# if SUPPORT_STRET
            assert(impl_stret.address() == textSegment + 2*PAGE_MAX_SIZE);
            assert(impl_stret.address() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
            assert(impl_stret.address() + PAGE_MAX_SIZE ==
                   last_stret.address() + SLOT_SIZE);
            assert(start.address() - impl.address() ==
                   start_stret.address() - impl_stret.address());
            assert(last_stret.address() + SLOT_SIZE <
                   textSegment + textSegmentSize);
            assert((last_stret.address() - start_stret.address())
                   % SLOT_SIZE == 0);
# endif
#endif
        }


        TrampolinePointers(void *dylib)
            : impl(dylib, "Impl")
            , start(dylib, "Start")
#if DEBUG
            , last(dylib, "Last")
# if SUPPORT_STRET
            , impl_stret(dylib, "Impl_stret")
            , start_stret(dylib, "Start_stret")
            , last_stret(dylib, "Last_stret")
# endif
#endif
        {
            const auto *mh =
                dyld_image_header_containing_address((void *)impl.address());
            unsigned long size = 0;
            textSegment = (uintptr_t)
                getsegmentdata((headerType *)mh, "__TEXT", &size);
            textSegmentSize = size;

            check();
        }
    };

    std::atomic<TrampolinePointers *> trampolines{nil};

    TrampolinePointers *get() {
        return trampolines.load(MEMORY_ORDER_CONSUME);
    }

public:
    void Initialize() {
        if (get()) return;

        // This code may be called concurrently.
        // In the worst case we perform extra dyld operations.
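        // (dlopen() is reference-counted, so redundant opens of the same
        // image are harmless.)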
        void *dylib = dlopen("/usr/lib/libobjc-trampolines.dylib",
                             RTLD_NOW | RTLD_LOCAL | RTLD_FIRST);
        if (!dylib) {
            _objc_fatal("couldn't dlopen libobjc-trampolines.dylib");
        }

        auto t = new TrampolinePointers(dylib);
        TrampolinePointers *old = nil;
        if (! trampolines.compare_exchange_strong(old, t,
                                                  std::memory_order_release))
        {
            delete t;  // Lost an initialization race.
        }
    }

    uintptr_t textSegment() { return get()->textSegment; }
    uintptr_t textSegmentSize() { return get()->textSegmentSize; }

    uintptr_t impl() { return get()->impl.address(); }
    uintptr_t start() { return get()->start.address(); }
};

static TrampolinePointerWrapper Trampolines;

// argument mode identifier
typedef enum {
    ReturnValueInRegisterArgumentMode,
#if SUPPORT_STRET
    ReturnValueOnStackArgumentMode,
#endif

    ArgumentModeCount
} ArgumentMode;
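
// Why two modes: for ordinary calls the receiver arrives in the first
// argument slot, but struct-return (stret) calls pass a hidden pointer to
// the result buffer first and the receiver second, so the trampoline must
// patch a different argument register in each case.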

// We must take care with our data layout on architectures that support
// multiple page sizes.
//
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
// On some platforms this requires additional linker flags.
//
// When we allocate a page group, we use PAGE_MAX_SIZE size.
// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
//
// When we allocate a page group, we use the process's page alignment.
// This simplifies allocation because we don't need to force greater-than-
// default alignment when running with small pages, but it also means
// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK.
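//
// A sketch of the arithmetic (see trampolinesForMode() below): the trampoline
// at slot `i` for mode `m` lives at pageGroupBase + (2 + m) * PAGE_MAX_SIZE +
// i * SLOT_SIZE, and its payload lives at pageGroupBase + i * SLOT_SIZE. So a
// trampoline can find its block pointer from its own address using only fixed
// offsets, never page-mask arithmetic.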

struct TrampolineBlockPageGroup
{
    TrampolineBlockPageGroup *nextPageGroup;      // linked list of all page groups
    TrampolineBlockPageGroup *nextAvailablePage;  // linked list of page groups with available slots

    uintptr_t nextAvailable;  // index of next available slot, endIndex() if none available

    // Payload data: block pointers and free list.
    // The bytes parallel to the trampoline header code hold the fields
    // above, or are unused.
    // uint8_t payloads[PAGE_MAX_SIZE - sizeof(TrampolineBlockPageGroup)]

    // Code: Mach-O header, then trampoline header followed by trampolines.
    // On platforms with struct return we have non-stret trampolines and
    // stret trampolines. The stret and non-stret trampolines at a given
    // index share the same data page.
    // uint8_t macho[PAGE_MAX_SIZE];
    // uint8_t trampolines[ArgumentModeCount][PAGE_MAX_SIZE];

    // Per-trampoline block data format:
    // initial value is 0 while page data is filled sequentially
    // when filled, value is reference to Block_copy()d block
    // when empty, value is index of next available slot OR 0 if never used yet
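    //
    // A hypothetical lifetime of one slot: it starts at 0; allocation stores
    // a Block_copy()d pointer; imp_removeBlock() stores the old free-list
    // head (a small index), making this slot the new head; a later
    // allocation pops it and stores a block pointer again.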

    union Payload {
        id block;
        uintptr_t nextAvailable;  // free list
    };

    static uintptr_t headerSize() {
        return (uintptr_t) (Trampolines.start() - Trampolines.impl());
    }

    static uintptr_t slotSize() {
        return SLOT_SIZE;
    }

    static uintptr_t startIndex() {
        // headerSize is assumed to be slot-aligned
        return headerSize() / slotSize();
    }

    static uintptr_t endIndex() {
        return (uintptr_t)PAGE_MAX_SIZE / slotSize();
    }
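
    // Worked example, assuming PAGE_MAX_SIZE is 16384 and a 40-byte header
    // (both illustrative; the real values come from the platform and the
    // dylib's symbols): slotSize() == 8, startIndex() == 5, and
    // endIndex() == 2048, so each page group serves slots 5 through 2047.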

    static bool validIndex(uintptr_t index) {
        return (index >= startIndex() && index < endIndex());
    }

    Payload *payload(uintptr_t index) {
        assert(validIndex(index));
        return (Payload *)((char *)this + index*slotSize());
    }

    uintptr_t trampolinesForMode(int aMode) {
        // Skip over the data page and the Mach-O header page.
        return (uintptr_t)this + PAGE_MAX_SIZE * (2 + aMode);
    }

    IMP trampoline(int aMode, uintptr_t index) {
        assert(validIndex(index));
        char *base = (char *)trampolinesForMode(aMode);
        char *imp = base + index*slotSize();
#if __arm__
        imp++;  // trampoline is Thumb instructions
#endif
#if __has_feature(ptrauth_calls)
        imp = ptrauth_sign_unauthenticated(imp,
                                           ptrauth_key_function_pointer, 0);
#endif
        return (IMP)imp;
    }

    uintptr_t indexForTrampoline(uintptr_t tramp) {
        for (int aMode = 0; aMode < ArgumentModeCount; aMode++) {
            uintptr_t base  = trampolinesForMode(aMode);
            uintptr_t start = base + startIndex() * slotSize();
            uintptr_t end   = base + endIndex() * slotSize();
            if (tramp >= start && tramp < end) {
                return (uintptr_t)(tramp - base) / slotSize();
            }
        }
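        // Not a trampoline from this page group. 0 is a safe "not found"
        // sentinel because startIndex() is always nonzero: the header
        // occupies the front of every page.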
        return 0;
    }

    static void check() {
        assert(TrampolineBlockPageGroup::headerSize() >= sizeof(TrampolineBlockPageGroup));
        assert(TrampolineBlockPageGroup::headerSize() % TrampolineBlockPageGroup::slotSize() == 0);
    }

};

static TrampolineBlockPageGroup *HeadPageGroup;

#pragma mark Utility Functions

#if !__OBJC2__
#define runtimeLock classLock
#endif

#pragma mark Trampoline Management Functions
static TrampolineBlockPageGroup *_allocateTrampolinesAndData()
{
    runtimeLock.assertLocked();

    vm_address_t dataAddress;

    TrampolineBlockPageGroup::check();

    // Our final mapping will look roughly like this:
    //   r/w data
    //   r/o text mapped from libobjc-trampolines.dylib
    // with fixed offsets from the text to the data embedded in the text.
    //
    // More precisely it will look like this:
    //   1 page  r/w data
    //   1 page  libobjc-trampolines.dylib Mach-O header
    //   N pages trampoline code, one for each ArgumentMode
    //   M pages for the rest of libobjc-trampolines' TEXT segment.
    // The kernel requires that we remap the entire TEXT segment every time.
    // We assume that our code begins on the second TEXT page, but are robust
    // against other additions to the end of the TEXT segment.

    assert(HeadPageGroup == nil || HeadPageGroup->nextAvailablePage == nil);

    auto textSource = Trampolines.textSegment();
    auto textSourceSize = Trampolines.textSegmentSize();
    auto dataSize = PAGE_MAX_SIZE;

    // Allocate a single contiguous region big enough to hold data+text.
    kern_return_t result;
    result = vm_allocate(mach_task_self(), &dataAddress,
                         dataSize + textSourceSize,
                         VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION));
    if (result != KERN_SUCCESS) {
        _objc_fatal("vm_allocate trampolines failed (%d)", result);
    }

    // Remap libobjc-trampolines' TEXT segment atop all
    // but the first of the pages we just allocated:
    vm_address_t textDest = dataAddress + dataSize;
    vm_prot_t currentProtection, maxProtection;
    result = vm_remap(mach_task_self(), &textDest,
                      textSourceSize,
                      0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                      mach_task_self(), textSource, TRUE,
                      &currentProtection, &maxProtection, VM_INHERIT_SHARE);
    if (result != KERN_SUCCESS) {
        _objc_fatal("vm_remap trampolines failed (%d)", result);
    }

    TrampolineBlockPageGroup *pageGroup = (TrampolineBlockPageGroup *) dataAddress;
    pageGroup->nextAvailable = pageGroup->startIndex();
    pageGroup->nextPageGroup = nil;
    pageGroup->nextAvailablePage = nil;

    if (HeadPageGroup) {
        TrampolineBlockPageGroup *lastPageGroup = HeadPageGroup;
        while (lastPageGroup->nextPageGroup) {
            lastPageGroup = lastPageGroup->nextPageGroup;
        }
        lastPageGroup->nextPageGroup = pageGroup;
        HeadPageGroup->nextAvailablePage = pageGroup;
    } else {
        HeadPageGroup = pageGroup;
    }

    return pageGroup;
}

static TrampolineBlockPageGroup *
getOrAllocatePageGroupWithNextAvailable()
{
    runtimeLock.assertLocked();

    if (!HeadPageGroup)
        return _allocateTrampolinesAndData();

    // Make sure the head page group is filled first.
    if (HeadPageGroup->nextAvailable != HeadPageGroup->endIndex())
        return HeadPageGroup;

    // Check if there is a page group with a hole.
    if (HeadPageGroup->nextAvailablePage)
        return HeadPageGroup->nextAvailablePage;

    return _allocateTrampolinesAndData();  // tack on a new one
}

static TrampolineBlockPageGroup *
pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex)
{
    runtimeLock.assertLocked();

    // Authenticate as a function pointer, returning an un-signed address.
    uintptr_t trampAddress =
        (uintptr_t)ptrauth_auth_data((const char *)anImp,
                                     ptrauth_key_function_pointer, 0);

    for (TrampolineBlockPageGroup *pageGroup = HeadPageGroup;
         pageGroup;
         pageGroup = pageGroup->nextPageGroup)
    {
        uintptr_t index = pageGroup->indexForTrampoline(trampAddress);
        if (index) {
            if (outIndex) *outIndex = index;
            return pageGroup;
        }
    }

    return nil;
}


static ArgumentMode
argumentModeForBlock(id block)
{
    ArgumentMode aMode = ReturnValueInRegisterArgumentMode;

#if SUPPORT_STRET
    if (_Block_has_signature(block) && _Block_use_stret(block))
        aMode = ReturnValueOnStackArgumentMode;
#else
    assert(! (_Block_has_signature(block) && _Block_use_stret(block)));
#endif

    return aMode;
}


// `block` must already have been copied.
IMP
_imp_implementationWithBlockNoCopy(id block)
{
    runtimeLock.assertLocked();

    TrampolineBlockPageGroup *pageGroup =
        getOrAllocatePageGroupWithNextAvailable();

    uintptr_t index = pageGroup->nextAvailable;
    assert(index >= pageGroup->startIndex() && index < pageGroup->endIndex());
    TrampolineBlockPageGroup::Payload *payload = pageGroup->payload(index);

    uintptr_t nextAvailableIndex = payload->nextAvailable;
    if (nextAvailableIndex == 0) {
        // First time through (unused slots are zero). Fill sequentially.
        // If the page is now full, this will be endIndex(), handled below.
        nextAvailableIndex = index + 1;
    }
    pageGroup->nextAvailable = nextAvailableIndex;
    if (nextAvailableIndex == pageGroup->endIndex()) {
        // The page group is now full (free list or wilderness exhausted).
        // Remove it from the available-page linked list.
        TrampolineBlockPageGroup *iterator = HeadPageGroup;
        while (iterator && (iterator->nextAvailablePage != pageGroup)) {
            iterator = iterator->nextAvailablePage;
        }
        if (iterator) {
            iterator->nextAvailablePage = pageGroup->nextAvailablePage;
            pageGroup->nextAvailablePage = nil;
        }
    }

    payload->block = block;
    return pageGroup->trampoline(argumentModeForBlock(block), index);
}


#pragma mark Public API
IMP imp_implementationWithBlock(id block)
{
    // The block object must be copied outside runtimeLock
    // because the copy can perform arbitrary work.
    block = Block_copy(block);

    // Trampolines must be initialized outside runtimeLock
    // because initialization calls dlopen().
    Trampolines.Initialize();

    mutex_locker_t lock(runtimeLock);

    return _imp_implementationWithBlockNoCopy(block);
}
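
// Example usage (a minimal sketch; `cls` and the selector are hypothetical).
// The block's first parameter stands in for `self`; no _cmd is passed:
//
//   IMP imp = imp_implementationWithBlock(^int(id self, int x) {
//       return x + 1;
//   });
//   class_addMethod(cls, @selector(addOne:), imp, "i@:i");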


id imp_getBlock(IMP anImp) {
    uintptr_t index;
    TrampolineBlockPageGroup *pageGroup;

    if (!anImp) return nil;

    mutex_locker_t lock(runtimeLock);

    pageGroup = pageAndIndexContainingIMP(anImp, &index);

    if (!pageGroup) {
        return nil;
    }

    TrampolineBlockPageGroup::Payload *payload = pageGroup->payload(index);

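    // A free slot holds a free-list index, which is never greater than
    // endIndex(); an allocated slot holds a Block_copy()d pointer, which is
    // always numerically larger. That distinguishes the union's two cases.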
    if (payload->nextAvailable <= TrampolineBlockPageGroup::endIndex()) {
        // unallocated
        return nil;
    }

    return payload->block;
}

BOOL imp_removeBlock(IMP anImp) {

    if (!anImp) return NO;

    id block;

    {
        mutex_locker_t lock(runtimeLock);

        uintptr_t index;
        TrampolineBlockPageGroup *pageGroup =
            pageAndIndexContainingIMP(anImp, &index);

        if (!pageGroup) {
            return NO;
        }

        TrampolineBlockPageGroup::Payload *payload = pageGroup->payload(index);
        block = payload->block;
        // block is released below, outside the lock

        // Push this slot onto the page group's free list.
        payload->nextAvailable = pageGroup->nextAvailable;
        pageGroup->nextAvailable = index;

        // Make sure this page group is on the available linked list.
        TrampolineBlockPageGroup *pageGroupIterator = HeadPageGroup;

        // See if this page group is already some page group's next available.
        while (pageGroupIterator->nextAvailablePage &&
               pageGroupIterator->nextAvailablePage != pageGroup)
        {
            pageGroupIterator = pageGroupIterator->nextAvailablePage;
        }

        if (! pageGroupIterator->nextAvailablePage) {
            // Iteration stopped because nextAvailablePage was nil:
            // this page group is not on the list yet. Add it to the end.
            pageGroupIterator->nextAvailablePage = pageGroup;
            pageGroup->nextAvailablePage = nil;
        }
    }

    // Do this AFTER dropping the lock, since Block_release()
    // can run arbitrary code (the block's dispose helpers).
    Block_release(block);
    return YES;
}