/*
 * Copyright (c) 2010 Apple Inc. All Rights Reserved.
 * 
 * @APPLE_LICENSE_HEADER_START@
 * 
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 * 
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * 
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
 * objc-block-trampolines.mm
 * Author: b.bum
 *
 **********************************************************************/

/***********************************************************************
 * Imports.
 **********************************************************************/
#include "objc-private.h"
#include "runtime.h"

#include <Block.h>
#include <Block_private.h>
#include <mach/mach.h>

// symbols defined in assembly files
// Don't use the symbols directly; they're thumb-biased on some ARM archs.
#define TRAMP(tramp)                                \
    static inline __unused uintptr_t tramp(void) { \
        extern void *_##tramp;                      \
        return ((uintptr_t)&_##tramp) & ~1UL;       \
    }
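// For reference, a sketch of what the macro expands to for one symbol,
// TRAMP(a1a2_firsttramp); the & ~1UL clears the Thumb bit so the value is
// usable in plain address arithmetic:
//
//   static inline __unused uintptr_t a1a2_firsttramp(void) {
//       extern void *_a1a2_firsttramp;
//       return ((uintptr_t)&_a1a2_firsttramp) & ~1UL;
//   }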
// Scalar return
TRAMP(a1a2_tramphead);   // trampoline header code
TRAMP(a1a2_firsttramp);  // first trampoline
TRAMP(a1a2_trampend);    // after the last trampoline

#if SUPPORT_STRET
// Struct return
TRAMP(a2a3_tramphead);
TRAMP(a2a3_firsttramp);
TRAMP(a2a3_trampend);
#endif

// argument mode identifier
typedef enum {
    ReturnValueInRegisterArgumentMode,
#if SUPPORT_STRET
    ReturnValueOnStackArgumentMode,
#endif

    ArgumentModeCount
} ArgumentMode;


// We must take care with our data layout on architectures that support 
// multiple page sizes.
// 
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
// On some platforms this requires additional linker flags.
// 
// When we allocate a page pair, we use PAGE_MAX_SIZE size. 
// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
// 
// When we allocate a page pair, we use the process's page alignment. 
// This simplifies allocation because we don't need to force greater than 
// default alignment when running with small pages, but it also means 
// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK.
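// For illustration, assuming 4 KB kernel pages and a 16 KB PAGE_MAX_SIZE:
// vm_allocate may hand back a page pair at, say, data 0x6000 / code 0xA000
// (4 KB aligned, not 16 KB aligned). A trampoline at 0xA040 finds its Payload
// at 0xA040 - PAGE_MAX_SIZE = 0x6040, whereas masking with ~PAGE_MAX_MASK
// would yield 0x8000, which is not the data page at all.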

struct TrampolineBlockPagePair 
{
    TrampolineBlockPagePair *nextPagePair; // linked list of all pages
    TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots
    
    uintptr_t nextAvailable; // index of next available slot, endIndex() if no more available
    
    // Payload data: block pointers and free list.
    // Bytes that sit parallel to the trampoline header code hold the fields 
    // above or are unused.
    // uint8_t blocks[ PAGE_MAX_SIZE - sizeof(TrampolineBlockPagePair) ]
    
    // Code: trampoline header followed by trampolines.
    // uint8_t trampolines[PAGE_MAX_SIZE];
    
    // Per-trampoline block data format:
    // initial value is 0 while page data is filled sequentially
    // when filled, value is reference to Block_copy()d block
    // when empty, value is index of next available slot OR 0 if never used yet
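    // Example lifecycle of one slot's Payload value:
    //   0                       never used; the page is still being filled sequentially
    //   (Block_copy'd pointer)  slot is live; its trampoline forwards to this block
    //   (small integer)         slot was freed; the value is the next free slot's index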
    
    union Payload {
        id block;
        uintptr_t nextAvailable;  // free list
    };
    
    static uintptr_t headerSize() {
        return (uintptr_t) (a1a2_firsttramp() - a1a2_tramphead());
    }
    
    static uintptr_t slotSize() {
        return 8;
    }
    
    static uintptr_t startIndex() {
        // headerSize is assumed to be slot-aligned
        return headerSize() / slotSize();
    }
    
    static uintptr_t endIndex() {
        return (uintptr_t)PAGE_MAX_SIZE / slotSize();
    }
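
    // For illustration, assuming PAGE_MAX_SIZE is 16384: endIndex() is 2048
    // eight-byte slots per page, and startIndex() skips the headerSize()/8
    // slots that sit alongside the trampoline header code, so the same index
    // addresses both a Payload on the data page and a trampoline on the code page.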
    
    static bool validIndex(uintptr_t index) {
        return (index >= startIndex() && index < endIndex());
    }
    
    Payload *payload(uintptr_t index) {
        assert(validIndex(index));
        return (Payload *)((char *)this + index*slotSize());
    }
    
    IMP trampoline(uintptr_t index) {
        assert(validIndex(index));
        char *imp = (char *)this + index*slotSize() + PAGE_MAX_SIZE;
#if __arm__
        imp++;  // trampoline is Thumb instructions
#endif
        return (IMP)imp;
    }
    
    uintptr_t indexForTrampoline(IMP tramp) {
        uintptr_t tramp0 = (uintptr_t)this + PAGE_MAX_SIZE;
        uintptr_t start = tramp0 + headerSize();
        uintptr_t end = tramp0 + PAGE_MAX_SIZE;
        uintptr_t address = (uintptr_t)tramp;
        if (address >= start && address < end) {
            return (uintptr_t)(address - tramp0) / slotSize();
        }
        return 0;
    }
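
    // indexForTrampoline() returns 0 to mean "not on this page". That cannot
    // collide with a real slot: check() asserts headerSize() >=
    // sizeof(TrampolineBlockPagePair), so startIndex() is at least 1 and a
    // valid trampoline never maps to index 0.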
    
    static void check() {
        assert(TrampolineBlockPagePair::slotSize() == 8);
        assert(TrampolineBlockPagePair::headerSize() >= sizeof(TrampolineBlockPagePair));
        assert(TrampolineBlockPagePair::headerSize() % TrampolineBlockPagePair::slotSize() == 0);
        
        // _objc_inform("%p %p %p", a1a2_tramphead(), a1a2_firsttramp(), 
        //              a1a2_trampend());
        assert(a1a2_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a1a2_tramphead() + PAGE_MAX_SIZE == a1a2_trampend());
#if SUPPORT_STRET
        // _objc_inform("%p %p %p", a2a3_tramphead(), a2a3_firsttramp(), 
        //              a2a3_trampend());
        assert(a2a3_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a2a3_tramphead() + PAGE_MAX_SIZE == a2a3_trampend());
#endif
        
#if __arm__
        // make sure trampolines are Thumb
        extern void *_a1a2_firsttramp;
        extern void *_a2a3_firsttramp;
        assert(((uintptr_t)&_a1a2_firsttramp) % 2 == 1);
        assert(((uintptr_t)&_a2a3_firsttramp) % 2 == 1);
#endif
    }
    
};

// two sets of trampoline pages; one for stack returns and one for register returns
static TrampolineBlockPagePair *headPagePairs[ArgumentModeCount];

#pragma mark Utility Functions

static inline void _lock() {
#if __OBJC2__
    runtimeLock.write();
#else
    classLock.lock();
#endif
}

static inline void _unlock() {
#if __OBJC2__
    runtimeLock.unlockWrite();
#else
    classLock.unlock();
#endif
}

static inline void _assert_locked() {
#if __OBJC2__
    runtimeLock.assertWriting();
#else
    classLock.assertLocked();
#endif
}

#pragma mark Trampoline Management Functions
static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) 
{
    _assert_locked();

    vm_address_t dataAddress;
    
    TrampolineBlockPagePair::check();

    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];

    if (headPagePair) {
        assert(headPagePair->nextAvailablePage == nil);
    }
    
    // Allocate a writable page pair, then replace its upper half with a 
    // shared mapping of the trampoline template. The remap can fail (for 
    // example if another thread maps something into the hole we just 
    // opened), so retry a few times.
    kern_return_t result;
    for (int i = 0; i < 5; i++) {
        result = vm_allocate(mach_task_self(), &dataAddress, 
                             PAGE_MAX_SIZE * 2,
                             TRUE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION));  // TRUE == VM_FLAGS_ANYWHERE
        if (result != KERN_SUCCESS) {
            mach_error("vm_allocate failed", result);
            return nil;
        }
        
        vm_address_t codeAddress = dataAddress + PAGE_MAX_SIZE;
        result = vm_deallocate(mach_task_self(), codeAddress, PAGE_MAX_SIZE);
        if (result != KERN_SUCCESS) {
            mach_error("vm_deallocate failed", result);
            return nil;
        }
        
        uintptr_t codePage;
        switch(aMode) {
        case ReturnValueInRegisterArgumentMode:
            codePage = a1a2_tramphead();
            break;
#if SUPPORT_STRET
        case ReturnValueOnStackArgumentMode:
            codePage = a2a3_tramphead();
            break;
#endif
        default:
            _objc_fatal("unknown return mode %d", (int)aMode);
            break;
        }
        vm_prot_t currentProtection, maxProtection;
        result = vm_remap(mach_task_self(), &codeAddress, PAGE_MAX_SIZE, 
                          0, FALSE, mach_task_self(), codePage, TRUE, 
                          &currentProtection, &maxProtection, VM_INHERIT_SHARE);
        if (result != KERN_SUCCESS) {
            result = vm_deallocate(mach_task_self(), 
                                   dataAddress, PAGE_MAX_SIZE);
            if (result != KERN_SUCCESS) {
                mach_error("vm_deallocate for retry failed.", result);
                return nil;
            } 
        } else {
            break;
        }
    }
    
    if (result != KERN_SUCCESS) {
        return nil;
    }
    
    TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress;
    pagePair->nextAvailable = pagePair->startIndex();
    pagePair->nextPagePair = nil;
    pagePair->nextAvailablePage = nil;

    if (headPagePair) {
        TrampolineBlockPagePair *lastPagePair = headPagePair;
        while(lastPagePair->nextPagePair)
            lastPagePair = lastPagePair->nextPagePair;
        
        lastPagePair->nextPagePair = pagePair;
        headPagePairs[aMode]->nextAvailablePage = pagePair;
    } else {
        headPagePairs[aMode] = pagePair;
    }
    
    return pagePair;
}
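
// Resulting layout of each page pair (sketch):
//
//   dataAddress                        dataAddress + PAGE_MAX_SIZE
//   +----------------------------------+----------------------------------+
//   | writable data page:              | code page: vm_remap'd view of    |
//   | header fields + Payload slots    | the trampoline template in __TEXT|
//   +----------------------------------+----------------------------------+
//
// Payload slot i and trampoline i live at the same offset within their 
// respective halves, which is what lets a trampoline find its block by 
// subtracting PAGE_MAX_SIZE from its own address.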

static TrampolineBlockPagePair *
_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode) 
{
    _assert_locked();
    
    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];
    
    if (!headPagePair)
        return _allocateTrampolinesAndData(aMode);
    
    // make sure head page is filled first
    if (headPagePair->nextAvailable != headPagePair->endIndex())
        return headPagePair;
    
    if (headPagePair->nextAvailablePage) // check if there is a page w/a hole
        return headPagePair->nextAvailablePage;
    
    return _allocateTrampolinesAndData(aMode); // tack on a new one
}

static TrampolineBlockPagePair *
_pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex, 
                           TrampolineBlockPagePair **outHeadPagePair) 
{
    _assert_locked();

    for (int arg = 0; arg < ArgumentModeCount; arg++) {
        for (TrampolineBlockPagePair *pagePair = headPagePairs[arg]; 
             pagePair;
             pagePair = pagePair->nextPagePair)
        {
            uintptr_t index = pagePair->indexForTrampoline(anImp);
            if (index) {
                if (outIndex) *outIndex = index;
                if (outHeadPagePair) *outHeadPagePair = headPagePairs[arg];
                return pagePair;
            }
        }
    }
    
    return nil;
}


static ArgumentMode 
_argumentModeForBlock(id block)
{
    ArgumentMode aMode = ReturnValueInRegisterArgumentMode;

#if SUPPORT_STRET
    if (_Block_has_signature(block) && _Block_use_stret(block))
        aMode = ReturnValueOnStackArgumentMode;
#else
    assert(! (_Block_has_signature(block) && _Block_use_stret(block)));
#endif
    
    return aMode;
}
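
// For illustration: a block such as ^int(id self, int x) returns in registers 
// and stays in ReturnValueInRegisterArgumentMode. On SUPPORT_STRET 
// architectures, a block whose declared return type is returned through a 
// hidden struct-return argument takes ReturnValueOnStackArgumentMode and is 
// paired with the a2a3 trampolines, since the first argument slot is already 
// occupied by the struct-return address.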


// `block` must already have been copied 
IMP 
_imp_implementationWithBlockNoCopy(id block)
{
    _assert_locked();

    ArgumentMode aMode = _argumentModeForBlock(block);

    TrampolineBlockPagePair *pagePair = 
        _getOrAllocatePagePairWithNextAvailable(aMode);
    if (!headPagePairs[aMode])
        headPagePairs[aMode] = pagePair;

    uintptr_t index = pagePair->nextAvailable;
    assert(index >= pagePair->startIndex() && index < pagePair->endIndex());
    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    
    uintptr_t nextAvailableIndex = payload->nextAvailable;
    if (nextAvailableIndex == 0) {
        // First time through (unused slots are zero). Fill sequentially.
        // If the page is now full, this will be endIndex(); that case is 
        // handled below.
        nextAvailableIndex = index + 1;
    }
    pagePair->nextAvailable = nextAvailableIndex;
    if (nextAvailableIndex == pagePair->endIndex()) {
        // PagePair is now full (free list or wilderness exhausted)
        // Remove from available page linked list
        TrampolineBlockPagePair *iterator = headPagePairs[aMode];
        while(iterator && (iterator->nextAvailablePage != pagePair)) {
            iterator = iterator->nextAvailablePage;
        }
        if (iterator) {
            iterator->nextAvailablePage = pagePair->nextAvailablePage;
            pagePair->nextAvailablePage = nil;
        }
    }
    
    payload->block = block;
    return pagePair->trampoline(index);
}
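
// Free-list behaviour of the allocator above, by example:
//   - On a fresh page, unused Payloads are zero, so slots are handed out 
//     sequentially: index, index + 1, ... up to endIndex().
//   - imp_removeBlock() (below) pushes a freed slot onto the page's free 
//     list: the freed Payload stores the page's old nextAvailable, and the 
//     page's nextAvailable becomes the freed index.
//   - The next allocation therefore reuses the most recently freed slot first.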


#pragma mark Public API
IMP imp_implementationWithBlock(id block) 
{
    block = Block_copy(block);
    _lock();
    IMP returnIMP = _imp_implementationWithBlockNoCopy(block);
    _unlock();
    return returnIMP;
}
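
// Example use from client code (illustrative; `cls` and `sel` are assumed to 
// exist, and the method is assumed to take one int and return an int). Note 
// that the block receives self as its first argument and has no _cmd:
//
//   IMP imp = imp_implementationWithBlock(^int(id self, int x) {
//       return x + 1;
//   });
//   class_addMethod(cls, sel, imp, "i@:i");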


id imp_getBlock(IMP anImp) {
    uintptr_t index;
    TrampolineBlockPagePair *pagePair;
    
    if (!anImp) return nil;
    
    _lock();
    
    pagePair = _pageAndIndexContainingIMP(anImp, &index, nil);
    
    if (!pagePair) {
        _unlock();
        return nil;
    }
    
    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    
    if (payload->nextAvailable <= TrampolineBlockPagePair::endIndex()) {
        // Slot is unallocated: a live slot holds a Block_copy'd pointer, 
        // which is always numerically greater than endIndex(); anything 
        // this small is free-list state (0 or a slot index).
        _unlock();
        return nil;
    }
    
    _unlock();
    
    return payload->block;
}

BOOL imp_removeBlock(IMP anImp) {
    TrampolineBlockPagePair *pagePair;
    TrampolineBlockPagePair *headPagePair;
    uintptr_t index;
    
    if (!anImp) return NO;
    
    _lock();
    pagePair = _pageAndIndexContainingIMP(anImp, &index, &headPagePair);
    
    if (!pagePair) {
        _unlock();
        return NO;
    }
    
    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    id block = payload->block;
    // block is released below
    
    payload->nextAvailable = pagePair->nextAvailable;
    pagePair->nextAvailable = index;
    
    // make sure this page is on available linked list
    TrampolineBlockPagePair *pagePairIterator = headPagePair;
    
    // see if page is the next available page for any existing pages
    while (pagePairIterator->nextAvailablePage && 
           pagePairIterator->nextAvailablePage != pagePair)
    {
        pagePairIterator = pagePairIterator->nextAvailablePage;
    }
    
    if (! pagePairIterator->nextAvailablePage) {
        // if iteration stopped because nextAvail was nil, 
        // add to end of list.
        pagePairIterator->nextAvailablePage = pagePair;
        pagePair->nextAvailablePage = nil;
    }
    
    _unlock();
    Block_release(block);
    return YES;
}
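
// Usage note (illustrative): imp_removeBlock() releases the copied block, so 
// the caller is responsible for first ensuring the trampoline IMP is no 
// longer installed in any method list and can no longer be invoked, e.g. by 
// calling method_setImplementation() with a replacement IMP beforehand.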