/*
 * Copyright (c) 2010 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
 * objc-block-trampolines.mm
 * Author: b.bum
 *
 **********************************************************************/

/***********************************************************************
 * Imports.
 **********************************************************************/
#include "objc-private.h"
#include "runtime.h"

#include <Block.h>
#include <Block_private.h>
#include <mach/mach.h>

// symbols defined in assembly files
// Don't use the symbols directly; they're thumb-biased on some ARM archs.
#define TRAMP(tramp)                                    \
    static inline __unused uintptr_t tramp(void) {      \
        extern void *_##tramp;                          \
        return ((uintptr_t)&_##tramp) & ~1UL;           \
    }
// Scalar return
TRAMP(a1a2_tramphead);   // trampoline header code
TRAMP(a1a2_firsttramp);  // first trampoline
TRAMP(a1a2_trampend);    // after the last trampoline

#if SUPPORT_STRET
// Struct return
TRAMP(a2a3_tramphead);
TRAMP(a2a3_firsttramp);
TRAMP(a2a3_trampend);
#endif

// argument mode identifier
typedef enum {
    ReturnValueInRegisterArgumentMode,
#if SUPPORT_STRET
    ReturnValueOnStackArgumentMode,
#endif

    ArgumentModeCount
} ArgumentMode;


// We must take care with our data layout on architectures that support
// multiple page sizes.
//
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
// On some platforms this requires additional linker flags.
//
// When we allocate a page pair, we use PAGE_MAX_SIZE as the size.
// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
//
// When we allocate a page pair, we use the process's page alignment.
// This simplifies allocation because we don't need to force greater-than-
// default alignment when running with small pages, but it also means
// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK.
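
// Illustrative sketch (comment only, not part of the build): because data
// and code are mapped exactly PAGE_MAX_SIZE apart, a trampoline whose code
// runs at address `pc` finds its block slot with plain subtraction, roughly:
//
//   Payload *payload = (Payload *)(pc - PAGE_MAX_SIZE);   // hypothetical
//
// The real computation lives in the assembly trampolines. Masking `pc`
// with PAGE_MAX_MASK instead would break when the process runs with pages
// smaller than PAGE_MAX_SIZE, per the caveat above.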

struct TrampolineBlockPagePair
{
    TrampolineBlockPagePair *nextPagePair;      // linked list of all pages
    TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots

    uintptr_t nextAvailable; // index of next available slot, endIndex() if no more available

    // Payload data: block pointers and free list.
    // The bytes that parallel the trampoline header code hold the fields
    // above; the rest are unused.
    // uint8_t blocks[ PAGE_MAX_SIZE - sizeof(TrampolineBlockPagePair) ]

    // Code: trampoline header followed by trampolines.
    // uint8_t trampolines[PAGE_MAX_SIZE];

    // Per-trampoline block data format:
    // the initial value is 0 while page data is filled sequentially;
    // when filled, the value is a reference to the Block_copy()d block;
    // when empty, the value is the index of the next available slot,
    // or 0 if the slot has never been used.

    union Payload {
        id block;
        uintptr_t nextAvailable;  // free list
    };
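
    // Illustration (comment only; not compiled): how a slot's state is
    // encoded in this union, given that valid indexes and endIndex() are
    // tiny compared with any real block pointer:
    //   payload->nextAvailable == 0           never used
    //   payload->nextAvailable <= endIndex()  free; value threads the free list
    //   payload->nextAvailable >  endIndex()  in use; payload->block is the
    //                                         Block_copy()d block
    // imp_getBlock() below relies on exactly this comparison.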

    static uintptr_t headerSize() {
        return (uintptr_t) (a1a2_firsttramp() - a1a2_tramphead());
    }

    static uintptr_t slotSize() {
        return 8;
    }

    static uintptr_t startIndex() {
        // headerSize is assumed to be slot-aligned
        return headerSize() / slotSize();
    }

    static uintptr_t endIndex() {
        return (uintptr_t)PAGE_MAX_SIZE / slotSize();
    }

    static bool validIndex(uintptr_t index) {
        return (index >= startIndex() && index < endIndex());
    }

    Payload *payload(uintptr_t index) {
        assert(validIndex(index));
        return (Payload *)((char *)this + index*slotSize());
    }

    IMP trampoline(uintptr_t index) {
        assert(validIndex(index));
        char *imp = (char *)this + index*slotSize() + PAGE_MAX_SIZE;
#if __arm__
        imp++;  // trampoline is Thumb instructions
#endif
        return (IMP)imp;
    }

    uintptr_t indexForTrampoline(IMP tramp) {
        uintptr_t tramp0 = (uintptr_t)this + PAGE_MAX_SIZE;
        uintptr_t start = tramp0 + headerSize();
        uintptr_t end = tramp0 + PAGE_MAX_SIZE;
        uintptr_t address = (uintptr_t)tramp;
        if (address >= start && address < end) {
            return (uintptr_t)(address - tramp0) / slotSize();
        }
        // 0 is a safe "not found" sentinel: the header occupies the low
        // slots, so no real trampoline ever has index 0.
        return 0;
    }

    static void check() {
        assert(TrampolineBlockPagePair::slotSize() == 8);
        assert(TrampolineBlockPagePair::headerSize() >= sizeof(TrampolineBlockPagePair));
        assert(TrampolineBlockPagePair::headerSize() % TrampolineBlockPagePair::slotSize() == 0);

        // _objc_inform("%p %p %p", a1a2_tramphead(), a1a2_firsttramp(),
        //              a1a2_trampend());
        assert(a1a2_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a1a2_tramphead() + PAGE_MAX_SIZE == a1a2_trampend());
#if SUPPORT_STRET
        // _objc_inform("%p %p %p", a2a3_tramphead(), a2a3_firsttramp(),
        //              a2a3_trampend());
        assert(a2a3_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a2a3_tramphead() + PAGE_MAX_SIZE == a2a3_trampend());
#endif

#if __arm__
        // make sure trampolines are Thumb
        extern void *_a1a2_firsttramp;
        extern void *_a2a3_firsttramp;
        assert(((uintptr_t)&_a1a2_firsttramp) % 2 == 1);
        assert(((uintptr_t)&_a2a3_firsttramp) % 2 == 1);
#endif
    }

};

// two sets of trampoline pages; one for stack returns and one for register returns
static TrampolineBlockPagePair *headPagePairs[ArgumentModeCount];

#pragma mark Utility Functions

static inline void _lock() {
#if __OBJC2__
    runtimeLock.write();
#else
    classLock.lock();
#endif
}

static inline void _unlock() {
#if __OBJC2__
    runtimeLock.unlockWrite();
#else
    classLock.unlock();
#endif
}

static inline void _assert_locked() {
#if __OBJC2__
    runtimeLock.assertWriting();
#else
    classLock.assertLocked();
#endif
}

#pragma mark Trampoline Management Functions
static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode)
{
    _assert_locked();

    vm_address_t dataAddress;

    TrampolineBlockPagePair::check();

    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];

    assert(headPagePair == nil || headPagePair->nextAvailablePage == nil);

    kern_return_t result;
    result = vm_allocate(mach_task_self(), &dataAddress, PAGE_MAX_SIZE * 2,
                         VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION));
    if (result != KERN_SUCCESS) {
        _objc_fatal("vm_allocate trampolines failed (%d)", result);
    }

    vm_address_t codeAddress = dataAddress + PAGE_MAX_SIZE;

    uintptr_t codePage;
    switch(aMode) {
        case ReturnValueInRegisterArgumentMode:
            codePage = a1a2_tramphead();
            break;
#if SUPPORT_STRET
        case ReturnValueOnStackArgumentMode:
            codePage = a2a3_tramphead();
            break;
#endif
        default:
            _objc_fatal("unknown return mode %d", (int)aMode);
            break;
    }

    vm_prot_t currentProtection, maxProtection;
    result = vm_remap(mach_task_self(), &codeAddress, PAGE_MAX_SIZE,
                      0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                      mach_task_self(), codePage, TRUE,
                      &currentProtection, &maxProtection, VM_INHERIT_SHARE);
    if (result != KERN_SUCCESS) {
        // vm_deallocate(mach_task_self(), dataAddress, PAGE_MAX_SIZE * 2);
        _objc_fatal("vm_remap trampolines failed (%d)", result);
    }

    TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress;
    pagePair->nextAvailable = pagePair->startIndex();
    pagePair->nextPagePair = nil;
    pagePair->nextAvailablePage = nil;

    if (headPagePair) {
        TrampolineBlockPagePair *lastPagePair = headPagePair;
        while(lastPagePair->nextPagePair) {
            lastPagePair = lastPagePair->nextPagePair;
        }
        lastPagePair->nextPagePair = pagePair;
        headPagePairs[aMode]->nextAvailablePage = pagePair;
    } else {
        headPagePairs[aMode] = pagePair;
    }

    return pagePair;
}
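
// After a successful _allocateTrampolinesAndData(), the pair looks like this
// (comment-only illustration, derived from the code above):
//
//   dataAddress                   +-------------------------------+
//                                 | header fields / Payload slots |  writable
//   dataAddress + PAGE_MAX_SIZE   +-------------------------------+
//                                 | trampoline code (vm_remap'd   |  shared with
//                                 | copy of the __TEXT template)  |  the template
//                                 +-------------------------------+
//
// Trampoline N and Payload N sit at the same offset within their respective
// pages, which is what makes the subtract-PAGE_MAX_SIZE trick work.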

static TrampolineBlockPagePair *
_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode)
{
    _assert_locked();

    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];

    if (!headPagePair)
        return _allocateTrampolinesAndData(aMode);

    // make sure head page is filled first
    if (headPagePair->nextAvailable != headPagePair->endIndex())
        return headPagePair;

    if (headPagePair->nextAvailablePage)  // check if there is a page w/a hole
        return headPagePair->nextAvailablePage;

    return _allocateTrampolinesAndData(aMode);  // tack on a new one
}

static TrampolineBlockPagePair *
_pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex,
                           TrampolineBlockPagePair **outHeadPagePair)
{
    _assert_locked();

    for (int arg = 0; arg < ArgumentModeCount; arg++) {
        for (TrampolineBlockPagePair *pagePair = headPagePairs[arg];
             pagePair;
             pagePair = pagePair->nextPagePair)
        {
            uintptr_t index = pagePair->indexForTrampoline(anImp);
            if (index) {
                if (outIndex) *outIndex = index;
                if (outHeadPagePair) *outHeadPagePair = headPagePairs[arg];
                return pagePair;
            }
        }
    }

    return nil;
}


static ArgumentMode
_argumentModeForBlock(id block)
{
    ArgumentMode aMode = ReturnValueInRegisterArgumentMode;

#if SUPPORT_STRET
    if (_Block_has_signature(block) && _Block_use_stret(block))
        aMode = ReturnValueOnStackArgumentMode;
#else
    assert(! (_Block_has_signature(block) && _Block_use_stret(block)));
#endif

    return aMode;
}
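
// Example (comment only): on 32-bit architectures that return large structs
// through a hidden pointer argument, a block such as
//   ^CGRect(id self) { return CGRectZero; }
// is _Block_use_stret() and gets an a2a3 (stret) trampoline, while
//   ^int(id self) { return 0; }
// returns in registers and gets an a1a2 trampoline.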


// `block` must already have been copied
IMP
_imp_implementationWithBlockNoCopy(id block)
{
    _assert_locked();

    ArgumentMode aMode = _argumentModeForBlock(block);

    TrampolineBlockPagePair *pagePair =
        _getOrAllocatePagePairWithNextAvailable(aMode);
    if (!headPagePairs[aMode])
        headPagePairs[aMode] = pagePair;

    uintptr_t index = pagePair->nextAvailable;
    assert(index >= pagePair->startIndex() && index < pagePair->endIndex());
    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);

    uintptr_t nextAvailableIndex = payload->nextAvailable;
    if (nextAvailableIndex == 0) {
        // First time through (unused slots are zero). Fill sequentially.
        // If the page is now full, this will be endIndex(); handled below.
        nextAvailableIndex = index + 1;
    }
    pagePair->nextAvailable = nextAvailableIndex;
    if (nextAvailableIndex == pagePair->endIndex()) {
        // PagePair is now full (free list or wilderness exhausted).
        // Remove it from the available-page linked list.
        TrampolineBlockPagePair *iterator = headPagePairs[aMode];
        while(iterator && (iterator->nextAvailablePage != pagePair)) {
            iterator = iterator->nextAvailablePage;
        }
        if (iterator) {
            iterator->nextAvailablePage = pagePair->nextAvailablePage;
            pagePair->nextAvailablePage = nil;
        }
    }

    payload->block = block;
    return pagePair->trampoline(index);
}
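
// Worked example (comment only), assuming startIndex() == 512 on a fresh page:
//   1st call: index 512; the slot holds 0, so nextAvailable becomes 513
//             (sequential fill).
//   imp_removeBlock() on that IMP: slot 512 now holds 513, and the page's
//             nextAvailable drops back to 512.
//   Next call: index 512 again; the slot holds 513, so nextAvailable becomes
//             513 (free-list pop).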


#pragma mark Public API
IMP imp_implementationWithBlock(id block)
{
    block = Block_copy(block);
    _lock();
    IMP returnIMP = _imp_implementationWithBlockNoCopy(block);
    _unlock();
    return returnIMP;
}
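
// Usage sketch (comment only; the class, selector, and type encoding are
// hypothetical, but this is the intended pattern for these entry points):
//
//   IMP imp = imp_implementationWithBlock(^int(id self, int x) {
//       return x + 1;
//   });
//   class_addMethod([MyClass class], @selector(addOne:), imp, "i@:i");
//   ...
//   id block = imp_getBlock(imp);  // the Block_copy()d block, or nil
//   imp_removeBlock(imp);          // release the block, recycle the slot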


id imp_getBlock(IMP anImp) {
    uintptr_t index;
    TrampolineBlockPagePair *pagePair;

    if (!anImp) return nil;

    _lock();

    pagePair = _pageAndIndexContainingIMP(anImp, &index, nil);

    if (!pagePair) {
        _unlock();
        return nil;
    }

    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);

    if (payload->nextAvailable <= TrampolineBlockPagePair::endIndex()) {
        // unallocated
        _unlock();
        return nil;
    }

    _unlock();

    return payload->block;
}

BOOL imp_removeBlock(IMP anImp) {
    TrampolineBlockPagePair *pagePair;
    TrampolineBlockPagePair *headPagePair;
    uintptr_t index;

    if (!anImp) return NO;

    _lock();
    pagePair = _pageAndIndexContainingIMP(anImp, &index, &headPagePair);

    if (!pagePair) {
        _unlock();
        return NO;
    }

    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    id block = payload->block;
    // block is released below

    payload->nextAvailable = pagePair->nextAvailable;
    pagePair->nextAvailable = index;

    // make sure this page is on the available linked list
    TrampolineBlockPagePair *pagePairIterator = headPagePair;

    // see if page is the next available page for any existing pages
    while (pagePairIterator->nextAvailablePage &&
           pagePairIterator->nextAvailablePage != pagePair)
    {
        pagePairIterator = pagePairIterator->nextAvailablePage;
    }

    if (! pagePairIterator->nextAvailablePage) {
        // if iteration stopped because nextAvailablePage was nil,
        // add this page to the end of the list.
        pagePairIterator->nextAvailablePage = pagePair;
        pagePair->nextAvailablePage = nil;
    }

    _unlock();
    Block_release(block);
    return YES;
}