/*
 * Copyright (c) 2010 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/***********************************************************************
* objc-block-trampolines.m
**********************************************************************/
/***********************************************************************
* Imports.
**********************************************************************/
#include "objc-private.h"

#include <Block.h>
#include <Block_private.h>
#include <mach/mach.h>
// symbols defined in assembly files
// Don't use the symbols directly; they're thumb-biased on some ARM archs.
#define TRAMP(tramp)                                    \
    static inline __unused uintptr_t tramp(void) {      \
        extern void *_##tramp;                          \
        return ((uintptr_t)&_##tramp) & ~1UL;           \
    }

// Scalar return
TRAMP(a1a2_tramphead);   // trampoline header code
TRAMP(a1a2_firsttramp);  // first trampoline
TRAMP(a1a2_trampend);    // after the last trampoline

// Struct return
TRAMP(a2a3_tramphead);
TRAMP(a2a3_firsttramp);
TRAMP(a2a3_trampend);
// argument mode identifier
typedef enum {
    ReturnValueInRegisterArgumentMode,
    ReturnValueOnStackArgumentMode,

    ArgumentModeCount
} ArgumentMode;
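
// Worked example of the two modes (illustrative; not in the original file):
// with a stret calling convention, a block returning a large struct receives
// a hidden result pointer as its first argument, so its trampoline must
// shift a2->a3 rather than a1->a2:
//
//   ^int(id self)    { ... }   // ReturnValueInRegisterArgumentMode
//   ^CGRect(id self) { ... }   // ReturnValueOnStackArgumentMode (stret)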
// We must take care with our data layout on architectures that support
// multiple page sizes.
//
// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE.
// On some platforms this requires additional linker flags.
//
// When we allocate a page pair, we use PAGE_MAX_SIZE size.
// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE.
//
// When we allocate a page pair, we use the process's page alignment.
// This simplifies allocation because we don't need to force greater-than-
// default alignment when running with small pages, but it also means
// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK.
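
// A minimal sketch of the lookup implied by the rules above (illustrative
// helper, not in the original file; the real lookup happens in the assembly
// trampolines): given a trampoline's address in the code page, its data
// lives exactly PAGE_MAX_SIZE lower.
static inline __unused uintptr_t
_trampolineDataAddress(uintptr_t trampolineAddress)
{
    // Subtract, don't mask: with small hardware pages the page pair is only
    // PAGE_SIZE-aligned, so masking with PAGE_MAX_MASK could leave the pair.
    return trampolineAddress - PAGE_MAX_SIZE;
}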
struct TrampolineBlockPagePair
{
    TrampolineBlockPagePair *nextPagePair; // linked list of all pages
    TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots

    uintptr_t nextAvailable; // index of next available slot, endIndex() if no more available
    // Payload data: block pointers and free list.
    // Bytes parallel with trampoline header code are the fields above or unused
    // uint8_t blocks[ PAGE_MAX_SIZE - sizeof(TrampolineBlockPagePair) ]

    // Code: trampoline header followed by trampolines.
    // uint8_t trampolines[PAGE_MAX_SIZE];

    // Per-trampoline block data format:
    // initial value is 0 while page data is filled sequentially
    // when filled, value is reference to Block_copy()d block
    // when empty, value is index of next available slot OR 0 if never used yet
    // (a worked example follows the union below)
    union Payload {
        id block;
        uintptr_t nextAvailable;  // free list
    };
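
    // Worked example of the encoding above (illustrative): on a fresh page
    // every slot's nextAvailable is 0, so slots are handed out sequentially.
    // If slot 7 is later freed while pagePair->nextAvailable was 9, then
    // payload(7)->nextAvailable becomes 9 and pagePair->nextAvailable
    // becomes 7: slot 7 is reused before the untouched slots from 9 up.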
    static uintptr_t headerSize() {
        return (uintptr_t) (a1a2_firsttramp() - a1a2_tramphead());
    }

    static uintptr_t slotSize() {
        return 8;
    }
    static uintptr_t startIndex() {
        // headerSize is assumed to be slot-aligned
        return headerSize() / slotSize();
    }

    static uintptr_t endIndex() {
        return (uintptr_t)PAGE_MAX_SIZE / slotSize();
    }

    static bool validIndex(uintptr_t index) {
        return (index >= startIndex() && index < endIndex());
    }
    Payload *payload(uintptr_t index) {
        assert(validIndex(index));
        return (Payload *)((char *)this + index*slotSize());
    }
    IMP trampoline(uintptr_t index) {
        assert(validIndex(index));
        char *imp = (char *)this + index*slotSize() + PAGE_MAX_SIZE;
#if __arm__
        imp++;  // trampoline is Thumb instructions
#endif
        return (IMP)imp;
    }
    uintptr_t indexForTrampoline(IMP tramp) {
        uintptr_t tramp0 = (uintptr_t)this + PAGE_MAX_SIZE;
        uintptr_t start = tramp0 + headerSize();
        uintptr_t end = tramp0 + PAGE_MAX_SIZE;
        uintptr_t address = (uintptr_t)tramp;
        if (address >= start && address < end) {
            return (uintptr_t)(address - tramp0) / slotSize();
        }
        return 0;  // not a trampoline from this page pair
    }
    static void check() {
        assert(TrampolineBlockPagePair::slotSize() == 8);
        assert(TrampolineBlockPagePair::headerSize() >= sizeof(TrampolineBlockPagePair));
        assert(TrampolineBlockPagePair::headerSize() % TrampolineBlockPagePair::slotSize() == 0);

        // _objc_inform("%p %p %p", a1a2_tramphead(), a1a2_firsttramp(),
        //              a1a2_trampend());
        assert(a1a2_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a1a2_tramphead() + PAGE_MAX_SIZE == a1a2_trampend());

        // _objc_inform("%p %p %p", a2a3_tramphead(), a2a3_firsttramp(),
        //              a2a3_trampend());
        assert(a2a3_tramphead() % PAGE_SIZE == 0);  // not PAGE_MAX_SIZE
        assert(a2a3_tramphead() + PAGE_MAX_SIZE == a2a3_trampend());

#if __arm__
        // make sure trampolines are Thumb
        extern void *_a1a2_firsttramp;
        extern void *_a2a3_firsttramp;
        assert(((uintptr_t)&_a1a2_firsttramp) % 2 == 1);
        assert(((uintptr_t)&_a2a3_firsttramp) % 2 == 1);
#endif
    }
};
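
// Illustrative only (not in the original file): trampoline(index) and
// indexForTrampoline(imp) are inverses for any valid index, which is what
// lets imp_getBlock() and imp_removeBlock() map an IMP back to its slot.
static inline __unused bool
_trampolineRoundTripSanity(TrampolineBlockPagePair *pagePair, uintptr_t index)
{
    return pagePair->indexForTrampoline(pagePair->trampoline(index)) == index;
}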
// two sets of trampoline pages; one for stack returns and one for register returns
static TrampolineBlockPagePair *headPagePairs[ArgumentModeCount];
#pragma mark Utility Functions

static inline void _lock() {
#if __OBJC2__
    runtimeLock.write();
#else
    classLock.lock();
#endif
}

static inline void _unlock() {
#if __OBJC2__
    runtimeLock.unlockWrite();
#else
    classLock.unlock();
#endif
}

static inline void _assert_locked() {
#if __OBJC2__
    runtimeLock.assertWriting();
#else
    classLock.assertLocked();
#endif
}
#pragma mark Trampoline Management Functions

static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode)
{
    _assert_locked();

    vm_address_t dataAddress;

    TrampolineBlockPagePair::check();

    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];

    assert(headPagePair == nil || headPagePair->nextAvailablePage == nil);

    kern_return_t result;
    result = vm_allocate(mach_task_self(), &dataAddress, PAGE_MAX_SIZE * 2,
                         VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION));
    if (result != KERN_SUCCESS) {
        _objc_fatal("vm_allocate trampolines failed (%d)", result);
    }

    vm_address_t codeAddress = dataAddress + PAGE_MAX_SIZE;
    uintptr_t codePage;
    switch (aMode) {
      case ReturnValueInRegisterArgumentMode:
        codePage = a1a2_tramphead();
        break;
#if SUPPORT_STRET
      case ReturnValueOnStackArgumentMode:
        codePage = a2a3_tramphead();
        break;
#endif
      default:
        _objc_fatal("unknown return mode %d", (int)aMode);
        break;
    }
    vm_prot_t currentProtection, maxProtection;
    result = vm_remap(mach_task_self(), &codeAddress, PAGE_MAX_SIZE,
                      0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                      mach_task_self(), codePage, TRUE,
                      &currentProtection, &maxProtection, VM_INHERIT_SHARE);
    if (result != KERN_SUCCESS) {
        // vm_deallocate(mach_task_self(), dataAddress, PAGE_MAX_SIZE * 2);
        _objc_fatal("vm_remap trampolines failed (%d)", result);
    }
    TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress;
    pagePair->nextAvailable = pagePair->startIndex();
    pagePair->nextPagePair = nil;
    pagePair->nextAvailablePage = nil;

    if (headPagePair) {
        TrampolineBlockPagePair *lastPagePair = headPagePair;
        while (lastPagePair->nextPagePair) {
            lastPagePair = lastPagePair->nextPagePair;
        }
        lastPagePair->nextPagePair = pagePair;
        headPagePairs[aMode]->nextAvailablePage = pagePair;
    } else {
        headPagePairs[aMode] = pagePair;
    }

    return pagePair;
}
static TrampolineBlockPagePair *
_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode)
{
    _assert_locked();

    TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];

    if (!headPagePair)
        return _allocateTrampolinesAndData(aMode);

    // make sure head page is filled first
    if (headPagePair->nextAvailable != headPagePair->endIndex())
        return headPagePair;

    if (headPagePair->nextAvailablePage) // check if there is a page w/a hole
        return headPagePair->nextAvailablePage;

    return _allocateTrampolinesAndData(aMode); // tack on a new one
}
static TrampolineBlockPagePair *
_pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex,
                           TrampolineBlockPagePair **outHeadPagePair)
{
    _assert_locked();

    for (int arg = 0; arg < ArgumentModeCount; arg++) {
        for (TrampolineBlockPagePair *pagePair = headPagePairs[arg];
             pagePair;
             pagePair = pagePair->nextPagePair)
        {
            uintptr_t index = pagePair->indexForTrampoline(anImp);
            if (index) {
                if (outIndex) *outIndex = index;
                if (outHeadPagePair) *outHeadPagePair = headPagePairs[arg];
                return pagePair;
            }
        }
    }

    return nil;
}
static ArgumentMode
_argumentModeForBlock(id block)
{
    ArgumentMode aMode = ReturnValueInRegisterArgumentMode;

#if SUPPORT_STRET
    if (_Block_has_signature(block) && _Block_use_stret(block))
        aMode = ReturnValueOnStackArgumentMode;
#else
    assert(! (_Block_has_signature(block) && _Block_use_stret(block)));
#endif

    return aMode;
}
// `block` must already have been copied
IMP
_imp_implementationWithBlockNoCopy(id block)
{
    _assert_locked();

    ArgumentMode aMode = _argumentModeForBlock(block);

    TrampolineBlockPagePair *pagePair =
        _getOrAllocatePagePairWithNextAvailable(aMode);
    if (!headPagePairs[aMode])
        headPagePairs[aMode] = pagePair;

    uintptr_t index = pagePair->nextAvailable;
    assert(index >= pagePair->startIndex() && index < pagePair->endIndex());
    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    uintptr_t nextAvailableIndex = payload->nextAvailable;
    if (nextAvailableIndex == 0) {
        // First time through (unused slots are zero). Fill sequentially.
        // If the page is now full this will be endIndex(), handled below.
        nextAvailableIndex = index + 1;
    }
    pagePair->nextAvailable = nextAvailableIndex;
    if (nextAvailableIndex == pagePair->endIndex()) {
        // PagePair is now full (free list or wilderness exhausted).
        // Remove it from the available-page linked list.
        TrampolineBlockPagePair *iterator = headPagePairs[aMode];
        while (iterator && (iterator->nextAvailablePage != pagePair)) {
            iterator = iterator->nextAvailablePage;
        }
        if (iterator) {
            iterator->nextAvailablePage = pagePair->nextAvailablePage;
            pagePair->nextAvailablePage = nil;
        }
    }

    payload->block = block;
    return pagePair->trampoline(index);
}
#pragma mark Public API

IMP imp_implementationWithBlock(id block)
{
    block = Block_copy(block);
    _lock();
    IMP returnIMP = _imp_implementationWithBlockNoCopy(block);
    _unlock();
    return returnIMP;
}
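
// Usage sketch (illustrative; not in the original file): turning a block
// into a method implementation. imp_implementationWithBlock() and
// class_addMethod() are public runtime API; the class, selector, and type
// encoding here are the caller's choice. Note that the block receives self
// as its first argument but no _cmd.
static __unused void
_example_addBlockMethod(Class cls, SEL sel)
{
    IMP imp = imp_implementationWithBlock((id)^int(id self) {
        return 42;  // stands in for real method logic
    });
    class_addMethod(cls, sel, imp, "i@:");
}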
id imp_getBlock(IMP anImp) {
    uintptr_t index;
    TrampolineBlockPagePair *pagePair;

    if (!anImp) return nil;

    _lock();

    pagePair = _pageAndIndexContainingIMP(anImp, &index, nil);

    if (!pagePair) {
        _unlock();
        return nil;
    }

    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);

    if (payload->nextAvailable <= TrampolineBlockPagePair::endIndex()) {
        // unallocated slot: the payload holds a free-list index, not a block
        _unlock();
        return nil;
    }

    _unlock();

    return payload->block;
}
BOOL imp_removeBlock(IMP anImp) {
    TrampolineBlockPagePair *pagePair;
    TrampolineBlockPagePair *headPagePair;
    uintptr_t index;

    if (!anImp) return NO;

    _lock();
    pagePair = _pageAndIndexContainingIMP(anImp, &index, &headPagePair);

    if (!pagePair) {
        _unlock();
        return NO;
    }

    TrampolineBlockPagePair::Payload *payload = pagePair->payload(index);
    id block = payload->block;
    // block is released below

    // push this slot onto the page's free list
    payload->nextAvailable = pagePair->nextAvailable;
    pagePair->nextAvailable = index;

    // make sure this page is on the available linked list
    TrampolineBlockPagePair *pagePairIterator = headPagePair;

    // see if page is the next available page for any existing pages
    while (pagePairIterator->nextAvailablePage &&
           pagePairIterator->nextAvailablePage != pagePair)
    {
        pagePairIterator = pagePairIterator->nextAvailablePage;
    }

    if (! pagePairIterator->nextAvailablePage) {
        // iteration stopped because nextAvailablePage was nil;
        // add this page to the end of the list.
        pagePairIterator->nextAvailablePage = pagePair;
        pagePair->nextAvailablePage = nil;
    }

    _unlock();

    Block_release(block);
    return YES;
}
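
// Lifecycle sketch (illustrative; not in the original file): every
// imp_implementationWithBlock() that is no longer needed should be balanced
// by imp_removeBlock(), which returns the slot to the free list and
// releases the copied block.
static __unused void
_example_trampolineLifecycle(void)
{
    IMP imp = imp_implementationWithBlock((id)^int(id self) { return 0; });
    // ... install and use the IMP; ensure it is no longer reachable as a
    // method implementation before removing it ...
    BOOL removed = imp_removeBlock(imp);
    assert(removed);
}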