/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLThunks.h"

#if ENABLE(FTL_JIT)

#include "AssemblyHelpers.h"
#include "FPRInfo.h"
#include "FTLOSRExitCompiler.h"
#include "FTLSaveRestore.h"
#include "GPRInfo.h"
#include "LinkBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
{
    AssemblyHelpers jit(vm, 0);

    // Note that the "return address" will be the OSR exit ID.
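    // In other words, control does not arrive here via an ordinary call: the exiting
    // FTL code leaves the OSR exit's index where a return address would normally
    // live, and we peek() it back off the stack below.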

    ptrdiff_t stackMisalignment = MacroAssembler::pushToSaveByteOffset();

    // Pretend that we're a C call frame.
    jit.pushToSave(MacroAssembler::framePointerRegister);
    jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister);
    stackMisalignment += MacroAssembler::pushToSaveByteOffset();

    // Now create ourselves enough stack space to give saveAllRegisters() a scratch slot.
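    // This loop doubles as stack realignment: keep pushing until the total bytes
    // pushed (including the exit-ID "return address" slot counted in
    // stackMisalignment's initial value) reach a multiple of stackAlignmentBytes(),
    // so that the C call below sees an ABI-aligned stack.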
    unsigned numberOfRequiredPops = 0;
    do {
        jit.pushToSave(GPRInfo::regT0);
        stackMisalignment += MacroAssembler::pushToSaveByteOffset();
        numberOfRequiredPops++;
    } while (stackMisalignment % stackAlignmentBytes());

    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(requiredScratchMemorySizeInBytes());
    char* buffer = static_cast<char*>(scratchBuffer->dataBuffer());

    saveAllRegisters(jit, buffer);

    // Tell GC mark phase how much of the scratch buffer is active during call.
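    // Presumably the GC scans a ScratchBuffer up to its active length, so publishing
    // the full size here keeps the saved register contents - which may include boxed
    // JSValues - alive while compileFTLOSRExit runs; it is zeroed again below.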
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::nonArgGPR1);
    jit.storePtr(MacroAssembler::TrustedImmPtr(requiredScratchMemorySizeInBytes()), GPRInfo::nonArgGPR1);

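    // Set up the two arguments for compileFTLOSRExit: argumentGPR0 takes the ExecState*
    // (the call frame register itself), and argumentGPR1 takes the OSR exit index,
    // peeked out of the "return address" slot noted above.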
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    jit.peek(
        GPRInfo::argumentGPR1,
        (stackMisalignment - MacroAssembler::pushToSaveByteOffset()) / sizeof(void*));
    MacroAssembler::Call functionCall = jit.call();

    // At this point we want to make a tail call to what was returned to us in the
    // returnValueGPR. But at the same time as we do this, we must restore all registers.
    // The way we will accomplish this is by arranging to have the tail call target in the
    // return address "slot" (be it a register or the stack).

    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);

    // Make sure we tell the GC that we're not using the scratch buffer anymore.
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT1);
    jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT1);

    // Prepare for tail call.
    while (numberOfRequiredPops--)
        jit.popToRestore(GPRInfo::regT1);
    jit.popToRestore(MacroAssembler::framePointerRegister);

    // At this point we're sitting on the return address - so if we did a jump right now, the
    // tail-callee would be happy. Instead we'll stash the callee in the return address and then
    // restore all registers.

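    // restoreReturnAddressBeforeReturn() parks regT0 wherever the return instruction
    // will look for its target - the top of the stack on x86-family targets, the link
    // register on ARM-family targets.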
    jit.restoreReturnAddressBeforeReturn(GPRInfo::regT0);

    restoreAllRegisters(jit, buffer);

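    // Effectively a tail call: every register has been restored, and the "return
    // address" is now the exit code pointer that compileFTLOSRExit handed back.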
    jit.ret();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    patchBuffer.link(functionCall, compileFTLOSRExit);
    return FINALIZE_CODE(patchBuffer, ("FTL OSR exit generation thunk"));
}

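// Debugging aid: when Options::clobberAllRegsInFTLICSlowPath() is set, scribble over
// every register the slow path is permitted to clobber, to flush out inline-cache code
// that wrongly assumes a register survives the slow path call.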
static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber)
{
    if (!Options::clobberAllRegsInFTLICSlowPath())
        return;

    RegisterSet clobber = RegisterSet::allRegisters();
    clobber.exclude(RegisterSet::reservedHardwareRegisters());
    clobber.exclude(RegisterSet::stackRegisters());
    clobber.exclude(RegisterSet::calleeSaveRegisters());
    clobber.exclude(dontClobber);

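    // First pass: poison every clobberable GPR with a recognizable pattern, remembering
    // one of them as a bit source for the FPR pass below. (This assumes at least one
    // GPR survives the exclusions above, which holds in practice.)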
    GPRReg someGPR;
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!clobber.get(reg) || !reg.isGPR())
            continue;

        jit.move(AssemblyHelpers::TrustedImm32(0x1337beef), reg.gpr());
        someGPR = reg.gpr();
    }

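    // Second pass: spread the same poison bit pattern into every clobberable FPR.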
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!clobber.get(reg) || !reg.isFPR())
            continue;

        jit.move64ToDouble(someGPR, reg.fpr());
    }
}

MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey& key)
{
    AssemblyHelpers jit(&vm, 0);

    // We want to save the old return address at the given offset, save the given
    // registers just past that offset, and then finally make the call.

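    // The slot at key.offset() itself is reserved for the incoming return address,
    // stored below, so register saves start one pointer past it.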
    size_t currentOffset = key.offset() + sizeof(void*);

#if CPU(X86) || CPU(X86_64)
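    // Skip one more slot on x86-family targets, presumably because the call into this
    // thunk left its return address on the stack there.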
    currentOffset += sizeof(void*);
#endif

    for (MacroAssembler::RegisterID reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = static_cast<MacroAssembler::RegisterID>(reg + 1)) {
        if (!key.usedRegisters().get(reg))
            continue;
        jit.storePtr(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset));
        currentOffset += sizeof(void*);
    }

    for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = static_cast<MacroAssembler::FPRegisterID>(reg + 1)) {
        if (!key.usedRegisters().get(reg))
            continue;
        jit.storeDouble(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset));
        currentOffset += sizeof(double);
    }

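    // Stash the return address of the call that got us here into the reserved slot.
    // preserveReturnAddressAfterCall() materializes it in a GPR whether it arrived on
    // the stack (x86) or in the link register (ARM).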
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR0);
    jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()));

    registerClobberCheck(jit, key.argumentRegisters());

    AssemblyHelpers::Call call = jit.call();

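    // Reload the saved return address and park it in the return-address position now,
    // before the restore loops below potentially overwrite the scratch register that
    // holds it.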
    jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()), GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);

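    // Restore in the exact reverse of the save order, walking currentOffset back down.
    // The loops break explicitly at the first register, presumably to avoid
    // decrementing the register enum below its lowest value.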
    for (MacroAssembler::FPRegisterID reg = MacroAssembler::lastFPRegister(); ; reg = static_cast<MacroAssembler::FPRegisterID>(reg - 1)) {
        if (key.usedRegisters().get(reg)) {
            currentOffset -= sizeof(double);
            jit.loadDouble(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg);
        }
        if (reg == MacroAssembler::firstFPRegister())
            break;
    }

    for (MacroAssembler::RegisterID reg = MacroAssembler::lastRegister(); ; reg = static_cast<MacroAssembler::RegisterID>(reg - 1)) {
        if (key.usedRegisters().get(reg)) {
            currentOffset -= sizeof(void*);
            jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg);
        }
        if (reg == MacroAssembler::firstRegister())
            break;
    }

    jit.ret();

    LinkBuffer patchBuffer(vm, jit, GLOBAL_THUNK_ID);
    patchBuffer.link(call, FunctionPtr(key.callTarget()));
    return FINALIZE_CODE(patchBuffer, ("FTL slow path call thunk for %s", toCString(key).data()));
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)