/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLOSRExitCompiler.h"

#if ENABLE(FTL_JIT)

#include "DFGOSRExitCompilerCommon.h"
#include "DFGOSRExitPreparation.h"
#include "FTLExitArgumentForOperand.h"
#include "FTLJITCode.h"
#include "FTLOSRExit.h"
#include "FTLOperations.h"
#include "FTLState.h"
#include "FTLSaveRestore.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "OperandsInlines.h"
#include "JSCInlines.h"
#include "RegisterPreservationWrapperGenerator.h"
#include "RepatchBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

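// Emits code that recovers a single ExitValue into GPRInfo::regT0 - from a constant, a stackmap
// location, the JS stack, an arithmetic recovery, or a previously materialized object - and then
// reboxes it according to its value format. regT1 and regT2 are used as scratch.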
static void compileRecovery(
    CCallHelpers& jit, const ExitValue& value, StackMaps::Record* record, StackMaps& stackmaps,
    char* registerScratch,
    const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
{
    switch (value.kind()) {
    case ExitValueDead:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        break;

    case ExitValueConstant:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
        break;

    case ExitValueArgument:
        record->locations[value.exitArgument().argument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        break;

    case ExitValueInJSStack:
    case ExitValueInJSStackAsInt32:
    case ExitValueInJSStackAsInt52:
    case ExitValueInJSStackAsDouble:
        jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
        break;

    case ExitValueRecovery:
        record->locations[value.rightRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT1);
        record->locations[value.leftRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        switch (value.recoveryOpcode()) {
        case AddRecovery:
            switch (value.recoveryFormat()) {
            case ValueFormatInt32:
                jit.add32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case ValueFormatInt52:
                jit.add64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        case SubRecovery:
            switch (value.recoveryFormat()) {
            case ValueFormatInt32:
                jit.sub32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case ValueFormatInt52:
                jit.sub64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        break;

    case ExitValueMaterializeNewObject:
        jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
        break;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    reboxAccordingToFormat(
        value.valueFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
}
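// Generates the stub for a single OSR exit: recovers every exit value into a scratch buffer,
// materializes any objects whose allocations were sunk, rewrites the stack so that it looks like
// the exiting function was called through the register preservation wrapper, and finally counts
// the exit and jumps to the exit target (handleExitCounts / reifyInlinedCallFrames /
// adjustAndJumpToTarget).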
static void compileStub(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    StackMaps::Record* record = nullptr;

    for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
        record = &jitCode->stackmaps.records[i];
        if (record->patchpointID == exit.m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);

    // This code requires that framePointerRegister be the same as callFrameRegister.
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(vm, codeBlock);
    // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
    // fixup, to hold pointers to all of the objects we materialize, and to hold the elements
    // inside those objects that we materialize.

    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        numMaterializations++;

        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }

    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_values.size() + numMaterializations + maxMaterializationNumArguments) +
        requiredScratchMemorySizeInBytes() +
        jitCode->unwindInfo.m_registers.size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    EncodedJSValue* materializationPointers = scratch + exit.m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());
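    // The scratch buffer is carved up as: exit values, then materialization pointers, then
    // materialization arguments, then register scratch, then unwind scratch.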

    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }

    // Note that when we come in here, the stack is as LLVM left it except that someone called pushToSave().
    // We don't care about the value they saved. But we do appreciate the fact that they did it, because we use
    // that slot for saveAllRegisters().

    saveAllRegisters(jit, registerScratch);

    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();

    if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
        Profiler::Database& database = *vm->m_perBytecodeProfiler;
        Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Get the call frame and tag thingies.
    // Restore the exiting function's callFrame value into regT4.
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

    // Do some value profiling.
    if (exit.m_profileValueFormat != InvalidValueFormat) {
        record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_profileValueFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeOffset()), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Materialize all objects. Don't materialize an object until all of the objects it needs
    // have been materialized. Curiously, this is the only place where we have an algorithm that prevents
    // OSR exit from handling cyclic object materializations. Of course, object allocation sinking
    // currently wouldn't recognize a cycle as being sinkable - but if it did, then the only thing that
    // would have to change is this fixpoint. Instead we would allocate the objects first and populate
    // them with data later.
    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
        toMaterialize.add(materialization);

    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since one of its fields points to a materialization
                    // that hasn't been materialized.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we recover the values of all of
            // its fields and then we call a function to actually allocate the beast.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitValue& value = materialization->properties()[propertyIndex].value();
                compileRecovery(
                    jit, value, record, jitCode->stackmaps, registerScratch,
                    materializationToPointer);
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }

            // This call assumes that we don't pass arguments on the stack.
            jit.setupArgumentsWithExecState(
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }

        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.

    for (unsigned index = exit.m_values.size(); index--;) {
        compileRecovery(
            jit, exit.m_values[index], record, jitCode->stackmaps, registerScratch,
            materializationToPointer);
        jit.store64(GPRInfo::regT0, scratch + index);
    }

    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things either according to exit.m_values or by copying from the
    // old frame, and finally we save the various callee-save registers into where the
    // restoration thunk would restore them from.

    ptrdiff_t offset = registerPreservationOffset();
    RegisterSet toSave = registersToPreserve();

    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = jitCode->unwindInfo.m_registers.size(); i--;) {
        RegisterAtOffset entry = jitCode->unwindInfo.m_registers[i];
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }

    jit.load32(CCallHelpers::payloadFor(JSStack::ArgumentCount), GPRInfo::regT2);

    // Let's say that the FTL function had failed its arity check. In that case, the stack will
    // contain some extra stuff.
    //
    // First we compute the padded stack space:
    //
    //     paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1)
    //
    // The stack will have regT2 + CallFrameHeaderSize stuff, but above it there will be
    // paddedStackSpace gunk used by the arity check fail restoration thunk. When that happens
    // we want to make the stack look like this, from higher addresses down:
    //
    //     - register preservation return PC
    //     - preserved registers
    //     - arity check fail return PC
    //     - argument padding
    //     - actual arguments
    //     - call frame header
    //
    // So that the actual call frame header appears to return to the arity check fail return
    // PC, and that then returns to the register preservation thunk. The arity check thunk that
    // we return to will have the padding size encoded into it. It will then know to return
    // into the register preservation thunk, which uses the argument count to figure out where
    // registers are preserved.

    // This code assumes that we're dealing with FunctionCode.
    RELEASE_ASSERT(codeBlock->codeType() == FunctionCode);

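    // Compute the rounded-up arity padding (the paddedStackSpace described above) in regT3, and
    // if the arity check had failed, add it to the argument count in regT2.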
    jit.add32(
        MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2,
        GPRInfo::regT3);
    MacroAssembler::Jump arityIntact = jit.branch32(
        MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0));
    jit.neg32(GPRInfo::regT3);
    jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3);
    jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3);
    jit.add32(GPRInfo::regT3, GPRInfo::regT2);
    arityIntact.link(&jit);

    // First set up SP so that our data doesn't get clobbered by signals.
    unsigned conservativeStackDelta =
        registerPreservationOffset() +
        exit.m_values.numberOfLocals() * sizeof(Register) +
        maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    jit.subPtr(
        MacroAssembler::TrustedImm32(registerPreservationOffset()),
        MacroAssembler::framePointerRegister);

    // Copy the old frame data into its new location.
    jit.add32(MacroAssembler::TrustedImm32(JSStack::CallFrameHeaderSize), GPRInfo::regT2);
    jit.move(MacroAssembler::framePointerRegister, GPRInfo::regT1);
    MacroAssembler::Label loop = jit.label();
    jit.sub32(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(MacroAssembler::Address(GPRInfo::regT1, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT1);
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(Register)), GPRInfo::regT1);
    jit.branchTest32(MacroAssembler::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT1 points to where we would save our registers. Save them here.
    ptrdiff_t currentOffset = 0;
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!toSave.get(reg))
            continue;
        currentOffset += sizeof(Register);
        unsigned unwindIndex = jitCode->unwindInfo.indexOf(reg);
        if (unwindIndex == UINT_MAX) {
            // The FTL compilation didn't preserve this register. This means that it also
            // didn't use the register. So its value at the beginning of OSR exit should be
            // preserved by the thunk. Luckily, we saved all registers into the register
            // scratch buffer, so we can restore them from there.
            jit.load64(registerScratch + offsetOfReg(reg), GPRInfo::regT0);
        } else {
            // The FTL compilation preserved the register. Its new value is therefore
            // irrelevant, but we can get the value that was preserved by using the unwind
            // data. We've already copied all unwind-able preserved registers into the unwind
            // scratch buffer, so we can get it from there.
            jit.load64(unwindScratch + unwindIndex, GPRInfo::regT0);
        }
        jit.store64(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT1, currentOffset));
    }

    // We need to make sure that we return into the register restoration thunk. This works
    // differently depending on whether or not we had arity issues.
    MacroAssembler::Jump arityIntactForReturnPC = jit.branch32(
        MacroAssembler::GreaterThanOrEqual,
        CCallHelpers::payloadFor(JSStack::ArgumentCount),
        MacroAssembler::TrustedImm32(codeBlock->numParameters()));

    // The return PC in the call frame header points at exactly the right arity restoration
    // thunk. We don't want to change that. But the arity restoration thunk's frame has a
    // return PC and we want to reroute that to our register restoration thunk. The arity
    // restoration's return PC is just below regT1, and the register restoration's return PC
    // is right at regT1.
    jit.loadPtr(MacroAssembler::Address(GPRInfo::regT1, -static_cast<ptrdiff_t>(sizeof(Register))), GPRInfo::regT0);
    jit.storePtr(GPRInfo::regT0, GPRInfo::regT1);
    jit.storePtr(
        MacroAssembler::TrustedImmPtr(vm->getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        MacroAssembler::Address(GPRInfo::regT1, -static_cast<ptrdiff_t>(sizeof(Register))));

    MacroAssembler::Jump arityReturnPCReady = jit.jump();

    arityIntactForReturnPC.link(&jit);

    jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, CallFrame::returnPCOffset()), GPRInfo::regT0);
    jit.storePtr(GPRInfo::regT0, GPRInfo::regT1);
    jit.storePtr(
        MacroAssembler::TrustedImmPtr(vm->getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        MacroAssembler::Address(MacroAssembler::framePointerRegister, CallFrame::returnPCOffset()));

    arityReturnPCReady.link(&jit);

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed so we just move them.
    for (unsigned index = exit.m_values.size(); index--;) {
        int operand = exit.m_values.operandForIndex(index);

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(static_cast<VirtualRegister>(operand)));
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(jit, exit);

    LinkBuffer patchBuffer(*vm, jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer,
        ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s, and record = %s",
            exitID, toCString(exit.m_codeOrigin).data(),
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
            toCString(*record).data()));
}

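// Entry point for lazy OSR exit compilation: builds the stub for the requested exit and repatches
// the exit's jump to point at it, so that subsequent takings of this exit go straight to the
// compiled stub.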
extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
{
    SamplingRegion samplingRegion("FTL OSR Exit Compilation");

    if (shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit())
        dataLog("Compiling OSR exit with exitID = ", exitID, "\n");

    CodeBlock* codeBlock = exec->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::FTLJIT);

    VM* vm = &exec->vm();

    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm->heap);

    JITCode* jitCode = codeBlock->jitCode()->ftl();
    OSRExit& exit = jitCode->osrExit[exitID];

    if (shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) {
        dataLog(" Owning block: ", pointerDump(codeBlock), "\n");
        dataLog(" Origin: ", exit.m_codeOrigin, "\n");
        if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin)
            dataLog(" Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n");
        dataLog(" Exit values: ", exit.m_values, "\n");
        if (!exit.m_materializations.isEmpty()) {
            dataLog(" Materializations:\n");
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                dataLog(" ", pointerDump(materialization), "\n");
        }
    }

    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);

    compileStub(exitID, jitCode, exit, vm, codeBlock);

    RepatchBuffer repatchBuffer(codeBlock);
    repatchBuffer.relink(
        exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));

    return exit.m_code.code().executableAddress();
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)