/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "FTLOSRExitCompiler.h"

#if ENABLE(FTL_JIT)

#include "DFGOSRExitCompilerCommon.h"
#include "DFGOSRExitPreparation.h"
#include "FTLExitArgumentForOperand.h"
#include "FTLJITCode.h"
#include "FTLOSRExit.h"
#include "FTLOperations.h"
#include "FTLSaveRestore.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "OperandsInlines.h"
#include "JSCInlines.h"
#include "RegisterPreservationWrapperGenerator.h"
#include "RepatchBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

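// Emits code that recovers a single ExitValue into GPRInfo::regT0 (clobbering regT1 and regT2
// as reboxing scratch), pulling the value from a constant, a stackmap location, a stack slot,
// an arithmetic recovery, or a previously materialized object, as the value's kind dictates.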
static void compileRecovery(
    CCallHelpers& jit, const ExitValue& value, StackMaps::Record* record, StackMaps& stackmaps,
    char* registerScratch,
    const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
{
    switch (value.kind()) {
    case ExitValueDead:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        break;

    case ExitValueConstant:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
        break;

    case ExitValueArgument:
        record->locations[value.exitArgument().argument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        break;

    case ExitValueInJSStack:
    case ExitValueInJSStackAsInt32:
    case ExitValueInJSStackAsInt52:
    case ExitValueInJSStackAsDouble:
        jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
        break;

    case ExitValueRecovery:
        record->locations[value.rightRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT1);
        record->locations[value.leftRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        switch (value.recoveryOpcode()) {
        case AddRecovery:
            switch (value.recoveryFormat()) {
            case ValueFormatInt32:
                jit.add32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case ValueFormatInt52:
                jit.add64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        case SubRecovery:
            switch (value.recoveryFormat()) {
            case ValueFormatInt32:
                jit.sub32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case ValueFormatInt52:
                jit.sub64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        break;

    case ExitValueMaterializeNewObject:
        jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
        break;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    reboxAccordingToFormat(
        value.valueFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
}
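
// Builds the one-off stub for a single OSR exit: it recovers every exit value into a scratch
// buffer, materializes any sunken objects, reshapes the FTL frame into the form the lower
// tiers expect (including arity and register-preservation fixup), and then jumps to the
// equivalent point in lower-tier code.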
static void compileStub(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    StackMaps::Record* record = nullptr;

    for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
        record = &jitCode->stackmaps.records[i];
        if (record->patchpointID == exit.m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);

    // This code requires framePointerRegister to be the same as callFrameRegister.
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(vm, codeBlock);

    // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
    // fixup, pointers to all of the objects we materialize, and the elements inside those objects
    // that we materialize.

    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        numMaterializations++;

        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }

    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_values.size() + numMaterializations + maxMaterializationNumArguments) +
        requiredScratchMemorySizeInBytes() +
        jitCode->unwindInfo.m_registers.size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    EncodedJSValue* materializationPointers = scratch + exit.m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());
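
    // The buffer carved up above is laid out, in order:
    //
    //     scratch[0 .. exit.m_values.size())                  the reboxed exit values
    //     materializationPointers[0 .. numMaterializations)   pointers to materialized objects
    //     materializationArguments[0 .. maxMaterializationNumArguments)
    //                                                         property values for one materialization call
    //     registerScratch                                     the full register file saved on entry
    //     unwindScratch                                       registers the FTL code itself preserved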

    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }

    // Note that when we come in here, the stack is as LLVM left it except that someone called
    // pushToSave(). We don't care about the value they saved, but we do appreciate that they
    // did it, because we use that slot for saveAllRegisters().

    saveAllRegisters(jit, registerScratch);

    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();

    if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
        Profiler::Database& database = *vm->m_perBytecodeProfiler;
        Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Get the call frame and tag thingies.
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);
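
    // The two pinned tag registers are assumed to hold the boxing constants by the JIT-generated
    // code we may exit into; the LLVM-generated FTL code does not keep them live, so they have to
    // be re-established here before running any of that code.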

    // Do some value profiling.
    if (exit.m_profileValueFormat != InvalidValueFormat) {
        record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_profileValueFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeOffset()), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
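                // The shift-and-or above computes (1 << indexingType) and ORs it into the
                // profile's arrayModes word, recording that this indexing type was seen here.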
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Materialize all objects. Don't materialize an object until all of the objects it needs
    // have been materialized. Curiously, this is the only place where we have an algorithm that
    // prevents OSR exit from handling cyclic object materializations. Of course, object
    // allocation sinking currently wouldn't recognize a cycle as being sinkable - but if it did,
    // then the only thing that would have to change is this fixpoint. Instead we would allocate
    // the objects first and populate them with data later.
    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
        toMaterialize.add(materialization);

    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since one of its fields points to a materialization
                    // that hasn't been materialized yet.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we recover the values of all of
            // its fields and then we call a function to actually allocate the beast.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitValue& value = materialization->properties()[propertyIndex].value();
                compileRecovery(
                    jit, value, record, jitCode->stackmaps, registerScratch,
                    materializationToPointer);
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }

            // This call assumes that we don't pass arguments on the stack.
            jit.setupArgumentsWithExecState(
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }

        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

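    // For example, if object A has a field pointing at object B and B's fields are all
    // non-materializations, then B is materialized on the first pass through the worklist and
    // A on the second; each pass must shrink toMaterialize, which is what the assertion checks.
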
    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.

    for (unsigned index = exit.m_values.size(); index--;) {
        compileRecovery(
            jit, exit.m_values[index], record, jitCode->stackmaps, registerScratch,
            materializationToPointer);
        jit.store64(GPRInfo::regT0, scratch + index);
    }

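    // Everything is recovered into the scratch buffer before the frame is touched because a
    // recovery may read stack slots or registers that the frame reshaping below clobbers; the
    // values are copied back into their operand slots only after the frame is rebuilt.
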
    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things according to either exit.m_values or by copying from the
    // old frame, and finally we save the various callee-save registers into where the
    // restoration thunk would restore them from.

    ptrdiff_t offset = registerPreservationOffset();
    RegisterSet toSave = registersToPreserve();

    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = jitCode->unwindInfo.m_registers.size(); i--;) {
        RegisterAtOffset entry = jitCode->unwindInfo.m_registers[i];
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }

    jit.load32(CCallHelpers::payloadFor(JSStack::ArgumentCount), GPRInfo::regT2);

    // Let's say that the FTL function had failed its arity check. In that case, the stack will
    // contain some extra stuff.
    //
    // First we compute the padded stack space:
    //
    //     paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1)
    //
    // The stack will have regT2 + CallFrameHeaderSize stuff, but above it there will be
    // paddedStackSpace gunk used by the arity check fail restoration thunk. When that happens
    // we want to make the stack look like this, from higher addresses down:
    //
    //     - register preservation return PC
    //     - preserved registers
    //     - arity check fail return PC
    //     - argument padding
    //     - actual arguments
    //     - call frame header
    //
    // So that the actual call frame header appears to return to the arity check fail return
    // PC, and that then returns to the register preservation thunk. The arity check thunk that
    // we return to will have the padding size encoded into it. It will then know to return
    // into the register preservation thunk, which uses the argument count to figure out where
    // registers are preserved.

    // This code assumes that we're dealing with FunctionCode.
    RELEASE_ASSERT(codeBlock->codeType() == FunctionCode);

    jit.add32(
        MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2,
        GPRInfo::regT3);
    MacroAssembler::Jump arityIntact = jit.branch32(
        MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0));
    jit.neg32(GPRInfo::regT3);
    jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3);
    jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3);
    jit.add32(GPRInfo::regT3, GPRInfo::regT2);
    arityIntact.link(&jit);
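
    // Worked example (assuming stackAlignmentRegisters() == 2): with numParameters == 5 and
    // regT2 == 2 actual arguments, regT3 = 2 - 5 = -3, so arity was broken; negating gives 3
    // missing slots, and adding the one return-PC slot and rounding up to the 2-register
    // alignment gives paddedStackSpace = 4, so regT2 becomes 2 + 4 = 6.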

    // First set up SP so that our data doesn't get clobbered by signals.
    unsigned conservativeStackDelta =
        registerPreservationOffset() +
        exit.m_values.numberOfLocals() * sizeof(Register) +
        maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    jit.subPtr(
        MacroAssembler::TrustedImm32(registerPreservationOffset()),
        MacroAssembler::framePointerRegister);

    // Copy the old frame data into its new location.
    jit.add32(MacroAssembler::TrustedImm32(JSStack::CallFrameHeaderSize), GPRInfo::regT2);
    jit.move(MacroAssembler::framePointerRegister, GPRInfo::regT1);
    MacroAssembler::Label loop = jit.label();
    jit.sub32(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(MacroAssembler::Address(GPRInfo::regT1, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT1);
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(Register)), GPRInfo::regT1);
    jit.branchTest32(MacroAssembler::NonZero, GPRInfo::regT2).linkTo(loop, &jit);
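
    // In the loop above, regT2 counts the slots to move (call frame header plus arguments,
    // padded if arity was fixed up), and each iteration copies one Register from the old frame -
    // registerPreservationOffset() bytes above the new FP - down to its new location at regT1.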

    // At this point regT1 points to where we would save our registers. Save them here.
    ptrdiff_t currentOffset = 0;
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!toSave.get(reg))
            continue;
        currentOffset += sizeof(Register);
        unsigned unwindIndex = jitCode->unwindInfo.indexOf(reg);
        if (unwindIndex == UINT_MAX) {
            // The FTL compilation didn't preserve this register. This means that it also
            // didn't use the register. So its value at the beginning of OSR exit should be
            // preserved by the thunk. Luckily, we saved all registers into the register
            // scratch buffer, so we can restore them from there.
            jit.load64(registerScratch + offsetOfReg(reg), GPRInfo::regT0);
        } else {
            // The FTL compilation preserved the register. Its new value is therefore
            // irrelevant, but we can get the value that was preserved by using the unwind
            // data. We've already copied all unwind-able preserved registers into the unwind
            // scratch buffer, so we can get it from there.
            jit.load64(unwindScratch + unwindIndex, GPRInfo::regT0);
        }
        jit.store64(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT1, currentOffset));
    }

    // We need to make sure that we return into the register restoration thunk. This works
    // differently depending on whether or not we had arity issues.
    MacroAssembler::Jump arityIntactForReturnPC = jit.branch32(
        MacroAssembler::GreaterThanOrEqual,
        CCallHelpers::payloadFor(JSStack::ArgumentCount),
        MacroAssembler::TrustedImm32(codeBlock->numParameters()));

    // The return PC in the call frame header points at exactly the right arity restoration
    // thunk. We don't want to change that. But the arity restoration thunk's frame has a
    // return PC and we want to reroute that to our register restoration thunk. The arity
    // restoration thunk's return PC is just below regT1, and the register restoration thunk's
    // return PC is right at regT1.
    jit.loadPtr(MacroAssembler::Address(GPRInfo::regT1, -static_cast<ptrdiff_t>(sizeof(Register))), GPRInfo::regT0);
    jit.storePtr(GPRInfo::regT0, GPRInfo::regT1);
    jit.storePtr(
        MacroAssembler::TrustedImmPtr(vm->getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        MacroAssembler::Address(GPRInfo::regT1, -static_cast<ptrdiff_t>(sizeof(Register))));

    MacroAssembler::Jump arityReturnPCReady = jit.jump();

    arityIntactForReturnPC.link(&jit);

    jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, CallFrame::returnPCOffset()), GPRInfo::regT0);
    jit.storePtr(GPRInfo::regT0, GPRInfo::regT1);
    jit.storePtr(
        MacroAssembler::TrustedImmPtr(vm->getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        MacroAssembler::Address(MacroAssembler::framePointerRegister, CallFrame::returnPCOffset()));

    arityReturnPCReady.link(&jit);

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed, so we just move them.
    for (unsigned index = exit.m_values.size(); index--;) {
        int operand = exit.m_values.operandForIndex(index);

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(static_cast<VirtualRegister>(operand)));
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(jit, exit);

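    // These three helpers are the common DFG-style OSR exit epilogue (see
    // DFGOSRExitCompilerCommon.h): bump the exit counters and consider reoptimization,
    // rebuild the call frames of any inlined functions, and jump to the exit target.
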
    LinkBuffer patchBuffer(*vm, jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer,
        ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s, and record = %s",
            exitID, toCString(exit.m_codeOrigin).data(),
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
            toCString(*record).data()));
}

extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
{
    SamplingRegion samplingRegion("FTL OSR Exit Compilation");

    if (shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit())
        dataLog("Compiling OSR exit with exitID = ", exitID, "\n");

    CodeBlock* codeBlock = exec->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::FTLJIT);

    VM* vm = &exec->vm();

    // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm->heap);

    JITCode* jitCode = codeBlock->jitCode()->ftl();
    OSRExit& exit = jitCode->osrExit[exitID];

    if (shouldShowDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) {
        dataLog("    Owning block: ", pointerDump(codeBlock), "\n");
        dataLog("    Origin: ", exit.m_codeOrigin, "\n");
        if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin)
            dataLog("    Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n");
        dataLog("    Exit values: ", exit.m_values, "\n");
        if (!exit.m_materializations.isEmpty()) {
            dataLog("    Materializations:\n");
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                dataLog("        ", pointerDump(materialization), "\n");
        }
    }

    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);

    compileStub(exitID, jitCode, exit, vm, codeBlock);

    RepatchBuffer repatchBuffer(codeBlock);
    repatchBuffer.relink(
        exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
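
    // Relinking redirects the exit's jump - which initially pointed at the lazy-compile
    // thunk that called into compileFTLOSRExit - straight at the stub we just built, so
    // subsequent hits of this exit skip compilation entirely.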

    return exit.m_code.code().executableAddress();
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)