/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArrayIterator.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {
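// emitPointerValidation is a debug-only guard for the indirect calls and
// jumps in these thunks: a null pointer aborts with a tagged reason, and the
// load8 probe deliberately dereferences the pointer so that a bad (non-null)
// pointer crashes here, close to whatever produced it, instead of at some
// far-away call site.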
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}
// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
static MacroAssemblerCodeRef linkPolymorphicCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call %s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkPolymorphicCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, MustPreserveRegisters);
}
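// The virtual call thunk below implements, in rough pseudocode:
//
//     if (callee & TagMask)        // JSVALUE64: non-cell values have tag bits set
//         goto slowCase;
//     if (callee->structure()->classInfo() != JSFunction::info())
//         goto slowCase;
//     code = callee->executable()->jitCodeWithArityCheck(kind, registers);
//     if (!code)
//         goto slowCase;
//     goto *code;                  // tail call into the compiled callee
//
// where slowCase hands everything to operationVirtualFor() via slowPathFor().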
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
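// nativeForGenerator builds the trampoline used to enter a host (C/C++)
// function from JS. Each per-architecture block below does the same three
// steps for its ABI: pass the ExecState* (the call frame register) as the
// first argument, load the native function pointer out of the callee's
// NativeExecutable, and call it. The shared tail then checks vm->exception
// and either returns or routes to operationVMHandleException. EnterViaJump
// skips the prologue so the thunk can be entered with a jump rather than a
// call.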
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86) && USE(JSVALUE32_64)
    // Calling convention:       f(ecx, edx, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:       f(edi, esi, edx, ecx, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention:       f(ecx, edx, r8, r9, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif
    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    jit.ret();
    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}
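// arityFixupGenerator emits the stub that runs when a function is called with
// fewer arguments than it declares. With K = the fixup amount in slots (regT0
// after the alignment shift below), the emitted code does roughly:
//
//     for (i = 0; i < CallFrameHeaderSize + argumentCount; ++i)
//         frame[i - K] = frame[i];             // slide the live frame down
//     // then fill the K - 1 newly exposed argument slots with undefined
//     callFrameRegister -= K * sizeof(Register);
//     stackPointerRegister -= K * sizeof(Register);
//
// and finally stashes the original return PC in the last new slot while
// installing the return thunk (regT7 on 64-bit, regT5 on 32-bit) as the new
// return PC.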
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
#else
    jit.setupResults(GPRInfo::regT0, GPRInfo::regT1);
#endif

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed one argument, which is
    // 'this'. So argument at index 1 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk"));
}
MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}
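// charToString turns a character code in 'src' into a JSString* in 'dst'
// without allocating: codes below 0x100 index the VM's single-character
// string cache. A missing (null) cache entry, or any code >= 0x100, fails
// over to the slow path, which can allocate.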
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}
MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rax\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0  \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0

#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
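// floor, ceil, and round below share one shape: return int32 arguments
// unchanged, and for doubles try to produce an int32 result cheaply before
// falling back to a call through the wrapper. The truncation fast paths only
// handle non-negative inputs; negative doubles take the slow path for now,
// as the comments inside note.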
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

} // namespace JSC
#endif // ENABLE(JIT)