/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArrayIterator.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {
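// emitPointerValidation aborts if the given pointer is null and then performs a dummy
// load through it, so an invalid pointer faults here, close to the code that produced
// it, rather than at the eventual call site.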
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}
// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
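// linkForThunkGenerator builds the thunk that unlinked calls land on the first time they
// execute; it routes into slowPathFor with the operation that links (and, if necessary,
// lazily compiles) the callee.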
static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
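// The closure-call variant below differs from linkForThunkGenerator only in the slow-path
// operation it invokes.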
static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link closure call %s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
}
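// virtualForThunkGenerator emits the fully polymorphic call path: verify that the callee
// is a JSFunction that already has compiled code for this kind of call, then tail-jump to
// it; on any failed check, fall back to the C++ virtual-call operation.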
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
enum ThunkEntryType { EnterViaCall, EnterViaJump };
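// nativeForGenerator emits the trampoline that bridges from the JS calling convention to
// the host (C/C++) calling convention of a native function, then checks for a pending
// exception on the way out. Each architecture block below moves the ExecState* into the
// first C argument register before making the call.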
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif
    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}
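// arityFixup runs when a function is called with fewer arguments than it declares: it
// slides the frame down by the (stack-aligned) fixup count, fills the missing argument
// slots with undefined, and rethreads the return PC so execution continues in the callee.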
MacroAssemblerCodeRef arityFixup(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#else
#if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}
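// charToString converts a character code in 'src' (which must be below 0x100) into the
// corresponding single-character JSString from the VM's small-strings table, failing if
// the entry has not been materialized yet.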
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
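// The thunks below are the specialized fast paths for String.prototype.charCodeAt,
// String.prototype.charAt and String.fromCharCode.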
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

extern "C" {

double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}
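// Note: jsRound computes ceil(d) and then subtracts 1 when the value is more than 0.5
// below the ceiling, so halfway cases round toward +Infinity, matching Math.round.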
#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rax\n" \
        "ret\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif
defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
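// Math.floor fast path: integer arguments are returned unchanged; non-integers either use
// an in-register floor (where the target supports it) or call out through the floor
// wrapper, and the result is returned as an int32 when it fits and as a double otherwise.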
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());
    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}
static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
    typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
    typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
    typedef SpecializedThunkJIT::Address Address;
    typedef SpecializedThunkJIT::BaseIndex BaseIndex;
    typedef SpecializedThunkJIT::Jump Jump;

    SpecializedThunkJIT jit(vm);
    // Make sure we're being called on an array iterator, and load m_iteratedObject, and m_nextIndex into regT0 and regT1 respectively
    jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);

    // Early exit if we don't have a thunk for this form of iteration
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));

    jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);

    jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);

    // Pull out the butterfly from iteratedObject
    jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

    jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);

    Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
    // Return the termination signal to indicate that we've finished
    jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
    jit.returnJSCell(SpecializedThunkJIT::regT0);

    notDone.link(&jit);

    if (kind == ArrayIterateKey) {
        jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
        jit.returnInt32(SpecializedThunkJIT::regT1);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
    }
    ASSERT(kind == ArrayIterateValue);

    // Okay, now we're returning a value so make sure we're inside the vector size
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));

    // So now we perform inline loads for int32, value/undecided, and double storage
    Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
    Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));

    undecidedStorage.link(&jit);

    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

#if USE(JSVALUE64)
    jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
    Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
    notHole.link(&jit);
    jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
    Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
    notHole.link(&jit);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
    jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
    notContiguousStorage.link(&jit);

    Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    notInt32Storage.link(&jit);

    jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
}
MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
}

MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
}

} // namespace JSC

#endif // ENABLE(JIT)