/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
25 | ||
26 | #include "config.h" | |
27 | #include "ThunkGenerators.h" | |
28 | ||
29 | #include "CodeBlock.h" | |
81345200 A |
30 | #include "DFGSpeculativeJIT.h" |
31 | #include "JITOperations.h" | |
32 | #include "JSArray.h" | |
33 | #include "JSArrayIterator.h" | |
34 | #include "JSStack.h" | |
ed1e77d3 | 35 | #include "MathCommon.h" |
81345200 A |
36 | #include "MaxFrameExtentForSlowPathCall.h" |
37 | #include "JSCInlines.h" | |
4e4e5a6f | 38 | #include "SpecializedThunkJIT.h" |
93a37866 A |
39 | #include <wtf/InlineASM.h> |
40 | #include <wtf/StringPrintStream.h> | |
6fe7ccc8 | 41 | #include <wtf/text/StringImpl.h> |
4e4e5a6f A |
42 | |
43 | #if ENABLE(JIT) | |
44 | ||
45 | namespace JSC { | |
46 | ||
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) The exception-throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

static MacroAssemblerCodeRef linkPolymorphicCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call %s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkPolymorphicCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, MustPreserveRegisters);
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

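    // First, check that the callee is even a cell. Under the 64-bit value
    // representation a JSValue is a cell exactly when none of the tag bits are
    // set, hence the branchTest64 against TagMask; JSVALUE32_64 compares the
    // tag word against CellTag instead.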
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
233 | ||
81345200 | 234 | MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm) |
93a37866 | 235 | { |
81345200 | 236 | return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired); |
93a37866 A |
237 | } |
238 | ||
81345200 | 239 | MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm) |
93a37866 | 240 | { |
81345200 | 241 | return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired); |
93a37866 A |
242 | } |
243 | ||
81345200 | 244 | MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm) |
93a37866 | 245 | { |
81345200 A |
246 | return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters); |
247 | } | |
93a37866 | 248 | |
81345200 A |
249 | MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm) |
250 | { | |
251 | return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters); | |
93a37866 A |
252 | } |
253 | ||
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif
344 | ||
345 | // Check for an exception | |
346 | #if USE(JSVALUE64) | |
81345200 | 347 | jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2); |
93a37866 A |
348 | JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2); |
349 | #else | |
350 | JSInterfaceJIT::Jump exceptionHandler = jit.branch32( | |
351 | JSInterfaceJIT::NotEqual, | |
ed1e77d3 A |
352 | JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()), |
353 | JSInterfaceJIT::TrustedImm32(0)); | |
93a37866 A |
354 | #endif |
355 | ||
81345200 | 356 | jit.emitFunctionEpilogue(); |
93a37866 A |
357 | // Return. |
358 | jit.ret(); | |
359 | ||
360 | // Handle an exception | |
361 | exceptionHandler.link(&jit); | |
362 | ||
93a37866 | 363 | jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame); |
93a37866 | 364 | |
81345200 A |
365 | #if CPU(X86) && USE(JSVALUE32_64) |
366 | jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister); | |
367 | jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0); | |
368 | jit.push(JSInterfaceJIT::regT0); | |
369 | #else | |
370 | #if OS(WINDOWS) | |
371 | // Allocate space on stack for the 4 parameter registers. | |
372 | jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); | |
373 | #endif | |
374 | jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0); | |
375 | #endif | |
376 | jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3); | |
377 | jit.call(JSInterfaceJIT::regT3); | |
378 | #if CPU(X86) && USE(JSVALUE32_64) | |
379 | jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); | |
380 | #elif OS(WINDOWS) | |
381 | jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); | |
382 | #endif | |
93a37866 | 383 | |
81345200 A |
384 | jit.jumpToExceptionHandler(); |
385 | ||
386 | LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); | |
387 | return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data())); | |
93a37866 A |
388 | } |
389 | ||
390 | MacroAssemblerCodeRef nativeCallGenerator(VM* vm) | |
391 | { | |
392 | return nativeForGenerator(vm, CodeForCall); | |
393 | } | |
394 | ||
81345200 A |
395 | MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm) |
396 | { | |
397 | return nativeForGenerator(vm, CodeForCall, EnterViaJump); | |
398 | } | |
399 | ||
93a37866 A |
400 | MacroAssemblerCodeRef nativeConstructGenerator(VM* vm) |
401 | { | |
402 | return nativeForGenerator(vm, CodeForConstruct); | |
403 | } | |
404 | ||
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
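    //
    // The scheme: the existing frame (CallFrameHeaderSize slots plus the actual
    // arguments) is copied down the stack by the fixup count, the hole this
    // opens up is filled with the undefined value, and the frame and stack
    // pointers are rebased. For example, a caller passing 1 argument to a
    // function expecting 3 would (subject to stack-alignment rounding) have its
    // frame slid down and undefined slots materialized where arguments 2 and 3
    // belong.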
#if USE(JSVALUE64)
# if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

# if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#else
# if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

# if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
#else
    jit.setupResults(GPRInfo::regT0, GPRInfo::regT1);
#endif

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed one argument, which is
    // 'this'. So argument at index 1 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk"));
}

MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
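    // A null StringImpl means the JSString is an unresolved rope; bail to the
    // slow path rather than flattening it here.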
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}

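// Each defineUnaryDoubleOpWrapper() variant below wraps a libm-style function
// in a small assembly veneer with an ad-hoc calling convention, implied by the
// assembly itself: the operand arrives in the leading FP register (xmm0 / d0),
// the result comes back in the same register, and each variant handles its own
// stack alignment. The thunks invoke these veneers through
// callDoubleToDoublePreservingReturn().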
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
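    // Branchless integer abs: regT1 = x >> 31 (all zeros or all ones), then
    // abs(x) = (x + regT1) ^ regT1. The one input this gets wrong is INT_MIN,
    // which has no positive int32 counterpart, so that case falls through to
    // the failure path below.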
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

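    // Non-negative integer exponents use exponentiation by squaring: fpRegT1
    // accumulates the result (starting at 1.0); each iteration multiplies the
    // base in fpRegT0 into the accumulator when the exponent's low bit is set,
    // squares the base, and shifts the exponent right until it reaches zero.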
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

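// Math.imul: truncate both arguments to int32 (taking the double-truncation
// path when either is not already an int) and return the low 32 bits of the
// product.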
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

} // namespace JSC

#endif // ENABLE(JIT)