/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

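// Slow-case tail shared by the link and virtual trampolines: the callee is not a
// JSFunction, so fill in the ScopeChain, ReturnPC and CodeBlock slots of the call
// frame the way a JS function prologue would, call the C++ handler, and return
// into the caller's frame.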
static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
{
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);

    // Also initialize ReturnPC and CodeBlock, like a JS function would.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callNotJSFunction = jit.call();
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.ret();

    return callNotJSFunction;
}

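// Emits the trampoline used by unlinked call sites: for a JSFunction callee it
// finishes call frame setup, calls the lazy-link stub to compile/link the callee,
// and jumps to the code address the stub returns; any other callee falls through
// to the generic slow case above.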
static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#endif // USE(JSVALUE64)

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Also initialize ReturnPC for use by lazy linking and exceptions.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callLazyLink = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callLazyLink, lazyLink);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
}

MacroAssemblerCodeRef linkCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
}

MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
}

MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
}

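// Emits the trampoline for virtual (non-linked) calls: for a JSFunction callee it
// compiles the callee on demand if it has no code for this specialization, then
// jumps to the callee's arity-check entry point; anything else takes the slow case.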
static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
#endif // USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callCompile = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    hasCodeBlock1.link(&jit);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callCompile, compile);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
}

MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
}

MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
}

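// Fast path for reads of String.prototype.length: verify the cell is a JSString,
// load its length and box it as an int32; any failed check tail-calls the generic
// cti_op_get_by_id_string_fail stub.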
MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit;

#if USE(JSVALUE64)
    // Check eax is a string
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds payload, regT1 holds tag

    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}

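// Builds the thunk that calls a host (native) function for the given specialization:
// it finishes call frame setup, passes ExecState* in the platform's first argument
// register, calls the NativeFunction stored in the callee's executable, and then
// checks for a pending exception, propagating it via ctiVMThrowTrampoline.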
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(ARM64Registers::lr, JSStack::ReturnPC);

    // Calling convention: f(x0, x1, x2, x3, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Setup arg0
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    // Set the return address.
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);

    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

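// Loads the character at the int32 index argument of the 'this' string into regT0,
// handling both 8-bit and 16-bit StringImpl representations; out-of-range indices
// and strings whose storage is not yet materialized fail over to the generic path.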
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

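// Converts a character code in 'src' into the corresponding single-character JSString
// from the VM's small strings table, writing it to 'dst'; codes of 0x100 or above, or
// entries not yet allocated, fail over to the generic path.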
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

double jsRound(double) REFERENCED_FROM_ASM;
// Rounds to the nearest integer, with halfway cases rounding up (toward +Infinity), matching Math.round.
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

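// Each defineUnaryDoubleOpWrapper(function) below emits a small assembly thunk that
// forwards a double, passed and returned in the platform's floating-point registers,
// to the C math function, so the specialized thunks can call it without a full C call
// sequence; on platforms with no wrapper the generators fall back to the native call stub.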
#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

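// Math.floor thunk: int32 arguments are returned unchanged; doubles are floored either
// with a hardware instruction (ARM64) or via the floor() wrapper, and the result is
// returned as an int32 whenever it fits.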
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}

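// Math.round thunk: int32 arguments are returned unchanged; doubles are rounded via an
// add-0.5-and-truncate fast path where the hardware supports it, falling back to the
// jsRound() wrapper otherwise.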
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}

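// Math.pow thunk: a non-negative int32 exponent is evaluated with a square-and-multiply
// loop; an exponent of exactly -0.5 (with base > 1) is computed as 1 / sqrt(base);
// every other combination fails over to the native implementation.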
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
}

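// Math.imul thunk: loads both arguments as int32 (truncating double arguments in
// hardware when possible, treating untruncatable doubles as 0) and returns the low
// 32 bits of their product.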
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}

}

#endif // ENABLE(JIT)