/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSCInlines.h"
#include "JSStack.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

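// Validates the pointer we are about to call through. In builds with
// assertions enabled, abort on a null pointer, then load a byte from it so
// that a stale or garbage pointer faults here, close to the corruption,
// rather than at the eventual call.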
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returning thunk.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link closure call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

#if CPU(MIPS)
    // Allocate 16 bytes (8-byte aligned) of unused stack space for the 4 argument slots.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

MacroAssemblerCodeRef arityFixup(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with the fixup count (in aligned stack units) in regT0, and with
    // the return thunk in regT5 on 32-bit or regT7 on 64-bit.
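    // In outline: regT0 is converted from aligned stack units to a (negated)
    // count of registers; the copy loop slides the frame header and the
    // existing arguments down by that many slots; the fill loop writes
    // undefined into the newly exposed argument slots; the call frame and
    // stack pointers are rebased to the moved frame; and the original return
    // PC is saved off while the return thunk is installed in its place.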
#if USE(JSVALUE64)
# if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

# if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#else
# if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

# if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

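// Maps a character code in `src` to the corresponding single-character JSString
// from the VM's small strings table. Codes of 0x100 and above, and null table
// entries (presumably strings not yet materialized), fail over to the slow path.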
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}

#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
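// A note on the odd types above: a MathThunk is entered and exited via the
// platform's floating-point argument/return registers rather than through this
// C-level signature, which exists only so that C++ code can name the symbols
// and take their addresses. The per-architecture wrappers below adapt the C
// math functions to that register-based convention.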
extern "C" {

double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}
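
// Worked examples of the expression above, which matches Math.round's
// round-half-toward-positive-infinity semantics:
//   jsRound(2.4):  integer = 3;  3 - 2.4 = 0.6 > 0.5,       so return 3 - 1 = 2.
//   jsRound(2.5):  integer = 3;  3 - 2.5 = 0.5, not > 0.5,  so return 3.
//   jsRound(-2.5): integer = -2; -2 + 2.5 = 0.5, not > 0.5, so return -2.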

}

#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
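
// The x86-64 wrapper pushes a dummy word before the nested call purely to keep
// the stack 16-byte aligned as the ABI requires; the argument and result
// already live in %xmm0, so no marshalling is needed.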

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
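    // Branchless integer abs: regT1 = x >> 31 is 0 for non-negative x and -1
    // for negative x, so (x + regT1) ^ regT1 yields x or -x respectively.
    // abs(INT_MIN) is not representable in int32, so that case is routed to
    // the failure path below.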
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

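    // The loop below is exponentiation by squaring: with the result in fpRegT1
    // (initially 1.0) and the base in fpRegT0, each iteration multiplies the
    // result by the base when the low bit of the exponent is set, squares the
    // base, and shifts the exponent right. For example, x^5 (5 = 0b101) takes
    // three iterations: result = x, base = x^2; base = x^4; result = x * x^4.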
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
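        // This fast path essentially covers only pow(x, -0.5) with x > 1,
        // computed as 1 / sqrt(x); any other non-integer exponent, or a base
        // of 1 or below, fails over to the slow path.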
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
    typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
    typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
    typedef SpecializedThunkJIT::Address Address;
    typedef SpecializedThunkJIT::BaseIndex BaseIndex;
    typedef SpecializedThunkJIT::Jump Jump;

    SpecializedThunkJIT jit(vm);
    // Make sure we're being called on an array iterator, and load m_iteratedObject and m_nextIndex into regT0 and regT1 respectively.
    jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);

    // Early exit if we don't have a thunk for this form of iteration
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));

    jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);

    jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);

    // Pull out the butterfly from iteratedObject
    jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

    jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);

    Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
    // Return the termination signal to indicate that we've finished
    jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
    jit.returnJSCell(SpecializedThunkJIT::regT0);

    notDone.link(&jit);

    if (kind == ArrayIterateKey) {
        jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
        jit.returnInt32(SpecializedThunkJIT::regT1);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
    }
    ASSERT(kind == ArrayIterateValue);

    // Okay, now we're returning a value so make sure we're inside the vector size
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));

    // So now we perform inline loads for int32, value/undecided, and double storage
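    // In the contiguous/undecided case below, an empty (zero / EmptyValueTag)
    // slot is a hole, and the thunk returns undefined for it inline.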
    Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
    Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));

    undecidedStorage.link(&jit);

    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

#if USE(JSVALUE64)
    jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
    Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
    notHole.link(&jit);
    jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
    Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
    notHole.link(&jit);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
    jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
    notContiguousStorage.link(&jit);

    Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    notInt32Storage.link(&jit);

    jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
}

MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
}

MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
}

} // namespace JSC

#endif // ENABLE(JIT)