/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSCInlines.h"
#include "JSStack.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

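// Sanity-check a pointer that we are about to call or jump through. In debug
// builds a null pointer aborts with TGInvalidPointer, and a wild non-null
// pointer faults on the load8 probe below, so a bad pointer crashes here
// rather than at some harder-to-diagnose point later on.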
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) The thunk that returns a host call's return value.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link closure call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

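// nativeForGenerator builds the trampoline from JIT'd JS code into a host
// (C/C++) function: it publishes the frame to vm->topCallFrame, copies the
// caller frame's scope chain into this frame, moves ExecState* into the
// platform's first argument register, calls through the NativeExecutable's
// function pointer, and checks for a pending exception on the way out,
// diverting to operationVMHandleException if one was thrown.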
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space.
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception.
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception.
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

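// arityFixup runs when a function is invoked with fewer arguments than it
// declares: it slides the frame down by the (stack-aligned) shortfall that
// arrives in regT0, fills the newly exposed argument slots with undefined,
// stashes the original return PC above the moved frame, and installs the
// supplied return thunk (regT7 on 64-bit, regT5 on 32-bit) as the new
// return PC.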
MacroAssemblerCodeRef arityFixup(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots.
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots.
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

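// stringCharLoad leaves the character code of this[index] in regT0. It bails
// to the C++ slow path when the string's value pointer is null (presumably an
// unresolved rope) or when the index, compared unsigned, is out of bounds.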
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // Load the string.
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0.
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // Load the index.
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index.

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character.
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags.
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

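// charToString maps a character code in src to the VM's preallocated
// single-character JSString table, bailing for codes >= 0x100 and for table
// entries that have not been materialized yet (null slots).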
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // Load the char code.
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}

#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

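// jsRound implements Math.round's rounding: to the nearest integer, with
// exact halves rounding toward +Infinity. ceil(d) overshoots by at most 1;
// we subtract 1 exactly when ceil(d) - d > 0.5, i.e. when d's fractional
// part is below one half.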
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

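// Each defineUnaryDoubleOpWrapper variant below emits a tiny assembly shim
// with a JIT-friendly convention: the double argument and result stay in the
// FP argument register (%xmm0 / d0), and the shim only does whatever ABI
// fixup the inner C call needs. On x86-64 the pushq/popq pair keeps the stack
// 16-byte aligned across the call; on x86-32 the value is shuffled between
// %xmm0 and the x87 return register via the stack; on ARM64 the ABI already
// matches, so the shim is a plain tail branch.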
#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

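// The integer fast path in absThunkGenerator computes |x| branchlessly:
// regT1 = x >> 31 (arithmetic) is 0 or -1, the sign mask, and
// (x + mask) ^ mask negates x exactly when it was negative. The one
// unrepresentable input, INT_MIN, still equals 1 << 31 after this dance
// and is sent to the slow path.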
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

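// The integer-exponent loop in powThunkGenerator is exponentiation by
// squaring: starting from an accumulator of 1.0 in fpRegT1, it multiplies
// the accumulator by the base whenever the exponent's low bit is set,
// squares the base, and halves the exponent until it hits zero. The only
// non-integer exponent handled inline is exactly -0.5 with a base above 1.0,
// computed as 1 / sqrt(x); everything else bails to the C++ implementation.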
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

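// arrayIteratorNextThunkGenerator inlines the next() fast path for array
// iterators: it bumps m_nextIndex, returns the VM's shared terminator
// sentinel once the index reaches the butterfly's public length, and loads
// values directly only for int32, double, and contiguous/undecided indexing
// shapes; everything else falls back to the C++ implementation.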
static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
    typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
    typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
    typedef SpecializedThunkJIT::Address Address;
    typedef SpecializedThunkJIT::BaseIndex BaseIndex;
    typedef SpecializedThunkJIT::Jump Jump;

    SpecializedThunkJIT jit(vm);
    // Make sure we're being called on an array iterator; it lands in regT4, and we
    // then load m_iteratedObject and m_nextIndex into regT0 and regT1 respectively.
    jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);

    // Early exit if we don't have a thunk for this form of iteration.
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));

    jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);

    jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);

    // Pull out the butterfly from iteratedObject.
    jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

    jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);

    Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
    // Return the termination signal to indicate that we've finished.
    jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
    jit.returnJSCell(SpecializedThunkJIT::regT0);

    notDone.link(&jit);

    if (kind == ArrayIterateKey) {
        jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
        jit.returnInt32(SpecializedThunkJIT::regT1);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
    }
    ASSERT(kind == ArrayIterateValue);

    // Okay, now we're returning a value so make sure we're inside the vector size.
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));

    // So now we perform inline loads for int32, value/undecided, and double storage.
    Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
    Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));

    undecidedStorage.link(&jit);

    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

#if USE(JSVALUE64)
    jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
    Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
    notHole.link(&jit);
    jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
    Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
    notHole.link(&jit);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
    jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
    notContiguousStorage.link(&jit);

    Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    notInt32Storage.link(&jit);

    jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
}

MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
}

MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
}

} // namespace JSC

#endif // ENABLE(JIT)