/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "LinkBuffer.h"

namespace JSC {

#if USE(JSVALUE32_64)
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
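    // This routine assembles the shared stubs into a single executable pool:
    // the string-length fast path, the two virtual call trampolines (linking
    // and non-linking), and the native call thunk. Their entry points are
    // handed back through the CodePtr* out-parameters once the LinkBuffer
    // below is finalized.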
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (1) This function provides fast property access for string length
    Label stringLengthBegin = align();

    // regT0 holds payload, regT1 holds tag

    Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the Ustring.
    load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT2);

    Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);

    ret();
#endif
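    // Note on the JSVALUE32_64 value representation assumed above: a JSValue
    // occupies two machine words - a 32-bit payload and a 32-bit tag - so a
    // boxed int32 result is materialized by placing the payload in regT0 and
    // Int32Tag in regT1. The tag word lives at offset 4 within the value, as
    // the TimesEight/+4 loads later in this file rely on.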
    // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.

#if ENABLE(JIT_OPTIMIZE_CALL)
    // VirtualCallLink Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualCallLinkBegin = align();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));

    Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callJSFunction2 = call();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    hasCodeBlock2.link(this);

    // Check argCount matches callee arity.
    Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 1); // return address
    restoreArgumentReference();
    Call callArityCheck2 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    arityCheckOkay2.link(this);

    isNativeFunc2.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 1); // return address
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);
    jump(regT0);
#endif // ENABLE(JIT_OPTIMIZE_CALL)
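    // The link trampoline above funnels the first call through
    // cti_vm_lazyLinkCall (bound below via patchBuffer.link), which can
    // compile the callee and patch the call site, so subsequent calls jump
    // straight to the generated code rather than re-entering this stub.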
    // VirtualCall Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualCallBegin = align();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));

    Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callJSFunction1 = call();
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock3.link(this);

    // Check argCount matches callee arity.
    Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 1); // return address
    restoreArgumentReference();
    Call callArityCheck1 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    arityCheckOkay3.link(this);

    isNativeFunc3.link(this);

    compileOpCallInitializeCallFrame();
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
    jump(regT0);
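    // Unlike the linking variant, this trampoline never patches the call
    // site: every call re-fetches the target from FunctionExecutable's
    // m_jitCode field, so it can be shared by unpatched call sites.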
#if CPU(X86) || CPU(ARM_TRADITIONAL)
    Label nativeCallThunk = align();
    preserveReturnAddressAfterCall(regT0);
    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

#if CPU(X86)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
    /* We have two structs that we use to describe the stackframe we set up for our
     * call to native code. NativeCallFrameStructure describes how we set up the stack
     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
     * as the native code expects it. We do this as we are using the fastcall calling
     * convention which results in the callee popping its arguments off the stack, but
     * not the rest of the callframe so we need a nice way to ensure we increment the
     * stack pointer by the right amount after the call.
     */
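    // Rough picture of the frame built below (field offsets come from
    // NativeCallFrameStructure; exact padding is platform-dependent):
    //
    //   stackPointerRegister + offsetof(..., thisValue)  : this, as a tag/payload JSValue
    //   stackPointerRegister + offsetof(..., argPointer) : ArgList* -> the embedded args field
    //   stackPointerRegister + offsetof(..., args)       : ArgList { m_args, m_argCount }
    //
    // Only the NativeFunctionCalleeSignature prefix is popped by the fastcall
    // callee; the remainder is reclaimed manually after the call.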
#if COMPILER(MSVC) || OS(LINUX)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
#endif // COMPILER(MSVC)
    struct NativeCallFrameStructure {
      //  CallFrame* callFrame; // passed in EDX
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
        JSValue result;
    };
    struct NativeFunctionCalleeSignature {
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
    };
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
#else
    struct NativeCallFrameStructure {
      //  CallFrame* callFrame; // passed in ECX
      //  JSObject* callee; // passed in EDX
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
    };
    struct NativeFunctionCalleeSignature {
        JSValue thisValue;
        ArgList* argPointer;
    };
#endif
    const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
    // Allocate system stack frame
    subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // Push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));

    // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
    storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#if COMPILER(MSVC) || OS(LINUX)
    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);

    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
    storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));

    // Plant callframe
    move(callFrameRegister, X86Registers::edx);

    call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));

    // JSValue is a non-POD type, so eax points to it
    emitLoad(0, regT1, regT0, X86Registers::eax);
#else
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
    move(callFrameRegister, X86Registers::ecx); // callFrame
    call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif

    // We've put a few temporaries on the stack in addition to the actual arguments
    // so pull them off now
    addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
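    // That is: we allocated NativeCallFrameSize bytes, the fastcall callee
    // popped sizeof(NativeFunctionCalleeSignature) of them on return, so
    // adding the difference restores the stack pointer exactly.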
#elif CPU(ARM_TRADITIONAL)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    // Allocate stack space for our arglist
    COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_by_8byte_aligned);
    subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // Push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    move(callFrameRegister, regT1);
    sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);

    // push pointer to arguments
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));

    // Argument passing method:
    // r0 - points to return value
    // r1 - callFrame
    // r2 - callee
    // stack: this(JSValue) and a pointer to ArgList

    move(stackPointerRegister, regT3);
    subPtr(Imm32(8), stackPointerRegister);
    move(stackPointerRegister, regT0);
    subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);

    // Push the ArgList pointer (the second stack argument)
    storePtr(regT3, Address(stackPointerRegister, 8));

    // Push 'this' (the first stack argument)
    // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
    load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
    storePtr(regT3, Address(stackPointerRegister, 0));
    load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
    storePtr(regT3, Address(stackPointerRegister, 4));
    // Set up r2: callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);

    // Set up r1: callFrame
    move(callFrameRegister, regT1);

    // Call
    call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));

    // Load return value
    load32(Address(stackPointerRegister, 16), regT0);
    load32(Address(stackPointerRegister, 20), regT1);

    addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
#endif

    // Check for an exception
    move(ImmPtr(&globalData->exception), regT2);
    Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT3);
    ret();

    // Handle an exception
    sawException.link(this);
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
    restoreReturnAddressBeforeReturn(regT2);
    ret();
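    // Instead of returning into JIT code with an exception pending, the thunk
    // parks the real return address in globalData->exceptionLocation and
    // "returns" into ctiVMThrowTrampoline, which performs the unwind to the
    // nearest handler.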
#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif
    // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
    patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
#endif
    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
    UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
#else
    UNUSED_PARAM(ctiVirtualCallLink);
#endif
}
void JIT::emit_op_mov(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src))
        emitStore(dst, getConstantOperand(src));
    else {
        emitLoad(src, regT1, regT0);
        emitStore(dst, regT1, regT0);
        map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
    }
}
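// In emit_op_mov above (and throughout this file), map() records that the
// value for dst is currently live in regT1 (tag) and regT0 (payload),
// letting the next opcode reuse the registers instead of reloading from the
// register file; the mapping is dropped whenever something could clobber them.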
void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}
void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target);
}
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThanOrEqual, regT0, regT2), target);
}
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_lesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitLoadPayload(value, regT2);
    emitLoadPayload(baseVal, regT0);
    emitLoadPayload(proto, regT1);

    // Check that value, baseVal, and proto are cells.
    emitJumpSlowCaseIfNotJSCell(value);
    emitJumpSlowCaseIfNotJSCell(baseVal);
    emitJumpSlowCaseIfNotJSCell(proto);

    // Check that baseVal 'ImplementsDefaultHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(Imm32(JSValue::TrueTag), regT0);
    Label loop(this);

    // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    branchTest32(NonZero, regT2).linkTo(loop, this);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    move(Imm32(JSValue::FalseTag), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitStoreBool(dst, regT0);
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, value);
    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter); // ImplementsDefaultHasInstance check

    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value);
    stubCall.addArgument(baseVal);
    stubCall.addArgument(proto);
    stubCall.call(dst);
}
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[3].u.operand;

    loadPtr(&globalObject->d()->registers, regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    loadPtr(&globalObject->d()->registers, regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
}
void JIT::emit_op_tear_off_arguments(Instruction*)
{
    JITStubCall(this, cti_op_tear_off_arguments).call();
}
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // FIXME: Optimize to use patching instead of so many memory accesses.

    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Verify structure.
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));

    // Load property.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
    load32(offsetAddr, regT3);
    load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
    load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
}
void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}
void JIT::emit_op_not(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoadTag(src, regT0);

    xor32(Imm32(JSValue::FalseTag), regT0);
    addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
    xor32(Imm32(JSValue::TrueTag), regT0);

    emitStoreBool(dst, regT0, (dst == src));
}
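// The xor dance in emit_op_not relies on the tag encoding: TrueTag and
// FalseTag differ only in the low bit (which branchTest32(..., Imm32(~1))
// verifies after the first xor), so xor'ing with FalseTag yields 0 or 1 and
// the second xor with TrueTag rebuilds the negated boolean tag.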
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(src);
    stubCall.call(dst);
}
void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
    addJump(jump(), target);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
    } else
        addSlowCase(isNotInteger);

    isTrue.link(this);
    isTrue2.link(this);
}
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
}
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
    addJump(jump(), target);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
    } else
        addSlowCase(isNotInteger);

    isFalse.link(this);
    isFalse2.link(this);
}
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(NonZero, regT1), target);

    wasNotImmediate.link(this);
}
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(Zero, regT1), target);

    wasNotImmediate.link(this);
}
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitLoad(src, regT1, regT0);
    addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
}
void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
}
void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
}
void JIT::emit_op_eq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(Equal, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call();
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(op1);
    stubCallEq.addArgument(op2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}
void JIT::emit_op_neq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(NotEqual, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call(regT0);
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(regT1, regT0);
    stubCallEq.addArgument(regT3, regT2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    xor32(Imm32(0x1), regT0);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoadTag(src1, regT0);
    emitLoadTag(src2, regT1);

    // Jump to a slow case if either operand is double, or if both operands are
    // cells and/or Int32s.
    move(regT0, regT2);
    and32(regT1, regT2);
    addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
    addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));

    if (type == OpStrictEq)
        set8(Equal, regT0, regT1, regT0);
    else
        set8(NotEqual, regT0, regT1, regT0);

    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}
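// Tag trick used above: the and32 of the two tags is never larger than either
// tag, so it falls Below(LowestTag) whenever either operand is a double, and
// it is AboveOrEqual(CellTag) only when both operands are cells and/or int32s
// - the cases where payloads, not just tags, decide equality. Everything that
// survives both checks is a singleton type (true, false, null, undefined),
// for which comparing tags alone is sufficient.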
void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}
void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}
void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
    set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
    and32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(exception);
    stubCall.call();

#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // so this point in the code should never be reached.
    breakpoint();
#endif
}
void JIT::emit_op_get_pnames(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int breakTarget = currentInstruction[5].u.operand;

    JumpList isNotObject;

    emitLoad(base, regT1, regT0);
    if (!m_codeBlock->isKnownNotImmediate(base))
        isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
    if (base != m_codeBlock->thisRegister()) {
        loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
    }

    // We could inline the case where you have a valid cache, but
    // this call doesn't seem to be hot.
    Label isObject(this);
    JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
    getPnamesStubCall.addArgument(regT0);
    getPnamesStubCall.call(dst);
    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
    store32(Imm32(0), addressFor(i));
    store32(regT3, addressFor(size));
    Jump end = jump();

    isNotObject.link(this);
    addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
    addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
    JITStubCall toObjectStubCall(this, cti_to_object);
    toObjectStubCall.addArgument(regT1, regT0);
    toObjectStubCall.call(base);
    jump().linkTo(isObject, this);

    end.link(this);
}
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int it = currentInstruction[5].u.operand;
    int target = currentInstruction[6].u.operand;

    JumpList callHasProperty;

    Label begin(this);
    load32(addressFor(i), regT0);
    Jump end = branch32(Equal, regT0, addressFor(size));

    // Grab key @ i
    loadPtr(addressFor(it), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
    load32(BaseIndex(regT2, regT0, TimesEight), regT2);
    store32(Imm32(JSValue::CellTag), tagFor(dst));
    store32(regT2, payloadFor(dst));

    // Increment i
    add32(Imm32(1), regT0);
    store32(regT0, addressFor(i));

    // Verify that i is valid:
    loadPtr(addressFor(base), regT0);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));

    // Test base's prototype chain
    loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
    addJump(branchTestPtr(Zero, Address(regT3)), target);

    Label checkPrototype(this);
    callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
    addPtr(Imm32(sizeof(Structure*)), regT3);
    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);

    // Continue loop.
    addJump(jump(), target);

    // Slow case: Ask the object if i is valid.
    callHasProperty.link(this);
    loadPtr(addressFor(dst), regT1);
    JITStubCall stubCall(this, cti_has_property);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();

    // Test for valid key.
    addJump(branchTest32(NonZero, regT0), target);
    jump().linkTo(begin, this);

    // End of loop.
    end.link(this);
}
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}
void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
    isInt32.link(this);

    if (src != dst)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
}
void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;

    // This opcode only executes after a return from cti_op_throw.

    // cti_op_throw may have taken us to a call frame further up the stack; reload
    // the call frame pointer to adjust.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));

    // Now store the exception returned by cti_op_throw.
    emitStore(exception, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned type = currentInstruction[2].u.operand;
    unsigned message = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(type));
    stubCall.addArgument(m_codeBlock->getConstant(message));
    stubCall.addArgument(Imm32(m_bytecodeIndex));
    stubCall.call(dst);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
}
void JIT::emit_op_enter(Instruction*)
{
    // Even though JIT code doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    for (int i = 0; i < m_codeBlock->m_numVars; ++i)
        emitStore(i, jsUndefined());
}
void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    emit_op_enter(currentInstruction);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_create_arguments(Instruction*)
{
    Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));

    // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();

    argsCreated.link(this);
}
void JIT::emit_op_init_arguments(Instruction*)
{
    emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    emitLoad(thisRegister, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));

    map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(thisRegister);
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}
#else // USE(JSVALUE32_64)

#define RECORD_JUMP_TARGET(targetOffset) \
   do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (2) The second function provides fast property access for string length
    Label stringLengthBegin = align();

    // Check eax is a string
    Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the Ustring.
    load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT0);

    Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));

    // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
    emitFastArithIntToImmNoCheck(regT0, regT0);

    ret();
#endif
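    // In the JSVALUE64 representation a JSValue is a single machine word, so
    // instead of the tag/payload pair used above, the raw length is re-boxed
    // into an immediate integer by emitFastArithIntToImmNoCheck after the
    // maxImmediateInt range check has already ruled out overflow.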
1519 // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
1520 COMPILE_ASSERT(sizeof(CodeType
) == 4, CodeTypeEnumMustBe32Bit
);
1522 // VirtualCallLink Trampoline
1523 // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
1524 Label virtualCallLinkBegin
= align();
1525 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSFunction
, m_executable
)), regT2
);
1527 Jump isNativeFunc2
= branch32(Equal
, Address(regT2
, OBJECT_OFFSETOF(FunctionExecutable
, m_numParameters
)), Imm32(0));
1529 Jump hasCodeBlock2
= branch32(GreaterThan
, Address(regT2
, OBJECT_OFFSETOF(FunctionExecutable
, m_numParameters
)), Imm32(0));
1530 preserveReturnAddressAfterCall(regT3
);
1531 restoreArgumentReference();
1532 Call callJSFunction2
= call();
1533 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSFunction
, m_executable
)), regT2
);
1534 emitGetJITStubArg(2, regT1
); // argCount
1535 restoreReturnAddressBeforeReturn(regT3
);
1536 hasCodeBlock2
.link(this);
1538 // Check argCount matches callee arity.
1539 Jump arityCheckOkay2
= branch32(Equal
, Address(regT2
, OBJECT_OFFSETOF(FunctionExecutable
, m_numParameters
)), regT1
);
1540 preserveReturnAddressAfterCall(regT3
);
1541 emitPutJITStubArg(regT3
, 1); // return address
1542 restoreArgumentReference();
1543 Call callArityCheck2
= call();
1544 move(regT1
, callFrameRegister
);
1545 emitGetJITStubArg(2, regT1
); // argCount
1546 restoreReturnAddressBeforeReturn(regT3
);
1547 arityCheckOkay2
.link(this);
1549 isNativeFunc2
.link(this);
1551 compileOpCallInitializeCallFrame();
1552 preserveReturnAddressAfterCall(regT3
);
1553 emitPutJITStubArg(regT3
, 1); // return address
1554 restoreArgumentReference();
1555 Call callLazyLinkCall
= call();
1556 restoreReturnAddressBeforeReturn(regT3
);
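    // Note on the flow above: the first time a linked call site reaches this
    // trampoline, callLazyLinkCall (linked below to cti_vm_lazyLinkCall) is
    // expected to compile the callee if necessary and patch the calling code
    // to jump to it directly, so later calls bypass this trampoline entirely.
    // This is a reading of how the calls are linked below, not something this
    // function states by itself.
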
    // VirtualCall Trampoline
    // regT0 holds callee, regT1 holds argCount.  regT2 will hold the FunctionExecutable.
    Label virtualCallBegin = align();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));

    Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callJSFunction1 = call();
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock3.link(this);

    // Check argCount matches callee arity.
    Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 1); // return address
    restoreArgumentReference();
    Call callArityCheck1 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(2, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    arityCheckOkay3.link(this);

    isNativeFunc3.link(this);

    compileOpCallInitializeCallFrame();
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
    jump(regT0);

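    // The m_numParameters tests above encode a convention shared by both
    // trampolines: zero means a host (native) function, a positive count
    // means JIT code already exists, and (by elimination) a negative count
    // means the callee still has to be compiled via cti_op_call_JSFunction.
    // The zero/negative split is inferred from the branch structure here,
    // not stated explicitly in this file.
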
    Label nativeCallThunk = align();
    preserveReturnAddressAfterCall(regT0);
    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

#if CPU(X86_64)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);

    // Allocate stack space for our arglist
    subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
    COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);

    // Set up arguments
    subPtr(Imm32(1), X86Registers::ecx); // Don't include 'this' in argcount

    // Push argcount
    storePtr(X86Registers::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in edx
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86Registers::edx);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
    mul32(Imm32(sizeof(Register)), X86Registers::ecx, X86Registers::ecx);
    subPtr(X86Registers::ecx, X86Registers::edx);

    // push pointer to arguments
    storePtr(X86Registers::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister
    move(stackPointerRegister, X86Registers::ecx);

    // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
    loadPtr(Address(X86Registers::edx, -(int32_t)sizeof(Register)), X86Registers::edx);

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);

    move(callFrameRegister, X86Registers::edi);

    call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));

    addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
#elif CPU(X86)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    /* We have two structs that we use to describe the stackframe we set up for our
     * call to native code.  NativeCallFrameStructure describes the how we set up the stack
     * in advance of the call.  NativeFunctionCalleeSignature describes the callframe
     * as the native code expects it.  We do this as we are using the fastcall calling
     * convention which results in the callee popping its arguments off the stack, but
     * not the rest of the callframe so we need a nice way to ensure we increment the
     * stack pointer by the right amount after the call.
     */
#if COMPILER(MSVC) || OS(LINUX)
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in EDX
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
        JSValue result;
    };
    struct NativeFunctionCalleeSignature {
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
    };
#else
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in ECX
        // JSObject* callee; // passed in EDX
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
    };
    struct NativeFunctionCalleeSignature {
        JSValue thisValue;
        ArgList* argPointer;
    };
#endif
    const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
    // Allocate system stack frame
    subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));

    // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));

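    // Rough sketch of the frame built above (MSVC/Linux variant; offsets
    // simply follow the struct declaration, so treat this as orientation,
    // not an ABI statement):
    //
    //   esp -> | callee     | JSObject*                 \
    //          | thisValue  | JSValue                    | NativeFunctionCalleeSignature
    //          | argPointer | ArgList* -> args below    /  (popped by the fastcall callee)
    //          | args       | { m_args, m_argCount }
    //          | result     | JSValue written by the callee
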
#if COMPILER(MSVC) || OS(LINUX)
    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);

    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
    storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));

    // Plant callframe
    move(callFrameRegister, X86Registers::edx);

    call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));

    // JSValue is a non-POD type
    loadPtr(Address(X86Registers::eax), X86Registers::eax);
#else
    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx);

    // Plant callframe
    move(callFrameRegister, X86Registers::ecx);
    call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif

    // We've put a few temporaries on the stack in addition to the actual arguments
    // so pull them off now
    addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);

#elif CPU(ARM)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    // Allocate stack space for our arglist
    COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0, ArgList_should_by_8byte_aligned);
    subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // Push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    move(callFrameRegister, regT1);
    sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);

    // push pointer to arguments
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));

    // Setup arg3: regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int32_t)sizeof(Register)), regT2);

    // Setup arg2:
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);

    // Setup arg1:
    move(callFrameRegister, regT0);

    // Setup arg4: This is a plain hack
    move(stackPointerRegister, ARMRegisters::r3);

    call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));

    addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif

    // Check for an exception
    loadPtr(&(globalData->exception), regT2);
    Jump exceptionHandler = branchTestPtr(NonZero, regT2);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();

    // Handle an exception
    exceptionHandler.link(this);
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
    restoreReturnAddressBeforeReturn(regT2);
    ret();

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
    patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
#endif

    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
    *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
    UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
}

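// Most opcode implementations below fall back to a C function through
// JITStubCall. The pattern, as used throughout this file, is:
//
//     JITStubCall stubCall(this, cti_op_foo);  // pick the stub
//     stubCall.addArgument(Imm32(op));         // marshal arguments
//     stubCall.call(dst);                      // call; store result in dst
//
// call() with no argument is used when the stub returns nothing, or when the
// result is handled manually. cti_op_foo is a placeholder name here, not a
// real stub.
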
void JIT::emit_op_mov(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
        if (dst == m_lastResultBytecodeRegister)
            killLastResultRegister();
    } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
        // If either the src or dst is the cached register go though
        // get/put registers to make sure we track this correctly.
        emitGetVirtualRegister(src, regT0);
        emitPutVirtualRegister(dst);
    } else {
        // Perform the copy via regT1; do not disturb any mapping in regT0.
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
        storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
    }
}

void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    emitTimeoutCheck();

    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(LessThanOrEqual, regT0, regT1), target);
    }
}

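// Why the two op2imm flavors above work (an explanatory note relying on the
// JSImmediate encodings of this era, which are defined elsewhere): branch32
// compares 32-bit payloads. With JSVALUE64, an immediate integer's low 32
// bits are the plain int32 value, so the untagged constant compares correctly
// against the tagged register. With the 32-bit immediate encoding, the whole
// register is tag plus payload, so the constant is tagged the same way
// (rawValue) before comparing; that encoding preserves signed ordering, so
// LessThanOrEqual on tagged values matches the untagged comparison.
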
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands (baseVal, proto, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitGetVirtualRegister(value, regT2);
    emitGetVirtualRegister(baseVal, regT0);
    emitGetVirtualRegister(proto, regT1);

    // Check that baseVal & proto are cells.
    emitJumpSlowCaseIfNotJSCell(regT2, value);
    emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
    emitJumpSlowCaseIfNotJSCell(regT1, proto);

    // Check that baseVal 'ImplementsDefaultHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
    Label loop(this);

    // Load the prototype of the object in regT2.  If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    emitJumpIfJSCell(regT2).linkTo(loop, this);

    // We get here either by dropping out of the loop, or if value was not an Object.  Result is false.
    move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitPutVirtualRegister(dst);
}

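// JS-level sketch of the fast path above (semantics only, not code from this
// file): for `value instanceof baseVal`, with baseVal's .prototype in proto,
//
//     var p = value;
//     do {
//         p = Object.getPrototypeOf(p);
//         if (p === proto)
//             return true;
//     } while (p is a cell);
//     return false;
//
// Non-cell operands and objects without default hasInstance behavior bail to
// cti_op_instanceof via emitSlow_op_instanceof below.
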
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    int argCountDst = currentInstruction[1].u.operand;
    int argsOffset = currentInstruction[2].u.operand;

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(Imm32(argsOffset));
    stubCall.call();
    // Stores a naked int32 in the register file.
    store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCallVarargs(currentInstruction);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
}

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    while (skip--)
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    while (skip--)
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);

    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
    emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
}

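// The `skip` count above is a compile-time constant, so the scope-chain walk
// unrolls into straight loads. A JS sketch of when skip is non-zero
// (illustrative only):
//
//     function outer() {
//         var x = 1;
//         function inner() { return x; }  // x lives in outer's activation
//         return inner;
//     }
//
// Reading x inside inner() compiles to op_get_scoped_var with a skip that
// hops past any intervening scope-chain nodes to reach outer's activation.
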
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction*)
{
    JITStubCall(this, cti_op_tear_off_arguments).call();
}

void JIT::emit_op_ret(Instruction* currentInstruction)
{
    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueRegister);
    ASSERT(returnValueRegister != callFrameRegister);

    // Return the result in %eax.
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();
}

void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_construct_verify(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
}

void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump isImm = emitJumpIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitPutVirtualRegister(dst);
}

void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // Fast case
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Check Structure of global object
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match

    // Load cached property
    // Assume that the global object always uses external storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
    load32(offsetAddr, regT1);
    loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
    Jump end = jump();

    // Slow case
    noMatch.link(this);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(currentInstruction[1].u.operand);
    end.link(this);
}

void JIT::emit_op_not(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

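// How emit_op_not's bit trick works (reasoning about the JSImmediate boolean
// encoding, whose constants are defined elsewhere): after xor'ing away
// FullTagTypeBool, a genuine boolean leaves only the payload bit, so any
// other set bit means "not a boolean" and we take the slow case. The second
// xor restores the tag and flips the payload in one operation. As a worked
// sketch, with T = FullTagTypeBool and P = ExtendedPayloadBitBoolValue:
//
//     encode(true) = T | P
//     step 1:  (T | P) ^ T  = P      -> passes the ~P test
//     step 2:  P ^ (T | P)  = T      = encode(false)
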
void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
    Jump isNonZero = emitJumpIfImmediateInteger(regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));

    isNonZero.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitGetVirtualRegister(src, regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target);

    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
    killLastResultRegister();
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
    killLastResultRegister();
}

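// op_jsr / op_sret form a bytecode-level subroutine call and return: op_jsr
// plants the address of the instruction after its jump (patched in via
// m_jsrSites once code addresses are known) into a register-file slot, and
// op_sret jumps back through that slot. The bytecode generator uses this
// shape for code that must run on several paths, such as finally clauses;
// that usage is context from the bytecode compiler, not from this file.
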
void JIT::emit_op_eq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(Equal, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    not32(regT0);
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
    addJump(emitJumpIfImmediateInteger(regT0), target);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));

    isZero.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_neq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    xorPtr(regT1, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    orPtr(regT1, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_throw(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
    ASSERT(regT0 == returnValueRegister);
#ifndef NDEBUG
    // cti_op_throw always changes it's return address,
    // this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_get_pnames(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int breakTarget = currentInstruction[5].u.operand;

    JumpList isNotObject;

    emitGetVirtualRegister(base, regT0);
    if (!m_codeBlock->isKnownNotImmediate(base))
        isNotObject.append(emitJumpIfNotJSCell(regT0));
    if (base != m_codeBlock->thisRegister()) {
        loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
    }

    // We could inline the case where you have a valid cache, but
    // this call doesn't seem to be hot.
    Label isObject(this);
    JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
    getPnamesStubCall.addArgument(regT0);
    getPnamesStubCall.call(dst);
    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
    store32(Imm32(0), addressFor(i));
    store32(regT3, addressFor(size));
    Jump end = jump();

    isNotObject.link(this);
    move(regT0, regT1);
    and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
    addJump(branch32(Equal, regT1, Imm32(JSImmediate::FullTagTypeNull)), breakTarget);

    JITStubCall toObjectStubCall(this, cti_to_object);
    toObjectStubCall.addArgument(regT0);
    toObjectStubCall.call(base);
    jump().linkTo(isObject, this);

    end.link(this);
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int it = currentInstruction[5].u.operand;
    int target = currentInstruction[6].u.operand;

    JumpList callHasProperty;

    Label begin(this);
    load32(addressFor(i), regT0);
    Jump end = branch32(Equal, regT0, addressFor(size));

    // Grab key @ i
    loadPtr(addressFor(it), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);

#if USE(JSVALUE64)
    loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
#else
    loadPtr(BaseIndex(regT2, regT0, TimesFour), regT2);
#endif

    emitPutVirtualRegister(dst, regT2);

    // Increment i
    add32(Imm32(1), regT0);
    store32(regT0, addressFor(i));

    // Verify that i is valid:
    emitGetVirtualRegister(base, regT0);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));

    // Test base's prototype chain
    loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
    addJump(branchTestPtr(Zero, Address(regT3)), target);

    Label checkPrototype(this);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    callHasProperty.append(emitJumpIfNotJSCell(regT2));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
    addPtr(Imm32(sizeof(Structure*)), regT3);
    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);

    // Continue loop.
    addJump(jump(), target);

    // Slow case: Ask the object if i is valid.
    callHasProperty.link(this);
    emitGetVirtualRegister(dst, regT1);
    JITStubCall stubCall(this, cti_has_property);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();

    // Test for valid key.
    addJump(branchTest32(NonZero, regT0), target);
    jump().linkTo(begin, this);

    // End of loop.
    end.link(this);
}

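// for..in note: op_get_pnames builds a JSPropertyNameIterator snapshotting
// the enumerable names plus the Structure and prototype chain they were
// collected from; op_next_pname can then hand out cached names with no
// per-iteration lookup as long as those shapes still match. When the shape
// check fails, cti_has_property re-validates the key, which is how
// properties deleted mid-loop get skipped. (The deletion behavior is the
// usual intent of such re-validation; this file only shows the check.)
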
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}

void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    // Jump to a slow case if either operand is a number, or if both are JSCell*s.
    move(regT0, regT2);
    orPtr(regT1, regT2);
    addSlowCase(emitJumpIfJSCell(regT2));
    addSlowCase(emitJumpIfImmediateNumber(regT2));

    if (type == OpStrictEq)
        set32(Equal, regT1, regT0, regT0);
    else
        set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(dst);
}

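// Why or'ing the operands works above (an explanatory note on the tagging
// scheme, which lives outside this file): cells carry no tag bits, so the OR
// looks like a cell only when *both* operands are cells, and it carries a
// number tag if *either* operand is a number. Everything that survives both
// slow-case checks (booleans, null, undefined, and other non-numeric
// immediates) is strictly equal exactly when its bit patterns are equal, so
// a plain register compare decides the result.
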
void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int srcVReg = currentInstruction[2].u.operand;
    emitGetVirtualRegister(srcVReg, regT0);

    Jump wasImmediate = emitJumpIfImmediateInteger(regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));

    wasImmediate.link(this);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_catch(Instruction* currentInstruction)
{
    killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand);
    RECORD_JUMP_TARGET(currentInstruction[2].u.operand);
}

void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

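// All three switch flavors share one shape: the emitted code hands the
// scrutinee and table index to a stub, the stub returns the machine-code
// address to branch to, and the indirect jump through the return register is
// the entire dispatch. SwitchRecord is what lets the JIT fill in
// jumpTable->ctiOffsets with real code offsets once the whole function has
// been compiled; grow() just reserves one CTI offset per bytecode branch
// offset.
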
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
    stubCall.addArgument(Imm32(m_bytecodeIndex));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_debug(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
}

void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}

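// MasqueradesAsUndefined, tested here and in the j*_null opcodes above, is
// the type-info flag for objects that must compare equal to null/undefined
// (the classic example in WebKit being document.all). For immediates,
// masking off ExtendedTagBitUndefined folds undefined onto null, so a single
// compare against the null bit pattern handles `x == null` for both.
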
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_enter(Instruction*)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);
}

void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_create_arguments(Instruction*)
{
    Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();
    argsCreated.link(this);
}

void JIT::emit_op_init_arguments(Instruction*)
{
    storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
}

void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
}

void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT1));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
    stubCall.call();
    noProfiler.link(this);
}

void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT1));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
    stubCall.call();
    noProfiler.link(this);
}

// Slow cases

void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base array check
    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

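// A convention worth noting for all the emitSlow_ functions in this file:
// each linkSlowCase(iter) consumes the next SlowCaseEntry that the fast path
// registered with addSlowCase(), strictly in emission order. The commented
// list above (int32 check, cell check, array check, vector length, empty
// value) therefore has to match the addSlowCase sequence in the
// corresponding fast-path emitter one for one, or the jumps would be linked
// to the wrong handler.
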
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(currentInstruction[2].u.operand, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    } else {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    }
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    stubPutByValCall.call();
}

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    xor32(Imm32(0x1), regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, value);
    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value, regT2);
    stubCall.addArgument(baseVal, regT2);
    stubCall.addArgument(proto, regT2);
    stubCall.call(dst);
}

void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
}

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)