/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "LinkBuffer.h"

namespace JSC {

#if USE(JSVALUE32_64)

void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (1) This function provides fast property access for string length
    Label stringLengthBegin = align();

    // regT0 holds payload, regT1 holds tag

    Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the UString.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2);

    Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);

    ret();
#endif

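    // Illustrative sketch (added commentary, not part of the original file): the
    // trampolines in this half of the file assume the JSVALUE32_64 representation,
    // where a JSValue occupies two 32-bit words, roughly:
    //
    //     struct EncodedBits {
    //         int32_t payload; // cell pointer, int32, or bool, depending on tag
    //         int32_t tag;     // JSValue::CellTag, Int32Tag, TrueTag, ...
    //     };
    //
    // A value is therefore materialized in a (tag, payload) register pair such as
    // (regT1, regT0), and type checks are 32-bit compares against the tag word.
    // The exact field order and tag values live in JSValue.h; treat this as a
    // mental model, not a definition.
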
    // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.

#if ENABLE(JIT_OPTIMIZE_CALL)
    /* VirtualCallPreLink Trampoline */
    Label virtualCallPreLinkBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock1 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction1 = call();
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock1.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck1 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay1.link(this);
    isNativeFunc1.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3);
    restoreArgumentReference();
    Call callDontLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);
    jump(regT0);

    /* VirtualCallLink Trampoline */
    Label virtualCallLinkBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock2 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction2 = call();
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock2.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck2 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay2.link(this);
    isNativeFunc2.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3);
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);
    jump(regT0);

#endif // ENABLE(JIT_OPTIMIZE_CALL)
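
    // Note on the three variants (added commentary, not original): the pre-link and
    // link trampolines above differ only in the stub they bounce through before
    // jumping to the compiled code - cti_vm_dontLazyLinkCall vs. cti_vm_lazyLinkCall,
    // as wired up in the patchBuffer.link() calls below - while the plain
    // virtual-call trampoline that follows resolves the target code pointer itself
    // on every call.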

    /* VirtualCall Trampoline */
    Label virtualCallBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock3 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction3 = call();
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock3.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee.
    Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck3 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay3.link(this);
    isNativeFunc3.link(this);
    compileOpCallInitializeCallFrame();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
    jump(regT0);

#if PLATFORM(X86)
    Label nativeCallThunk = align();
    preserveReturnAddressAfterCall(regT0);
    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    /* We have two structs that we use to describe the stackframe we set up for our
     * call to native code. NativeCallFrameStructure describes how we set up the stack
     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
     * as the native code expects it. We do this as we are using the fastcall calling
     * convention which results in the callee popping its arguments off the stack, but
     * not the rest of the callframe so we need a nice way to ensure we increment the
     * stack pointer by the right amount after the call.
     */

#if COMPILER(MSVC) || PLATFORM(LINUX)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
#endif // COMPILER(MSVC)
    struct NativeCallFrameStructure {
      //  CallFrame* callFrame; // passed in EDX
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
        JSValue result;
    };
    struct NativeFunctionCalleeSignature {
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
    };
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
#else
    struct NativeCallFrameStructure {
      //  CallFrame* callFrame; // passed in ECX
      //  JSObject* callee; // passed in EDX
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
    };
    struct NativeFunctionCalleeSignature {
        JSValue thisValue;
        ArgList* argPointer;
    };
#endif

    const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
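    // The "(size + 15) & ~15" above rounds the frame size up to the next multiple of
    // 16, the stack alignment the x86 ABIs expect. A quick worked example (added
    // commentary, not original code): if sizeof(NativeCallFrameStructure) were 36,
    // then (36 + 15) & ~15 == 51 & 0xfffffff0 == 48.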
    // Allocate system stack frame
    subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);

    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));

    // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
    storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));

#if COMPILER(MSVC) || PLATFORM(LINUX)
    // The result is a non-POD type, so the callee writes it through a hidden pointer, planted in ecx.
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
    storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));

    move(callFrameRegister, X86::edx);

    call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));

    // JSValue is a non-POD type, so eax points to it
    emitLoad(0, regT1, regT0, X86::eax);
#else
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); // callee
    move(callFrameRegister, X86::ecx); // callFrame
    call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif

    // We've put a few temporaries on the stack in addition to the actual arguments
    // so pull them off now
    addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);

    // Check for an exception
    // FIXME: Maybe we can optimize this comparison to JSValue().
    move(ImmPtr(&globalData->exception), regT2);
    Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag));
    Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0));

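    // Added commentary (not original): an empty JSValue() in this encoding is assumed
    // to be tag == JSValue::CellTag with a zero payload, which is why "no exception"
    // requires both branches above to fall through - any other tag, or a non-zero
    // payload, means globalData->exception holds a real value and we take the
    // exception path below.
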
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT3);
    ret();

    // Handle an exception
    sawException1.link(this);
    sawException2.link(this);
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
    restoreReturnAddressBeforeReturn(regT2);
    ret();

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall));
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
#endif
    patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction));

    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
    UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
    *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
#else
    UNUSED_PARAM(ctiVirtualCallPreLink);
    UNUSED_PARAM(ctiVirtualCallLink);
#endif
}

void JIT::emit_op_mov(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src))
        emitStore(dst, getConstantOperand(src));
    else {
        emitLoad(src, regT1, regT0);
        emitStore(dst, regT1, regT0);
        map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
    }
}

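// Added commentary (not original): the map() call above records that the value of
// dst is live in the (regT1, regT0) pair at the bytecode index following op_mov, so
// the code generated for the next opcode can reuse those registers instead of
// reloading from the register file. The same pattern recurs throughout this file.
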
void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
}

void JIT::emit_op_loop(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    emitTimeoutCheck();
    addJump(jump(), target + 1);
}

void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
        return;
    }

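    // Added commentary (not original): with op1 constant the operands arrive
    // swapped - op2 was loaded into regT0 - so "op1 < op2" is tested as
    // "op2 > op1", hence the GreaterThan condition above versus LessThan in the
    // cases below.
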
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThan, regT0, regT2), target + 3);
}

void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_less);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}

void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3);
}

void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_lesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}

void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands (baseVal, proto, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitLoadPayload(proto, regT1);
    emitLoadPayload(baseVal, regT0);
    emitLoadPayload(value, regT2);

    // Check that baseVal & proto are cells.
    emitJumpSlowCaseIfNotJSCell(proto);
    emitJumpSlowCaseIfNotJSCell(baseVal);

    // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.

    // If value is not an Object, return false.
    emitLoadTag(value, regT0);
    Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.

    // Check proto is object.
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(Imm32(JSValue::TrueTag), regT0);
    Label loop(this);

    // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    branch32(NotEqual, regT2, Imm32(0), loop);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    valueIsImmediate.link(this);
    valueIsNotObject.link(this);
    move(Imm32(JSValue::FalseTag), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value);
    stubCall.addArgument(baseVal);
    stubCall.addArgument(proto);
    stubCall.call(dst);
}

void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[3].u.operand;

    loadPtr(&globalObject->d()->registers, regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    loadPtr(&globalObject->d()->registers, regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}

void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction*)
{
    JITStubCall(this, cti_op_tear_off_arguments).call();
}

void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitTimeoutCheck();

    emitLoad(cond, regT1, regT0);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
    Jump isNotZero = jump();

    isNotInteger.link(this);

    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));

    isNotZero.link(this);
}

void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
}

void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // FIXME: Optimize to use patching instead of so many memory accesses.

    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Verify the structure of the global object has not changed.
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));

    // Load the property out of the cached offset.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
    load32(offsetAddr, regT3);
    load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
    load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
}

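// Added commentary (not original): the TimesEight scale matches
// sizeof(JSValue) == 8 under JSVALUE32_64, so index regT3 selects a whole value
// slot in the external storage; the two load32 calls then pick up its payload word
// (offset 0) and tag word (offset 4) separately.
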
void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}

void JIT::emit_op_not(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoadTag(src, regT0);

    xor32(Imm32(JSValue::FalseTag), regT0);
    addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
    xor32(Imm32(JSValue::TrueTag), regT0);

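    // Added commentary (not original): this relies on FalseTag and TrueTag being
    // adjacent encodings that differ only in their low bit. After xor'ing with
    // FalseTag a boolean tag leaves only that low bit (0 for false, 1 for true), so
    // testing against ~1 rejects every non-boolean tag; xor'ing with TrueTag then
    // produces the tag of the inverted boolean.
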
    emitStoreBool(dst, regT0, (dst == src));
}

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(src);
    stubCall.call(dst);
}

void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
    addJump(jump(), target + 2);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
    } else
        addSlowCase(isNotInteger);

    isTrue.link(this);
    isTrue2.link(this);
}

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted.
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
    addJump(jump(), target + 2);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
    } else
        addSlowCase(isNotInteger);

    isFalse.link(this);
    isFalse2.link(this);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(NonZero, regT1), target + 2);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(Zero, regT1), target + 2);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitLoad(src, regT1, regT0);
    addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3);
}

void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target + 2);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
}

void JIT::emit_op_eq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

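    // Added commentary (not original): under this encoding, tag words below
    // LowestTag are assumed to be the high half of a double, and CellTag needs a
    // deeper comparison, so after these three checks both operands are immediates
    // with identical tags and equality reduces to comparing payload words.
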
    set8(Equal, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call();
    storeResult.append(jump());

    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(op1);
    stubCallEq.addArgument(op2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::emit_op_neq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(NotEqual, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call(regT0);
    storeResult.append(jump());

    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(regT1, regT0);
    stubCallEq.addArgument(regT3, regT2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    xor32(Imm32(0x1), regT0);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoadTag(src1, regT0);
    emitLoadTag(src2, regT1);

    // Jump to a slow case if either operand is double, or if both operands are
    // cells and/or Int32s.
    move(regT0, regT2);
    and32(regT1, regT2);
    addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
    addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));

    if (type == OpStrictEq)
        set8(Equal, regT0, regT1, regT0);
    else
        set8(NotEqual, regT0, regT1, regT0);

    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
    set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
    and32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_throw(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(exception);
    stubCall.call();

#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // so this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int iter = currentInstruction[2].u.operand;
    int target = currentInstruction[3].u.operand;

    load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0);

    JITStubCall stubCall(this, cti_op_next_pname);
    stubCall.addArgument(regT0);
    stubCall.call();

    Jump endOfIter = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0);
    addJump(jump(), target + 3);
    endOfIter.link(this);
}

void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag)));
    isInt32.link(this);

    if (src != dst)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_catch(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;

    // This opcode only executes after a return from cti_op_throw.

    // cti_op_throw may have taken us to a call frame further up the stack; reload
    // the call frame pointer to adjust.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));

    // Now store the exception returned by cti_op_throw.
    emitStore(exception, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand + 2);
}

void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned type = currentInstruction[2].u.operand;
    unsigned message = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(type));
    stubCall.addArgument(m_codeBlock->getConstant(message));
    stubCall.addArgument(Imm32(m_bytecodeIndex));
    stubCall.call(dst);
}

void JIT::emit_op_debug(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
}

void JIT::emit_op_enter(Instruction*)
{
    // Even though JIT code doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    for (int i = 0; i < m_codeBlock->m_numVars; ++i)
        emitStore(i, jsUndefined());
}

void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    emit_op_enter(currentInstruction);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_create_arguments(Instruction*)
{
    Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag));
    Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister));

    // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();

    argsNotCell.link(this);
    argsNotNull.link(this);
}

void JIT::emit_op_init_arguments(Instruction*)
{
    emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
}

void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    emitLoad(thisRegister, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));

    map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}

void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(thisRegister);
}

void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}

void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}

#else // USE(JSVALUE32_64)

#define RECORD_JUMP_TARGET(targetOffset) \
   do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)

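// Added commentary (not original): RECORD_JUMP_TARGET marks the label at a given
// bytecode offset as used, so the JIT knows a branch lands there. An opcode
// emitter that plants a jump to "target + 2" would, under this convention, pair it
// with RECORD_JUMP_TARGET(target + 2).
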
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (2) The second function provides fast property access for string length
    Label stringLengthBegin = align();

    // Check eax is a string
    Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the UString.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
    load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);

    Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));

    // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
    emitFastArithIntToImmNoCheck(regT0, regT0);

    ret();
#endif

    // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
    COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);

1543 Label virtualCallPreLinkBegin
= align();
1545 // Load the callee CodeBlock* into eax
1546 loadPtr(Address(regT2
, OBJECT_OFFSETOF(JSFunction
, m_body
)), regT3
);
1547 loadPtr(Address(regT3
, OBJECT_OFFSETOF(FunctionBodyNode
, m_code
)), regT0
);
1548 Jump hasCodeBlock1
= branchTestPtr(NonZero
, regT0
);
1549 preserveReturnAddressAfterCall(regT3
);
1550 restoreArgumentReference();
1551 Call callJSFunction1
= call();
1552 emitGetJITStubArg(1, regT2
);
1553 emitGetJITStubArg(3, regT1
);
1554 restoreReturnAddressBeforeReturn(regT3
);
1555 hasCodeBlock1
.link(this);
1557 Jump isNativeFunc1
= branch32(Equal
, Address(regT0
, OBJECT_OFFSETOF(CodeBlock
, m_codeType
)), Imm32(NativeCode
));
1559 // Check argCount matches callee arity.
1560 Jump arityCheckOkay1
= branch32(Equal
, Address(regT0
, OBJECT_OFFSETOF(CodeBlock
, m_numParameters
)), regT1
);
1561 preserveReturnAddressAfterCall(regT3
);
1562 emitPutJITStubArg(regT3
, 2);
1563 emitPutJITStubArg(regT0
, 4);
1564 restoreArgumentReference();
1565 Call callArityCheck1
= call();
1566 move(regT1
, callFrameRegister
);
1567 emitGetJITStubArg(1, regT2
);
1568 emitGetJITStubArg(3, regT1
);
1569 restoreReturnAddressBeforeReturn(regT3
);
1570 arityCheckOkay1
.link(this);
1571 isNativeFunc1
.link(this);
1573 compileOpCallInitializeCallFrame();
1575 preserveReturnAddressAfterCall(regT3
);
1576 emitPutJITStubArg(regT3
, 2);
1577 restoreArgumentReference();
1578 Call callDontLazyLinkCall
= call();
1579 emitGetJITStubArg(1, regT2
);
1580 restoreReturnAddressBeforeReturn(regT3
);
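// Commentary (not original text): this pre-link trampoline is the first stop
// for an unlinked call site. It compiles the callee lazily if m_code is null,
// fixes arity mismatches through cti_op_call_arityCheck, and finally asks
// cti_vm_dontLazyLinkCall for the code address, which comes back in regT0
// and is jumped to without binding the call site to this particular callee.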
    Label virtualCallLinkBegin = align();

    // Load the callee CodeBlock* into eax
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
    Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callJSFunction2 = call();
    emitGetJITStubArg(1, regT2);
    emitGetJITStubArg(3, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    hasCodeBlock2.link(this);

    Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 2);
    emitPutJITStubArg(regT0, 4);
    restoreArgumentReference();
    Call callArityCheck2 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT2);
    emitGetJITStubArg(3, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    arityCheckOkay2.link(this);
    isNativeFunc2.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 2);
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);

    jump(regT0);
    Label virtualCallBegin = align();

    // Load the callee CodeBlock* into eax
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
    Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callJSFunction3 = call();
    emitGetJITStubArg(1, regT2);
    emitGetJITStubArg(3, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
    hasCodeBlock3.link(this);

    Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 2);
    emitPutJITStubArg(regT0, 4);
    restoreArgumentReference();
    Call callArityCheck3 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT2);
    emitGetJITStubArg(3, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
    arityCheckOkay3.link(this);
    isNativeFunc3.link(this);

    // load ctiCode from the new codeBlock.
    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);

    compileOpCallInitializeCallFrame();

    jump(regT0);
    Label nativeCallThunk = align();
    preserveReturnAddressAfterCall(regT0);
    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
#if PLATFORM(X86_64)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);

    // Allocate stack space for our arglist
    subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
    COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);

    // Set up arguments
    subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount

    // Push argcount
    storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in edx
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
    mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
    subPtr(X86::ecx, X86::edx);

    // push pointer to arguments
    storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister
    move(stackPointerRegister, X86::ecx);

    // edx currently points to the first argument, edx - sizeof(Register) points to 'this'
    loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);

    move(callFrameRegister, X86::edi);

    call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));

    addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
#elif PLATFORM(X86)
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    /* We have two structs that we use to describe the stackframe we set up for our
     * call to native code. NativeCallFrameStructure describes how we set up the stack
     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
     * as the native code expects it. We do this as we are using the fastcall calling
     * convention which results in the callee popping its arguments off the stack, but
     * not the rest of the callframe so we need a nice way to ensure we increment the
     * stack pointer by the right amount after the call.
     */
#if COMPILER(MSVC) || PLATFORM(LINUX)
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in EDX
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
        JSValue result;
    };
    struct NativeFunctionCalleeSignature {
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
    };
#else
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in ECX
        // JSObject* callee; // passed in EDX
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
    };
    struct NativeFunctionCalleeSignature {
        JSValue thisValue;
        ArgList* argPointer;
    };
#endif
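// Commentary (not original text): NativeCallFrameSize below rounds the frame
// up to 16 bytes, e.g. a 20-byte struct becomes 32: (20 + 15) & ~15 == 32.
// Under fastcall the callee pops only the NativeFunctionCalleeSignature part,
// so the epilogue later re-adjusts the stack by
// NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature).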
    const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
    // Allocate system stack frame
    subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));

    // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
#if COMPILER(MSVC) || PLATFORM(LINUX)
    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);

    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
    storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));

    // Plant callframe
    move(callFrameRegister, X86::edx);

    call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));

    // JSValue is a non-POD type
    loadPtr(Address(X86::eax), X86::eax);
#else
    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);

    // Plant callframe
    move(callFrameRegister, X86::ecx);
    call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif

    // We've put a few temporaries on the stack in addition to the actual arguments
    // so pull them off now
    addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif
    // Check for an exception
    loadPtr(&(globalData->exception), regT2);
    Jump exceptionHandler = branchTestPtr(NonZero, regT2);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();

    // Handle an exception
    exceptionHandler.link(this);
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    restoreReturnAddressBeforeReturn(regT2);
    ret();
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
    patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall));
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
    *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
    *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
    UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
}
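// Commentary (a rough usage sketch, not original code): the out-parameters
// filled in above belong to the caller; in practice JSGlobalData owns the
// ExecutablePool and the trampoline CodePtrs, and hands ctiVirtualCall /
// ctiVirtualCallLink out to compiled call sites, which start out pointing at
// the pre-link/link trampolines until lazy linking repatches them to the
// callee's own code.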
void JIT::emit_op_mov(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
        if (dst == m_lastResultBytecodeRegister)
            killLastResultRegister();
    } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
        // If either the src or dst is the cached register go through
        // get/put registers to make sure we track this correctly.
        emitGetVirtualRegister(src, regT0);
        emitPutVirtualRegister(dst);
    } else {
        // Perform the copy via regT1; do not disturb any mapping in regT0.
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
        storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
    }
}
void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
    RECORD_JUMP_TARGET(target + 1);
}

void JIT::emit_op_loop(Instruction* currentInstruction)
{
    emitTimeoutCheck();

    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
}
void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
{
    emitTimeoutCheck();

    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(LessThan, regT0, regT1), target + 3);
    }
}
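// Commentary on the jump-offset arithmetic (an inference from the emitters in
// this file, not original text): bytecode jump offsets are taken relative to
// the slot holding the offset operand, so each emitter adds that operand's
// index before registering the jump - `target + 1` in op_jmp (operand 1),
// `target + 2` in op_jtrue (operand 2), and `target + 3` here (operand 3).
// addJump() then resolves the machine jump against that bytecode index when
// labels are linked.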
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    emitTimeoutCheck();

    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
    }
}

void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    // Load the operands (baseVal, proto, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);

    // Check that baseVal & proto are cells.
    emitJumpSlowCaseIfNotJSCell(regT0);
    emitJumpSlowCaseIfNotJSCell(regT1);

    // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));

    // If value is not an Object, return false.
    Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));

    // Check proto is object.
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
    Label loop(this);

    // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    valueIsImmediate.link(this);
    valueIsNotObject.link(this);
    move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
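// Commentary (not original text): this loop is the default JS `instanceof`,
// e.g. `(new Base) instanceof Base` is true because Base.prototype is found
// on the new object's prototype chain. regT2 (value) is repeatedly replaced
// by its prototype until it equals regT1 (proto) or reaches null.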
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    int argCountDst = currentInstruction[1].u.operand;
    int argsOffset = currentInstruction[2].u.operand;

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(Imm32(argsOffset));
    stubCall.call();
    // Stores a naked int32 in the register file.
    store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCallVarargs(currentInstruction);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
}
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    while (skip--)
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    while (skip--)
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);

    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
    emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
}
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction*)
{
    JITStubCall(this, cti_op_tear_off_arguments).call();
}

void JIT::emit_op_ret(Instruction* currentInstruction)
{
    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueRegister);
    ASSERT(returnValueRegister != callFrameRegister);

    // Return the result in %eax.
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();
}
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_construct_verify(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump isImm = emitJumpIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitPutVirtualRegister(dst);
}
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
{
    emitTimeoutCheck();

    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
    addJump(emitJumpIfImmediateInteger(regT0), target + 2);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));

    isZero.link(this);
}
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // Fast case
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Check Structure of global object
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match

    // Load cached property
    // Assume that the global object always uses external storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
    load32(offsetAddr, regT1);
    loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
    Jump end = jump();

    // Slow case
    noMatch.link(this);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(currentInstruction[1].u.operand);
    end.link(this);
}
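// Commentary (not original text): this is a monomorphic inline cache. The
// GlobalResolveInfo slots referenced through structureAddress/offsetAddr are
// initially empty, so the first run takes the noMatch path and
// cti_op_resolve_global fills in the global object's Structure and the
// property's storage offset; subsequent runs that still see the same
// Structure resolve the global in two loads with no stub call.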
void JIT::emit_op_not(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
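// Commentary (not original text): the first xorPtr above strips the boolean
// tag, leaving only the payload bit for a genuine true/false; branchTestPtr
// diverts any other value to the slow case. The second xorPtr restores the
// tag and flips the payload bit at the same time, computing logical NOT
// without ever untagging to a C++ bool.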
void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
    Jump isNonZero = emitJumpIfImmediateInteger(regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));

    isNonZero.link(this);
    RECORD_JUMP_TARGET(target + 2);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target + 2);
}
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target + 2);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitGetVirtualRegister(src, regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);

    RECORD_JUMP_TARGET(target + 3);
}
void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target + 2);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
    killLastResultRegister();
    RECORD_JUMP_TARGET(target + 2);
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
    killLastResultRegister();
}

void JIT::emit_op_eq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(Equal, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    not32(regT0);
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
    addJump(emitJumpIfImmediateInteger(regT0), target + 2);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));

    isZero.link(this);
    RECORD_JUMP_TARGET(target + 2);
}
void JIT::emit_op_neq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    xorPtr(regT1, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    orPtr(regT1, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
    ASSERT(regT0 == returnValueRegister);
#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_next_pname);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.call();
    Jump endOfIter = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
    addJump(jump(), currentInstruction[3].u.operand + 3);
    endOfIter.link(this);
}
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    // Jump to a slow case if either operand is a number, or if both are JSCell*s.
    move(regT0, regT2);
    orPtr(regT1, regT2);
    addSlowCase(emitJumpIfJSCell(regT2));
    addSlowCase(emitJumpIfImmediateNumber(regT2));

    if (type == OpStrictEq)
        set32(Equal, regT1, regT0, regT0);
    else
        set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(dst);
}
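// Commentary (not original text): the or-combine above is a cheap filter.
// ORing two cells still looks like a cell (first slow case), and a number tag
// on either operand survives the OR (second slow case). A lone cell mixed
// with a non-numeric immediate does pass the filter, but that is safe: its
// raw bits can never equal an immediate's, so set32(Equal, ...) still
// produces the correct false.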
void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int srcVReg = currentInstruction[2].u.operand;
    emitGetVirtualRegister(srcVReg, regT0);

    Jump wasImmediate = emitJumpIfImmediateInteger(regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));

    wasImmediate.link(this);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
    killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand + 2);
    RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
    stubCall.addArgument(Imm32(m_bytecodeIndex));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_debug(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}
void JIT::emit_op_enter(Instruction*)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);
}

void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_create_arguments(Instruction*)
{
    Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();
    argsCreated.link(this);
}

void JIT::emit_op_init_arguments(Instruction*)
{
    storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
}

void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT1));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
    stubCall.call();
    noProfiler.link(this);
}

void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT1));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
    stubCall.call();
    noProfiler.link(this);
}
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // The slow case that handles accesses to arrays (below) may jump back up to here.
    Label beginGetByValSlow(this);

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithIntToImmNoCheck(regT1, regT1);

    notImm.link(this);
    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));

    // This is the slow case that handles accesses to arrays above the fast cut-off.
    // First, check if this is an access to the vector
    linkSlowCase(iter);
    branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);

    // okay, missed the fast region, but it is still in the vector. Get the value.
    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
    // Check whether the value loaded is zero; if so we need to return undefined.
    branchTestPtr(Zero, regT2, beginGetByValSlow);
    move(regT2, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
}
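// Commentary (not original text): JSArray keeps a fast-access cut-off below
// which the inline fast path indexes the storage vector directly. The code
// above services the band between the cut-off and m_vectorLength: the index
// is still inside the vector but the entry may be a hole (a zero JSValue),
// so both the bounds check and the loaded value can bounce back to
// beginGetByValSlow, which ends in the generic cti_op_get_by_val stub.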
void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_less);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_less);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    } else {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_less);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    }
}

void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(currentInstruction[2].u.operand, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    } else {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    }
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Normal slow cases - either is not an immediate imm, or is an array.
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithIntToImmNoCheck(regT1, regT1);

    notImm.link(this); {
        JITStubCall stubCall(this, cti_op_put_by_val);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
        stubCall.call();
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
    }

    // slow cases for immediate int accesses to arrays
    linkSlowCase(iter);
    linkSlowCase(iter); {
        JITStubCall stubCall(this, cti_op_put_by_val_array);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
        stubCall.call();
    }
}
void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
}

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
}
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    xor32(Imm32(0x1), regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
}
void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)