 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
34 #include "JSFunction.h"
35 #include "Interpreter.h"
36 #include "ResultType.h"
37 #include "SamplingTool.h"
47 #if COMPILER(GCC) && PLATFORM(X86)
49 COMPILE_ASSERT(STUB_ARGS_code
== 0x0C, STUB_ARGS_code_is_0x0C
);
50 COMPILE_ASSERT(STUB_ARGS_callFrame
== 0x0E, STUB_ARGS_callFrame_is_0x0E
);
53 #define SYMBOL_STRING(name) "_" #name
55 #define SYMBOL_STRING(name) #name
59 ".globl " SYMBOL_STRING(ctiTrampoline
) "\n"
60 SYMBOL_STRING(ctiTrampoline
) ":" "\n"
62 "movl %esp, %ebp" "\n"
66 "subl $0x1c, %esp" "\n"
67 "movl $512, %esi" "\n"
68 "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = STUB_ARGS_callFrame (see assertion above)
69 "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
70 "addl $0x1c, %esp" "\n"
79 ".globl " SYMBOL_STRING(ctiVMThrowTrampoline
) "\n"
80 SYMBOL_STRING(ctiVMThrowTrampoline
) ":" "\n"
81 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
82 "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz
) "\n"
84 #if USE(JIT_STUB_ARGUMENT_REGISTER)
85 "movl %esp, %ecx" "\n"
86 #else // JIT_STUB_ARGUMENT_STACK
87 "movl %esp, 0(%esp)" "\n"
89 "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv
) "\n"
91 "addl $0x1c, %esp" "\n"
99 #elif COMPILER(GCC) && PLATFORM(X86_64)
101 COMPILE_ASSERT(STUB_ARGS_code
== 0x10, STUB_ARGS_code_is_0x10
);
102 COMPILE_ASSERT(STUB_ARGS_callFrame
== 0x12, STUB_ARGS_callFrame_is_0x12
);
105 #define SYMBOL_STRING(name) "_" #name
107 #define SYMBOL_STRING(name) #name
111 ".globl " SYMBOL_STRING(ctiTrampoline
) "\n"
112 SYMBOL_STRING(ctiTrampoline
) ":" "\n"
114 "movq %rsp, %rbp" "\n"
120 "subq $0x48, %rsp" "\n"
121 "movq $512, %r12" "\n"
122 "movq $0xFFFF000000000000, %r14" "\n"
123 "movq $0xFFFF000000000002, %r15" "\n"
124 "movq 0x90(%rsp), %r13" "\n" // Ox90 = 0x12 * 8, 0x12 = STUB_ARGS_callFrame (see assertion above)
125 "call *0x80(%rsp)" "\n" // Ox80 = 0x10 * 8, 0x10 = STUB_ARGS_code (see assertion above)
126 "addq $0x48, %rsp" "\n"
137 ".globl " SYMBOL_STRING(ctiVMThrowTrampoline
) "\n"
138 SYMBOL_STRING(ctiVMThrowTrampoline
) ":" "\n"
139 #if USE(JIT_STUB_ARGUMENT_REGISTER)
140 "movq %rsp, %rdi" "\n"
141 "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv
) "\n"
142 #else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
143 #error "JIT_STUB_ARGUMENT configuration not supported."
145 "addq $0x48, %rsp" "\n"
159 __declspec(naked
) JSValueEncodedAsPointer
* ctiTrampoline(void* code
, RegisterFile
*, CallFrame
*, JSValuePtr
* exception
, Profiler
**, JSGlobalData
*)
170 mov edi
, [esp
+ 0x38];
171 call
[esp
+ 0x30]; // Ox30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
181 __declspec(naked
) void ctiVMThrowTrampoline()
184 #if USE(JIT_STUB_ARGUMENT_REGISTER)
186 #else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
187 #error "JIT_STUB_ARGUMENT configuration not supported."
189 call
JSC::Interpreter::cti_vm_throw
;
203 void ctiSetReturnAddress(void** where
, void* what
)
// Redirect the control transfer whose return address is `where` so that it
// subsequently targets `what`. Used to re-link previously emitted CTI stub
// calls; the actual instruction rewriting is delegated to the MacroAssembler's
// jump-patching support.
void ctiPatchCallByReturnAddress(void* where, void* what)
{
    MacroAssembler::Jump::patch(where, what);
}
// Construct a JIT for compiling `codeBlock`. `codeBlock` may be null (the
// ternaries below guard every dereference) — e.g. when the JIT instance is
// only used to generate trampolines rather than compile bytecode. The
// per-instruction label vector and the property-access / call-linking
// compilation-info vectors are pre-sized from the CodeBlock's counts so that
// indices handed out during the main compilation pass remain stable.
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
    // Sentinel: no bytecode register's value is cached in a machine register yet.
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
{
}
// Emit the fast path for op_stricteq / op_nstricteq:
//   dst = (src1 === src2), negated when `type` is OpNStrictEq.
// Operand layout: currentInstruction[1] = dst, [2] = src1, [3] = src2.
// Cases that cannot be decided by comparing the encoded immediate values
// (numbers, or two JSCell operands) are deferred to the slow case.
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);

#if USE(ALTERNATE_JSIMMEDIATE)
    // Jump to a slow case if either operand is a number, or if both are JSCell*s.
    move(X86::eax, X86::ecx);
    orPtr(X86::edx, X86::ecx);
    addSlowCase(emitJumpIfJSCell(X86::ecx));
    addSlowCase(emitJumpIfImmediateNumber(X86::ecx));

    // Past the guards, strict (in)equality is bitwise equality of the encoded values.
    if (type == OpStrictEq)
        sete32(X86::edx, X86::eax);
    else
        setne32(X86::edx, X86::eax);
    emitTagAsBoolImmediate(X86::eax);
#else
    bool negated = (type == OpNStrictEq);

    // Check that both are immediates, if so check if they're equal
    Jump firstNotImmediate = emitJumpIfJSCell(X86::eax);
    Jump secondNotImmediate = emitJumpIfJSCell(X86::edx);
    Jump bothWereImmediatesButNotEqual = jnePtr(X86::edx, X86::eax);

    // They are equal - set the result to true. (Or false, if negated).
    move(ImmPtr(JSValuePtr::encode(jsBoolean(!negated))), X86::eax);
    Jump bothWereImmediatesAndEqual = jump();

    // eax was not an immediate, we haven't yet checked edx.
    // If edx is also a JSCell, or is 0, then jump to a slow case,
    // otherwise these values are not equal.
    firstNotImmediate.link(this);
    emitJumpSlowCaseIfJSCell(X86::edx);
    addSlowCase(jePtr(X86::edx, ImmPtr(JSValuePtr::encode(js0()))));
    Jump firstWasNotImmediate = jump();

    // eax was an immediate, but edx wasn't.
    // If eax is 0 jump to a slow case, otherwise these values are not equal.
    secondNotImmediate.link(this);
    addSlowCase(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))));

    // We get here if the two values are different immediates, or one is 0 and the other is a JSCell.
    // Values are not equal, set the result to false.
    bothWereImmediatesButNotEqual.link(this);
    firstWasNotImmediate.link(this);
    move(ImmPtr(JSValuePtr::encode(jsBoolean(negated))), X86::eax);

    bothWereImmediatesAndEqual.link(this);
#endif

    emitPutVirtualRegister(dst);
}
// Emit the slow-script watchdog check placed on loop back-edges: decrement the
// countdown held in timeoutCheckRegister and, only when it reaches zero, call
// out to cti_timeout_check. The stub returns a fresh countdown value in eax,
// which is moved back into timeoutCheckRegister.
void JIT::emitSlowScriptCheck()
{
    Jump skipTimeout = jnzSub32(Imm32(1), timeoutCheckRegister);
    emitCTICall(Interpreter::cti_timeout_check);
    move(X86::eax, timeoutCheckRegister);
    skipTimeout.link(this);

    // The CTI call may clobber the machine register caching the last bytecode
    // result, so invalidate that cache.
    killLastResultRegister();
}
// Advance the bytecode cursor past opcode `name` and leave the dispatch
// switch. Only meaningful inside the opcode switch of the compilation passes.
#define NEXT_OPCODE(name) \
    m_bytecodeIndex += OPCODE_LENGTH(name); \
    break;

// Generic `case` body for a binary opcode that has no JIT fast path: marshal
// the two source virtual registers (operands 2 and 3) as stub arguments, call
// the interpreter's cti_<name> helper, and store its result into the dst
// virtual register (operand 1).
#define CTI_COMPILE_BINARY_OP(name) \
    case name: { \
        emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
        emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); \
        emitCTICall(Interpreter::cti_##name); \
        emitPutVirtualRegister(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

// As above, for unary opcodes: a single source operand passed as stub argument 1.
#define CTI_COMPILE_UNARY_OP(name) \
    case name: { \
        emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
        emitCTICall(Interpreter::cti_##name); \
        emitPutVirtualRegister(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
314 void JIT::privateCompileMainPass()
316 Instruction
* instructionsBegin
= m_codeBlock
->instructions().begin();
317 unsigned instructionCount
= m_codeBlock
->instructions().size();
318 unsigned propertyAccessInstructionIndex
= 0;
319 unsigned globalResolveInfoIndex
= 0;
320 unsigned callLinkInfoIndex
= 0;
322 for (m_bytecodeIndex
= 0; m_bytecodeIndex
< instructionCount
; ) {
323 Instruction
* currentInstruction
= instructionsBegin
+ m_bytecodeIndex
;
324 ASSERT_WITH_MESSAGE(m_interpreter
->isOpcode(currentInstruction
->u
.opcode
), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex
);
326 #if ENABLE(OPCODE_SAMPLING)
327 if (m_bytecodeIndex
> 0) // Avoid the overhead of sampling op_enter twice.
328 sampleInstruction(currentInstruction
);
331 m_labels
[m_bytecodeIndex
] = label();
332 OpcodeID opcodeID
= m_interpreter
->getOpcodeID(currentInstruction
->u
.opcode
);
336 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::eax
);
337 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
341 compileFastArith_op_add(currentInstruction
);
345 if (m_codeBlock
->needsFullScopeChain())
346 emitCTICall(Interpreter::cti_op_end
);
347 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
348 push(Address(callFrameRegister
, RegisterFile::ReturnPC
* static_cast<int>(sizeof(Register
))));
353 unsigned target
= currentInstruction
[1].u
.operand
;
354 addJump(jump(), target
+ 1);
358 compileFastArith_op_pre_inc(currentInstruction
[1].u
.operand
);
359 NEXT_OPCODE(op_pre_inc
);
362 emitSlowScriptCheck();
364 unsigned target
= currentInstruction
[1].u
.operand
;
365 addJump(jump(), target
+ 1);
368 case op_loop_if_less
: {
369 emitSlowScriptCheck();
371 unsigned op1
= currentInstruction
[1].u
.operand
;
372 unsigned op2
= currentInstruction
[2].u
.operand
;
373 unsigned target
= currentInstruction
[3].u
.operand
;
374 if (isOperandConstantImmediateInt(op2
)) {
375 emitGetVirtualRegister(op1
, X86::eax
);
376 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
377 #if USE(ALTERNATE_JSIMMEDIATE)
378 int32_t op2imm
= getConstantOperandImmediateInt(op2
);
380 int32_t op2imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)));
382 addJump(jl32(X86::eax
, Imm32(op2imm
)), target
+ 3);
384 emitGetVirtualRegisters(op1
, X86::eax
, op2
, X86::edx
);
385 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
386 emitJumpSlowCaseIfNotImmediateInteger(X86::edx
);
387 addJump(jl32(X86::eax
, X86::edx
), target
+ 3);
389 NEXT_OPCODE(op_loop_if_less
);
391 case op_loop_if_lesseq
: {
392 emitSlowScriptCheck();
394 unsigned op1
= currentInstruction
[1].u
.operand
;
395 unsigned op2
= currentInstruction
[2].u
.operand
;
396 unsigned target
= currentInstruction
[3].u
.operand
;
397 if (isOperandConstantImmediateInt(op2
)) {
398 emitGetVirtualRegister(op1
, X86::eax
);
399 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
400 #if USE(ALTERNATE_JSIMMEDIATE)
401 int32_t op2imm
= getConstantOperandImmediateInt(op2
);
403 int32_t op2imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)));
405 addJump(jle32(X86::eax
, Imm32(op2imm
)), target
+ 3);
407 emitGetVirtualRegisters(op1
, X86::eax
, op2
, X86::edx
);
408 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
409 emitJumpSlowCaseIfNotImmediateInteger(X86::edx
);
410 addJump(jle32(X86::eax
, X86::edx
), target
+ 3);
412 NEXT_OPCODE(op_loop_if_less
);
414 case op_new_object
: {
415 emitCTICall(Interpreter::cti_op_new_object
);
416 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
417 NEXT_OPCODE(op_new_object
);
420 compilePutByIdHotPath(currentInstruction
[1].u
.operand
, &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
)), currentInstruction
[3].u
.operand
, propertyAccessInstructionIndex
++);
421 NEXT_OPCODE(op_put_by_id
);
424 compileGetByIdHotPath(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
)), propertyAccessInstructionIndex
++);
425 NEXT_OPCODE(op_get_by_id
);
427 case op_instanceof
: {
428 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::eax
); // value
429 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::ecx
); // baseVal
430 emitGetVirtualRegister(currentInstruction
[4].u
.operand
, X86::edx
); // proto
432 // check if any are immediates
433 move(X86::eax
, X86::ebx
);
434 orPtr(X86::ecx
, X86::ebx
);
435 orPtr(X86::edx
, X86::ebx
);
436 emitJumpSlowCaseIfNotJSCell(X86::ebx
);
438 // check that all are object type - this is a bit of a bithack to avoid excess branching;
439 // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
440 // this works because NumberType and StringType are smaller
441 move(Imm32(3 * ObjectType
), X86::ebx
);
442 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::eax
);
443 loadPtr(Address(X86::ecx
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
444 loadPtr(Address(X86::edx
, FIELD_OFFSET(JSCell
, m_structure
)), X86::edx
);
445 sub32(Address(X86::eax
, FIELD_OFFSET(Structure
, m_typeInfo
.m_type
)), X86::ebx
);
446 sub32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_type
)), X86::ebx
);
447 addSlowCase(jne32(Address(X86::edx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_type
)), X86::ebx
));
449 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
450 load32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), X86::ecx
);
451 and32(Imm32(ImplementsHasInstance
| OverridesHasInstance
), X86::ecx
);
452 addSlowCase(jne32(X86::ecx
, Imm32(ImplementsHasInstance
)));
454 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::ecx
); // reload value
455 emitGetVirtualRegister(currentInstruction
[4].u
.operand
, X86::edx
); // reload proto
457 // optimistically load true result
458 move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), X86::eax
);
462 // load value's prototype
463 loadPtr(Address(X86::ecx
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
464 loadPtr(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_prototype
)), X86::ecx
);
466 Jump exit
= jePtr(X86::ecx
, X86::edx
);
468 jnePtr(X86::ecx
, ImmPtr(JSValuePtr::encode(jsNull())), loop
);
470 move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), X86::eax
);
474 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
476 NEXT_OPCODE(op_instanceof
);
479 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
480 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
481 emitPutJITStubArgConstant(ident
, 2);
482 emitCTICall(Interpreter::cti_op_del_by_id
);
483 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
484 NEXT_OPCODE(op_del_by_id
);
487 compileFastArith_op_mul(currentInstruction
);
491 FuncDeclNode
* func
= m_codeBlock
->function(currentInstruction
[2].u
.operand
);
492 emitPutJITStubArgConstant(func
, 1);
493 emitCTICall(Interpreter::cti_op_new_func
);
494 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
495 NEXT_OPCODE(op_new_func
);
498 compileOpCall(opcodeID
, currentInstruction
, callLinkInfoIndex
++);
499 NEXT_OPCODE(op_call
);
502 compileOpCall(opcodeID
, currentInstruction
, callLinkInfoIndex
++);
503 NEXT_OPCODE(op_call_eval
);
506 compileOpCall(opcodeID
, currentInstruction
, callLinkInfoIndex
++);
507 NEXT_OPCODE(op_construct
);
509 case op_get_global_var
: {
510 JSVariableObject
* globalObject
= static_cast<JSVariableObject
*>(currentInstruction
[2].u
.jsCell
);
511 move(ImmPtr(globalObject
), X86::eax
);
512 emitGetVariableObjectRegister(X86::eax
, currentInstruction
[3].u
.operand
, X86::eax
);
513 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
514 NEXT_OPCODE(op_get_global_var
);
516 case op_put_global_var
: {
517 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::edx
);
518 JSVariableObject
* globalObject
= static_cast<JSVariableObject
*>(currentInstruction
[1].u
.jsCell
);
519 move(ImmPtr(globalObject
), X86::eax
);
520 emitPutVariableObjectRegister(X86::edx
, X86::eax
, currentInstruction
[2].u
.operand
);
521 NEXT_OPCODE(op_put_global_var
);
523 case op_get_scoped_var
: {
524 int skip
= currentInstruction
[3].u
.operand
+ m_codeBlock
->needsFullScopeChain();
526 emitGetFromCallFrameHeader(RegisterFile::ScopeChain
, X86::eax
);
528 loadPtr(Address(X86::eax
, FIELD_OFFSET(ScopeChainNode
, next
)), X86::eax
);
530 loadPtr(Address(X86::eax
, FIELD_OFFSET(ScopeChainNode
, object
)), X86::eax
);
531 emitGetVariableObjectRegister(X86::eax
, currentInstruction
[2].u
.operand
, X86::eax
);
532 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
533 NEXT_OPCODE(op_get_scoped_var
);
535 case op_put_scoped_var
: {
536 int skip
= currentInstruction
[2].u
.operand
+ m_codeBlock
->needsFullScopeChain();
538 emitGetFromCallFrameHeader(RegisterFile::ScopeChain
, X86::edx
);
539 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::eax
);
541 loadPtr(Address(X86::edx
, FIELD_OFFSET(ScopeChainNode
, next
)), X86::edx
);
543 loadPtr(Address(X86::edx
, FIELD_OFFSET(ScopeChainNode
, object
)), X86::edx
);
544 emitPutVariableObjectRegister(X86::eax
, X86::edx
, currentInstruction
[1].u
.operand
);
545 NEXT_OPCODE(op_put_scoped_var
);
547 case op_tear_off_activation
: {
548 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
549 emitCTICall(Interpreter::cti_op_tear_off_activation
);
550 NEXT_OPCODE(op_tear_off_activation
);
552 case op_tear_off_arguments
: {
553 emitCTICall(Interpreter::cti_op_tear_off_arguments
);
554 NEXT_OPCODE(op_tear_off_arguments
);
557 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
558 if (m_codeBlock
->needsFullScopeChain())
559 emitCTICall(Interpreter::cti_op_ret_scopeChain
);
561 // Return the result in %eax.
562 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
564 // Grab the return address.
565 emitGetFromCallFrameHeader(RegisterFile::ReturnPC
, X86::edx
);
567 // Restore our caller's "r".
568 emitGetFromCallFrameHeader(RegisterFile::CallerFrame
, callFrameRegister
);
577 emitPutJITStubArgConstant(currentInstruction
[2].u
.operand
, 1);
578 emitPutJITStubArgConstant(currentInstruction
[3].u
.operand
, 2);
579 emitCTICall(Interpreter::cti_op_new_array
);
580 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
581 NEXT_OPCODE(op_new_array
);
584 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
585 emitPutJITStubArgConstant(ident
, 1);
586 emitCTICall(Interpreter::cti_op_resolve
);
587 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
588 NEXT_OPCODE(op_resolve
);
590 case op_construct_verify
: {
591 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
593 emitJumpSlowCaseIfNotJSCell(X86::eax
);
594 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
595 addSlowCase(jne32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
) + FIELD_OFFSET(TypeInfo
, m_type
)), Imm32(ObjectType
)));
597 NEXT_OPCODE(op_construct_verify
);
599 case op_get_by_val
: {
600 emitGetVirtualRegisters(currentInstruction
[2].u
.operand
, X86::eax
, currentInstruction
[3].u
.operand
, X86::edx
);
601 emitJumpSlowCaseIfNotImmediateInteger(X86::edx
);
602 #if USE(ALTERNATE_JSIMMEDIATE)
603 // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
604 // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
605 // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
606 // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
607 // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
608 // extending since it makes it easier to re-tag the value in the slow case.
609 zeroExtend32ToPtr(X86::edx
, X86::edx
);
611 emitFastArithImmToInt(X86::edx
);
613 emitJumpSlowCaseIfNotJSCell(X86::eax
);
614 addSlowCase(jnePtr(Address(X86::eax
), ImmPtr(m_interpreter
->m_jsArrayVptr
)));
616 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
617 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSArray
, m_storage
)), X86::ecx
);
618 addSlowCase(jae32(X86::edx
, Address(X86::eax
, FIELD_OFFSET(JSArray
, m_fastAccessCutoff
))));
620 // Get the value from the vector
621 loadPtr(BaseIndex(X86::ecx
, X86::edx
, ScalePtr
, FIELD_OFFSET(ArrayStorage
, m_vector
[0])), X86::eax
);
622 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
623 NEXT_OPCODE(op_get_by_val
);
625 case op_resolve_func
: {
626 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
627 emitPutJITStubArgConstant(ident
, 1);
628 emitCTICall(Interpreter::cti_op_resolve_func
);
629 emitPutVirtualRegister(currentInstruction
[2].u
.operand
, X86::edx
);
630 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
631 NEXT_OPCODE(op_resolve_func
);
634 compileFastArith_op_sub(currentInstruction
);
637 case op_put_by_val
: {
638 emitGetVirtualRegisters(currentInstruction
[1].u
.operand
, X86::eax
, currentInstruction
[2].u
.operand
, X86::edx
);
639 emitJumpSlowCaseIfNotImmediateInteger(X86::edx
);
640 #if USE(ALTERNATE_JSIMMEDIATE)
641 // See comment in op_get_by_val.
642 zeroExtend32ToPtr(X86::edx
, X86::edx
);
644 emitFastArithImmToInt(X86::edx
);
646 emitJumpSlowCaseIfNotJSCell(X86::eax
);
647 addSlowCase(jnePtr(Address(X86::eax
), ImmPtr(m_interpreter
->m_jsArrayVptr
)));
649 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
650 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSArray
, m_storage
)), X86::ecx
);
651 Jump inFastVector
= jb32(X86::edx
, Address(X86::eax
, FIELD_OFFSET(JSArray
, m_fastAccessCutoff
)));
652 // No; oh well, check if the access if within the vector - if so, we may still be okay.
653 addSlowCase(jae32(X86::edx
, Address(X86::ecx
, FIELD_OFFSET(ArrayStorage
, m_vectorLength
))));
655 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
656 // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
657 addSlowCase(jzPtr(BaseIndex(X86::ecx
, X86::edx
, ScalePtr
, FIELD_OFFSET(ArrayStorage
, m_vector
[0]))));
659 // All good - put the value into the array.
660 inFastVector
.link(this);
661 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::eax
);
662 storePtr(X86::eax
, BaseIndex(X86::ecx
, X86::edx
, ScalePtr
, FIELD_OFFSET(ArrayStorage
, m_vector
[0])));
663 NEXT_OPCODE(op_put_by_val
);
665 CTI_COMPILE_BINARY_OP(op_lesseq
)
666 case op_loop_if_true
: {
667 emitSlowScriptCheck();
669 unsigned target
= currentInstruction
[2].u
.operand
;
670 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
672 Jump isZero
= jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(js0())));
673 addJump(emitJumpIfImmediateInteger(X86::eax
), target
+ 2);
675 addJump(jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target
+ 2);
676 addSlowCase(jnePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
679 NEXT_OPCODE(op_loop_if_true
);
681 case op_resolve_base
: {
682 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
683 emitPutJITStubArgConstant(ident
, 1);
684 emitCTICall(Interpreter::cti_op_resolve_base
);
685 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
686 NEXT_OPCODE(op_resolve_base
);
689 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
690 emitCTICall(Interpreter::cti_op_negate
);
691 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
692 NEXT_OPCODE(op_negate
);
694 case op_resolve_skip
: {
695 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
696 emitPutJITStubArgConstant(ident
, 1);
697 emitPutJITStubArgConstant(currentInstruction
[3].u
.operand
+ m_codeBlock
->needsFullScopeChain(), 2);
698 emitCTICall(Interpreter::cti_op_resolve_skip
);
699 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
700 NEXT_OPCODE(op_resolve_skip
);
702 case op_resolve_global
: {
704 void* globalObject
= currentInstruction
[2].u
.jsCell
;
705 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
707 unsigned currentIndex
= globalResolveInfoIndex
++;
708 void* structureAddress
= &(m_codeBlock
->globalResolveInfo(currentIndex
).structure
);
709 void* offsetAddr
= &(m_codeBlock
->globalResolveInfo(currentIndex
).offset
);
711 // Check Structure of global object
712 move(ImmPtr(globalObject
), X86::eax
);
713 loadPtr(structureAddress
, X86::edx
);
714 Jump noMatch
= jnePtr(X86::edx
, Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
))); // Structures don't match
716 // Load cached property
717 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSGlobalObject
, m_propertyStorage
)), X86::eax
);
718 load32(offsetAddr
, X86::edx
);
719 loadPtr(BaseIndex(X86::eax
, X86::edx
, ScalePtr
), X86::eax
);
720 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
725 emitPutJITStubArgConstant(globalObject
, 1);
726 emitPutJITStubArgConstant(ident
, 2);
727 emitPutJITStubArgConstant(currentIndex
, 3);
728 emitCTICall(Interpreter::cti_op_resolve_global
);
729 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
731 NEXT_OPCODE(op_resolve_global
);
733 CTI_COMPILE_BINARY_OP(op_div
)
735 compileFastArith_op_pre_dec(currentInstruction
[1].u
.operand
);
736 NEXT_OPCODE(op_pre_dec
);
739 unsigned op1
= currentInstruction
[1].u
.operand
;
740 unsigned op2
= currentInstruction
[2].u
.operand
;
741 unsigned target
= currentInstruction
[3].u
.operand
;
742 if (isOperandConstantImmediateInt(op2
)) {
743 emitGetVirtualRegister(op1
, X86::eax
);
744 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
745 #if USE(ALTERNATE_JSIMMEDIATE)
746 int32_t op2imm
= getConstantOperandImmediateInt(op2
);
748 int32_t op2imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)));
750 addJump(jge32(X86::eax
, Imm32(op2imm
)), target
+ 3);
752 emitGetVirtualRegisters(op1
, X86::eax
, op2
, X86::edx
);
753 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
754 emitJumpSlowCaseIfNotImmediateInteger(X86::edx
);
755 addJump(jge32(X86::eax
, X86::edx
), target
+ 3);
757 NEXT_OPCODE(op_jnless
);
760 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::eax
);
761 xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool
)), X86::eax
);
762 addSlowCase(jnzPtr(X86::eax
, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue
))));
763 xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool
| JSImmediate::ExtendedPayloadBitBoolValue
)), X86::eax
);
764 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
768 unsigned target
= currentInstruction
[2].u
.operand
;
769 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
771 addJump(jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(js0()))), target
+ 2);
772 Jump isNonZero
= emitJumpIfImmediateInteger(X86::eax
);
774 addJump(jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target
+ 2);
775 addSlowCase(jnePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(true)))));
777 isNonZero
.link(this);
778 NEXT_OPCODE(op_jfalse
);
781 unsigned src
= currentInstruction
[1].u
.operand
;
782 unsigned target
= currentInstruction
[2].u
.operand
;
784 emitGetVirtualRegister(src
, X86::eax
);
785 Jump isImmediate
= emitJumpIfNotJSCell(X86::eax
);
787 // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
788 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
789 addJump(jnz32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), Imm32(MasqueradesAsUndefined
)), target
+ 2);
790 Jump wasNotImmediate
= jump();
792 // Now handle the immediate cases - undefined & null
793 isImmediate
.link(this);
794 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined
), X86::eax
);
795 addJump(jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsNull()))), target
+ 2);
797 wasNotImmediate
.link(this);
798 NEXT_OPCODE(op_jeq_null
);
801 unsigned src
= currentInstruction
[1].u
.operand
;
802 unsigned target
= currentInstruction
[2].u
.operand
;
804 emitGetVirtualRegister(src
, X86::eax
);
805 Jump isImmediate
= emitJumpIfNotJSCell(X86::eax
);
807 // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
808 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
809 addJump(jz32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), Imm32(MasqueradesAsUndefined
)), target
+ 2);
810 Jump wasNotImmediate
= jump();
812 // Now handle the immediate cases - undefined & null
813 isImmediate
.link(this);
814 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined
), X86::eax
);
815 addJump(jnePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsNull()))), target
+ 2);
817 wasNotImmediate
.link(this);
818 NEXT_OPCODE(op_jneq_null
);
821 compileFastArith_op_post_inc(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
);
822 NEXT_OPCODE(op_post_inc
);
824 case op_unexpected_load
: {
825 JSValuePtr v
= m_codeBlock
->unexpectedConstant(currentInstruction
[2].u
.operand
);
826 move(ImmPtr(JSValuePtr::encode(v
)), X86::eax
);
827 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
828 NEXT_OPCODE(op_unexpected_load
);
831 int retAddrDst
= currentInstruction
[1].u
.operand
;
832 int target
= currentInstruction
[2].u
.operand
;
833 DataLabelPtr storeLocation
= storePtrWithPatch(Address(callFrameRegister
, sizeof(Register
) * retAddrDst
));
834 addJump(jump(), target
+ 2);
835 m_jsrSites
.append(JSRInfo(storeLocation
, label()));
839 jump(Address(callFrameRegister
, sizeof(Register
) * currentInstruction
[1].u
.operand
));
840 NEXT_OPCODE(op_sret
);
843 emitGetVirtualRegisters(currentInstruction
[2].u
.operand
, X86::eax
, currentInstruction
[3].u
.operand
, X86::edx
);
844 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax
, X86::edx
, X86::ecx
);
845 sete32(X86::edx
, X86::eax
);
846 emitTagAsBoolImmediate(X86::eax
);
847 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
851 compileFastArith_op_lshift(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
);
852 NEXT_OPCODE(op_lshift
);
855 compileFastArith_op_bitand(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
);
856 NEXT_OPCODE(op_bitand
);
859 compileFastArith_op_rshift(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
);
860 NEXT_OPCODE(op_rshift
);
863 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::eax
);
864 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
865 #if USE(ALTERNATE_JSIMMEDIATE)
867 emitFastArithIntToImmNoCheck(X86::eax
, X86::eax
);
869 xorPtr(Imm32(~JSImmediate::TagTypeNumber
), X86::eax
);
871 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
872 NEXT_OPCODE(op_bitnot
);
874 case op_resolve_with_base
: {
875 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
876 emitPutJITStubArgConstant(ident
, 1);
877 emitCTICall(Interpreter::cti_op_resolve_with_base
);
878 emitPutVirtualRegister(currentInstruction
[2].u
.operand
, X86::edx
);
879 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
880 NEXT_OPCODE(op_resolve_with_base
);
882 case op_new_func_exp
: {
883 FuncExprNode
* func
= m_codeBlock
->functionExpression(currentInstruction
[2].u
.operand
);
884 emitPutJITStubArgConstant(func
, 1);
885 emitCTICall(Interpreter::cti_op_new_func_exp
);
886 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
887 NEXT_OPCODE(op_new_func_exp
);
890 compileFastArith_op_mod(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
);
894 unsigned target
= currentInstruction
[2].u
.operand
;
895 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
897 Jump isZero
= jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(js0())));
898 addJump(emitJumpIfImmediateInteger(X86::eax
), target
+ 2);
900 addJump(jePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target
+ 2);
901 addSlowCase(jnePtr(X86::eax
, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
904 NEXT_OPCODE(op_jtrue
);
906 CTI_COMPILE_BINARY_OP(op_less
)
908 emitGetVirtualRegisters(currentInstruction
[2].u
.operand
, X86::eax
, currentInstruction
[3].u
.operand
, X86::edx
);
909 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax
, X86::edx
, X86::ecx
);
910 setne32(X86::edx
, X86::eax
);
911 emitTagAsBoolImmediate(X86::eax
);
913 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
918 compileFastArith_op_post_dec(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
);
919 NEXT_OPCODE(op_post_dec
);
921 CTI_COMPILE_BINARY_OP(op_urshift
)
923 emitGetVirtualRegisters(currentInstruction
[2].u
.operand
, X86::eax
, currentInstruction
[3].u
.operand
, X86::edx
);
924 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax
, X86::edx
, X86::ecx
);
925 xorPtr(X86::edx
, X86::eax
);
926 emitFastArithReTagImmediate(X86::eax
, X86::eax
);
927 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
928 NEXT_OPCODE(op_bitxor
);
930 case op_new_regexp
: {
931 RegExp
* regExp
= m_codeBlock
->regexp(currentInstruction
[2].u
.operand
);
932 emitPutJITStubArgConstant(regExp
, 1);
933 emitCTICall(Interpreter::cti_op_new_regexp
);
934 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
935 NEXT_OPCODE(op_new_regexp
);
938 emitGetVirtualRegisters(currentInstruction
[2].u
.operand
, X86::eax
, currentInstruction
[3].u
.operand
, X86::edx
);
939 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax
, X86::edx
, X86::ecx
);
940 orPtr(X86::edx
, X86::eax
);
941 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
942 NEXT_OPCODE(op_bitor
);
945 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
946 emitCTICall(Interpreter::cti_op_throw
);
948 addPtr(Imm32(0x48), X86::esp
);
957 addPtr(Imm32(0x1c), X86::esp
);
964 NEXT_OPCODE(op_throw
);
966 case op_get_pnames
: {
967 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
968 emitCTICall(Interpreter::cti_op_get_pnames
);
969 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
970 NEXT_OPCODE(op_get_pnames
);
972 case op_next_pname
: {
973 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
974 unsigned target
= currentInstruction
[3].u
.operand
;
975 emitCTICall(Interpreter::cti_op_next_pname
);
976 Jump endOfIter
= jzPtr(X86::eax
);
977 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
978 addJump(jump(), target
+ 3);
979 endOfIter
.link(this);
980 NEXT_OPCODE(op_next_pname
);
982 case op_push_scope
: {
983 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
984 emitCTICall(Interpreter::cti_op_push_scope
);
985 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
986 NEXT_OPCODE(op_push_scope
);
989 emitCTICall(Interpreter::cti_op_pop_scope
);
990 NEXT_OPCODE(op_pop_scope
);
992 CTI_COMPILE_UNARY_OP(op_typeof
)
993 CTI_COMPILE_UNARY_OP(op_is_undefined
)
994 CTI_COMPILE_UNARY_OP(op_is_boolean
)
995 CTI_COMPILE_UNARY_OP(op_is_number
)
996 CTI_COMPILE_UNARY_OP(op_is_string
)
997 CTI_COMPILE_UNARY_OP(op_is_object
)
998 CTI_COMPILE_UNARY_OP(op_is_function
)
1000 compileOpStrictEq(currentInstruction
, OpStrictEq
);
1001 NEXT_OPCODE(op_stricteq
);
1003 case op_nstricteq
: {
1004 compileOpStrictEq(currentInstruction
, OpNStrictEq
);
1005 NEXT_OPCODE(op_nstricteq
);
1007 case op_to_jsnumber
: {
1008 int srcVReg
= currentInstruction
[2].u
.operand
;
1009 emitGetVirtualRegister(srcVReg
, X86::eax
);
1011 Jump wasImmediate
= emitJumpIfImmediateInteger(X86::eax
);
1013 emitJumpSlowCaseIfNotJSCell(X86::eax
, srcVReg
);
1014 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
1015 addSlowCase(jne32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_type
)), Imm32(NumberType
)));
1017 wasImmediate
.link(this);
1019 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1020 NEXT_OPCODE(op_to_jsnumber
);
1022 CTI_COMPILE_BINARY_OP(op_in
)
1023 case op_push_new_scope
: {
1024 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
1025 emitPutJITStubArgConstant(ident
, 1);
1026 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 2, X86::ecx
);
1027 emitCTICall(Interpreter::cti_op_push_new_scope
);
1028 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1029 NEXT_OPCODE(op_push_new_scope
);
1032 emitGetCTIParam(STUB_ARGS_callFrame
, callFrameRegister
);
1033 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1034 NEXT_OPCODE(op_catch
);
1036 case op_jmp_scopes
: {
1037 unsigned count
= currentInstruction
[1].u
.operand
;
1038 emitPutJITStubArgConstant(count
, 1);
1039 emitCTICall(Interpreter::cti_op_jmp_scopes
);
1040 unsigned target
= currentInstruction
[2].u
.operand
;
1041 addJump(jump(), target
+ 2);
1042 NEXT_OPCODE(op_jmp_scopes
);
1044 case op_put_by_index
: {
1045 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
1046 emitPutJITStubArgConstant(currentInstruction
[2].u
.operand
, 2);
1047 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 3, X86::ecx
);
1048 emitCTICall(Interpreter::cti_op_put_by_index
);
1049 NEXT_OPCODE(op_put_by_index
);
1051 case op_switch_imm
: {
1052 unsigned tableIndex
= currentInstruction
[1].u
.operand
;
1053 unsigned defaultOffset
= currentInstruction
[2].u
.operand
;
1054 unsigned scrutinee
= currentInstruction
[3].u
.operand
;
1056 // create jump table for switch destinations, track this switch statement.
1057 SimpleJumpTable
* jumpTable
= &m_codeBlock
->immediateSwitchJumpTable(tableIndex
);
1058 m_switches
.append(SwitchRecord(jumpTable
, m_bytecodeIndex
, defaultOffset
, SwitchRecord::Immediate
));
1059 jumpTable
->ctiOffsets
.grow(jumpTable
->branchOffsets
.size());
1061 emitPutJITStubArgFromVirtualRegister(scrutinee
, 1, X86::ecx
);
1062 emitPutJITStubArgConstant(tableIndex
, 2);
1063 emitCTICall(Interpreter::cti_op_switch_imm
);
1065 NEXT_OPCODE(op_switch_imm
);
1067 case op_switch_char
: {
1068 unsigned tableIndex
= currentInstruction
[1].u
.operand
;
1069 unsigned defaultOffset
= currentInstruction
[2].u
.operand
;
1070 unsigned scrutinee
= currentInstruction
[3].u
.operand
;
1072 // create jump table for switch destinations, track this switch statement.
1073 SimpleJumpTable
* jumpTable
= &m_codeBlock
->characterSwitchJumpTable(tableIndex
);
1074 m_switches
.append(SwitchRecord(jumpTable
, m_bytecodeIndex
, defaultOffset
, SwitchRecord::Character
));
1075 jumpTable
->ctiOffsets
.grow(jumpTable
->branchOffsets
.size());
1077 emitPutJITStubArgFromVirtualRegister(scrutinee
, 1, X86::ecx
);
1078 emitPutJITStubArgConstant(tableIndex
, 2);
1079 emitCTICall(Interpreter::cti_op_switch_char
);
1081 NEXT_OPCODE(op_switch_char
);
1083 case op_switch_string
: {
1084 unsigned tableIndex
= currentInstruction
[1].u
.operand
;
1085 unsigned defaultOffset
= currentInstruction
[2].u
.operand
;
1086 unsigned scrutinee
= currentInstruction
[3].u
.operand
;
1088 // create jump table for switch destinations, track this switch statement.
1089 StringJumpTable
* jumpTable
= &m_codeBlock
->stringSwitchJumpTable(tableIndex
);
1090 m_switches
.append(SwitchRecord(jumpTable
, m_bytecodeIndex
, defaultOffset
));
1092 emitPutJITStubArgFromVirtualRegister(scrutinee
, 1, X86::ecx
);
1093 emitPutJITStubArgConstant(tableIndex
, 2);
1094 emitCTICall(Interpreter::cti_op_switch_string
);
1096 NEXT_OPCODE(op_switch_string
);
1098 case op_del_by_val
: {
1099 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
1100 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 2, X86::ecx
);
1101 emitCTICall(Interpreter::cti_op_del_by_val
);
1102 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1103 NEXT_OPCODE(op_del_by_val
);
1105 case op_put_getter
: {
1106 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
1107 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
1108 emitPutJITStubArgConstant(ident
, 2);
1109 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 3, X86::ecx
);
1110 emitCTICall(Interpreter::cti_op_put_getter
);
1111 NEXT_OPCODE(op_put_getter
);
1113 case op_put_setter
: {
1114 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::ecx
);
1115 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
1116 emitPutJITStubArgConstant(ident
, 2);
1117 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 3, X86::ecx
);
1118 emitCTICall(Interpreter::cti_op_put_setter
);
1119 NEXT_OPCODE(op_put_setter
);
1121 case op_new_error
: {
1122 JSValuePtr message
= m_codeBlock
->unexpectedConstant(currentInstruction
[3].u
.operand
);
1123 emitPutJITStubArgConstant(currentInstruction
[2].u
.operand
, 1);
1124 emitPutJITStubArgConstant(JSValuePtr::encode(message
), 2);
1125 emitPutJITStubArgConstant(m_bytecodeIndex
, 3);
1126 emitCTICall(Interpreter::cti_op_new_error
);
1127 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1128 NEXT_OPCODE(op_new_error
);
1131 emitPutJITStubArgConstant(currentInstruction
[1].u
.operand
, 1);
1132 emitPutJITStubArgConstant(currentInstruction
[2].u
.operand
, 2);
1133 emitPutJITStubArgConstant(currentInstruction
[3].u
.operand
, 3);
1134 emitCTICall(Interpreter::cti_op_debug
);
1135 NEXT_OPCODE(op_debug
);
1138 unsigned dst
= currentInstruction
[1].u
.operand
;
1139 unsigned src1
= currentInstruction
[2].u
.operand
;
1141 emitGetVirtualRegister(src1
, X86::eax
);
1142 Jump isImmediate
= emitJumpIfNotJSCell(X86::eax
);
1144 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
1145 setnz32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), Imm32(MasqueradesAsUndefined
), X86::eax
);
1147 Jump wasNotImmediate
= jump();
1149 isImmediate
.link(this);
1151 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined
), X86::eax
);
1152 sete32(Imm32(JSImmediate::FullTagTypeNull
), X86::eax
);
1154 wasNotImmediate
.link(this);
1156 emitTagAsBoolImmediate(X86::eax
);
1157 emitPutVirtualRegister(dst
);
1159 NEXT_OPCODE(op_eq_null
);
1162 unsigned dst
= currentInstruction
[1].u
.operand
;
1163 unsigned src1
= currentInstruction
[2].u
.operand
;
1165 emitGetVirtualRegister(src1
, X86::eax
);
1166 Jump isImmediate
= emitJumpIfNotJSCell(X86::eax
);
1168 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::ecx
);
1169 setz32(Address(X86::ecx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), Imm32(MasqueradesAsUndefined
), X86::eax
);
1171 Jump wasNotImmediate
= jump();
1173 isImmediate
.link(this);
1175 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined
), X86::eax
);
1176 setne32(Imm32(JSImmediate::FullTagTypeNull
), X86::eax
);
1178 wasNotImmediate
.link(this);
1180 emitTagAsBoolImmediate(X86::eax
);
1181 emitPutVirtualRegister(dst
);
1183 NEXT_OPCODE(op_neq_null
);
1186 // Even though CTI doesn't use them, we initialize our constant
1187 // registers to zap stale pointers, to avoid unnecessarily prolonging
1188 // object lifetime and increasing GC pressure.
1189 size_t count
= m_codeBlock
->m_numVars
+ m_codeBlock
->numberOfConstantRegisters();
1190 for (size_t j
= 0; j
< count
; ++j
)
1191 emitInitRegister(j
);
1193 NEXT_OPCODE(op_enter
);
1195 case op_enter_with_activation
: {
1196 // Even though CTI doesn't use them, we initialize our constant
1197 // registers to zap stale pointers, to avoid unnecessarily prolonging
1198 // object lifetime and increasing GC pressure.
1199 size_t count
= m_codeBlock
->m_numVars
+ m_codeBlock
->numberOfConstantRegisters();
1200 for (size_t j
= 0; j
< count
; ++j
)
1201 emitInitRegister(j
);
1203 emitCTICall(Interpreter::cti_op_push_activation
);
1204 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1206 NEXT_OPCODE(op_enter_with_activation
);
1208 case op_create_arguments
: {
1209 if (m_codeBlock
->m_numParameters
== 1)
1210 emitCTICall(Interpreter::cti_op_create_arguments_no_params
);
1212 emitCTICall(Interpreter::cti_op_create_arguments
);
1213 NEXT_OPCODE(op_create_arguments
);
1215 case op_convert_this
: {
1216 emitGetVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
1218 emitJumpSlowCaseIfNotJSCell(X86::eax
);
1219 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSCell
, m_structure
)), X86::edx
);
1220 addSlowCase(jnz32(Address(X86::edx
, FIELD_OFFSET(Structure
, m_typeInfo
.m_flags
)), Imm32(NeedsThisConversion
)));
1222 NEXT_OPCODE(op_convert_this
);
1224 case op_profile_will_call
: {
1225 emitGetCTIParam(STUB_ARGS_profilerReference
, X86::eax
);
1226 Jump noProfiler
= jzPtr(Address(X86::eax
));
1227 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::eax
);
1228 emitCTICall(Interpreter::cti_op_profile_will_call
);
1229 noProfiler
.link(this);
1231 NEXT_OPCODE(op_profile_will_call
);
1233 case op_profile_did_call
: {
1234 emitGetCTIParam(STUB_ARGS_profilerReference
, X86::eax
);
1235 Jump noProfiler
= jzPtr(Address(X86::eax
));
1236 emitPutJITStubArgFromVirtualRegister(currentInstruction
[1].u
.operand
, 1, X86::eax
);
1237 emitCTICall(Interpreter::cti_op_profile_did_call
);
1238 noProfiler
.link(this);
1240 NEXT_OPCODE(op_profile_did_call
);
1242 case op_get_array_length
:
1243 case op_get_by_id_chain
:
1244 case op_get_by_id_generic
:
1245 case op_get_by_id_proto
:
1246 case op_get_by_id_proto_list
:
1247 case op_get_by_id_self
:
1248 case op_get_by_id_self_list
:
1249 case op_get_string_length
:
1250 case op_put_by_id_generic
:
1251 case op_put_by_id_replace
:
1252 case op_put_by_id_transition
:
1253 ASSERT_NOT_REACHED();
1257 ASSERT(propertyAccessInstructionIndex
== m_codeBlock
->numberOfStructureStubInfos());
1258 ASSERT(callLinkInfoIndex
== m_codeBlock
->numberOfCallLinkInfos());
1261 // reset this, in order to guard it's use with asserts
1262 m_bytecodeIndex
= (unsigned)-1;
// Link pass of JIT compilation: after the main pass has emitted machine code
// and recorded pending intra-block jumps in m_jmpTable, bind each recorded
// jump source to the machine-code label generated for its destination
// bytecode index (m_labels[n] is the label emitted at bytecode index n).
// NOTE(review): this chunk is a line-shredded extraction; the original lines
// between the visible numbered fragments (the function's braces and any
// trailing statements, original lines 1268 and 1272+) are missing from this
// view and are NOT reconstructed here -- confirm against the upstream file.
1267 void JIT::privateCompileLinkPass()
1269 unsigned jmpTableCount
= m_jmpTable
.size();
1270 for (unsigned i
= 0; i
< jmpTableCount
; ++i
)
// Each entry pairs a jump source (.from) with a target bytecode index.
1271 m_jmpTable
[i
].from
.linkTo(m_labels
[m_jmpTable
[i
].toBytecodeIndex
], this);
// Slow-case pass: emit out-of-line code for every SlowCaseEntry recorded by
// the main pass. Each slow path typically re-packages operands as JIT stub
// arguments (emitPutJITStubArg*), calls the matching Interpreter::cti_* C++
// stub (emitCTICall), stores the result back to a virtual register, and
// jumps back to the hot path (emitJumpSlowToHot / NEXT_OPCODE bookkeeping).
// NOTE(review): line-shredded extraction -- many original lines are missing
// from this view (case labels, linkSlowCase(iter) calls, braces, #else/#endif
// arms; see the gaps in the embedded original line numbers). The visible
// fragments are preserved byte-for-byte below; confirm against upstream
// before treating this as compilable source.
1275 void JIT::privateCompileSlowCases()
1277 Instruction
* instructionsBegin
= m_codeBlock
->instructions().begin();
// Indices used to pair each slow case with the stub-info records allocated
// during the main pass (asserted to match at the end of this function).
1278 unsigned propertyAccessInstructionIndex
= 0;
1279 unsigned callLinkInfoIndex
= 0;
// Note: no increment clause -- iter is advanced inside the loop body as the
// slow-case jumps for each bytecode index are consumed.
1281 for (Vector
<SlowCaseEntry
>::iterator iter
= m_slowCases
.begin(); iter
!= m_slowCases
.end();) {
1282 // FIXME: enable peephole optimizations for slow cases when applicable
1283 killLastResultRegister();
1285 m_bytecodeIndex
= iter
->to
;
// Remember the bytecode index this batch of slow cases belongs to, for the
// "enough/too many jumps linked" assertions after the switch.
1287 unsigned firstTo
= m_bytecodeIndex
;
1289 Instruction
* currentInstruction
= instructionsBegin
+ m_bytecodeIndex
;
// Dispatch on the opcode that produced this slow case.
1291 switch (OpcodeID opcodeID
= m_interpreter
->getOpcodeID(currentInstruction
->u
.opcode
)) {
1292 case op_convert_this
: {
1295 emitPutJITStubArg(X86::eax
, 1);
1296 emitCTICall(Interpreter::cti_op_convert_this
);
1297 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1298 NEXT_OPCODE(op_convert_this
);
1301 compileFastArithSlow_op_add(currentInstruction
, iter
);
1302 NEXT_OPCODE(op_add
);
1304 case op_construct_verify
: {
1307 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, X86::eax
);
1308 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1310 NEXT_OPCODE(op_construct_verify
);
1312 case op_get_by_val
: {
1313 // The slow case that handles accesses to arrays (below) may jump back up to here.
1314 Label
beginGetByValSlow(this);
1316 Jump notImm
= getSlowCase(iter
);
1319 emitFastArithIntToImmNoCheck(X86::edx
, X86::edx
);
1321 emitPutJITStubArg(X86::eax
, 1);
1322 emitPutJITStubArg(X86::edx
, 2);
1323 emitCTICall(Interpreter::cti_op_get_by_val
);
1324 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1325 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val
));
1327 // This is slow case that handles accesses to arrays above the fast cut-off.
1328 // First, check if this is an access to the vector
1330 jae32(X86::edx
, Address(X86::ecx
, FIELD_OFFSET(ArrayStorage
, m_vectorLength
)), beginGetByValSlow
);
1332 // okay, missed the fast region, but it is still in the vector. Get the value.
1333 loadPtr(BaseIndex(X86::ecx
, X86::edx
, ScalePtr
, FIELD_OFFSET(ArrayStorage
, m_vector
[0])), X86::ecx
);
1334 // Check whether the value loaded is zero; if so we need to return undefined.
1335 jzPtr(X86::ecx
, beginGetByValSlow
);
1336 move(X86::ecx
, X86::eax
);
1337 emitPutVirtualRegister(currentInstruction
[1].u
.operand
, X86::eax
);
1339 NEXT_OPCODE(op_get_by_val
);
1342 compileFastArithSlow_op_sub(currentInstruction
, iter
);
1343 NEXT_OPCODE(op_sub
);
1346 compileFastArithSlow_op_rshift(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, iter
);
1347 NEXT_OPCODE(op_rshift
);
1350 compileFastArithSlow_op_lshift(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, iter
);
1351 NEXT_OPCODE(op_lshift
);
1353 case op_loop_if_less
: {
1354 unsigned op2
= currentInstruction
[2].u
.operand
;
1355 unsigned target
= currentInstruction
[3].u
.operand
;
// Two emission variants: constant-int rhs passes the constant from its
// virtual register; otherwise both operands are already in eax/edx.
1356 if (isOperandConstantImmediateInt(op2
)) {
1358 emitPutJITStubArg(X86::eax
, 1);
1359 emitPutJITStubArgFromVirtualRegister(op2
, 2, X86::ecx
);
1360 emitCTICall(Interpreter::cti_op_loop_if_less
);
1361 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 3);
1365 emitPutJITStubArg(X86::eax
, 1);
1366 emitPutJITStubArg(X86::edx
, 2);
1367 emitCTICall(Interpreter::cti_op_loop_if_less
);
1368 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 3);
1370 NEXT_OPCODE(op_loop_if_less
);
1372 case op_put_by_id
: {
1373 compilePutByIdSlowCase(currentInstruction
[1].u
.operand
, &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
)), currentInstruction
[3].u
.operand
, iter
, propertyAccessInstructionIndex
++);
1374 NEXT_OPCODE(op_put_by_id
);
1376 case op_get_by_id
: {
1377 compileGetByIdSlowCase(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
)), iter
, propertyAccessInstructionIndex
++);
1378 NEXT_OPCODE(op_get_by_id
);
1380 case op_loop_if_lesseq
: {
1381 unsigned op2
= currentInstruction
[2].u
.operand
;
1382 unsigned target
= currentInstruction
[3].u
.operand
;
1383 if (isOperandConstantImmediateInt(op2
)) {
1385 emitPutJITStubArg(X86::eax
, 1);
1386 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 2, X86::ecx
);
1387 emitCTICall(Interpreter::cti_op_loop_if_lesseq
);
1388 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 3);
1392 emitPutJITStubArg(X86::eax
, 1);
1393 emitPutJITStubArg(X86::edx
, 2);
1394 emitCTICall(Interpreter::cti_op_loop_if_lesseq
);
1395 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 3);
1397 NEXT_OPCODE(op_loop_if_lesseq
);
1400 compileFastArithSlow_op_pre_inc(currentInstruction
[1].u
.operand
, iter
);
1401 NEXT_OPCODE(op_pre_inc
);
1403 case op_put_by_val
: {
1404 // Normal slow cases - either is not an immediate imm, or is an array.
1405 Jump notImm
= getSlowCase(iter
);
1408 emitFastArithIntToImmNoCheck(X86::edx
, X86::edx
);
1410 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::ecx
);
1411 emitPutJITStubArg(X86::eax
, 1);
1412 emitPutJITStubArg(X86::edx
, 2);
1413 emitPutJITStubArg(X86::ecx
, 3);
1414 emitCTICall(Interpreter::cti_op_put_by_val
);
1415 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val
));
1417 // slow cases for immediate int accesses to arrays
1420 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, X86::ecx
);
1421 emitPutJITStubArg(X86::eax
, 1);
1422 emitPutJITStubArg(X86::edx
, 2);
1423 emitPutJITStubArg(X86::ecx
, 3);
1424 emitCTICall(Interpreter::cti_op_put_by_val_array
);
1426 NEXT_OPCODE(op_put_by_val
);
1428 case op_loop_if_true
: {
1430 emitPutJITStubArg(X86::eax
, 1);
1431 emitCTICall(Interpreter::cti_op_jtrue
);
1432 unsigned target
= currentInstruction
[2].u
.operand
;
1433 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 2);
1434 NEXT_OPCODE(op_loop_if_true
);
1437 compileFastArithSlow_op_pre_dec(currentInstruction
[1].u
.operand
, iter
);
1438 NEXT_OPCODE(op_pre_dec
);
1441 unsigned op2
= currentInstruction
[2].u
.operand
;
1442 unsigned target
= currentInstruction
[3].u
.operand
;
1443 if (isOperandConstantImmediateInt(op2
)) {
1445 emitPutJITStubArg(X86::eax
, 1);
1446 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 2, X86::ecx
);
1447 emitCTICall(Interpreter::cti_op_jless
);
1448 emitJumpSlowToHot(jz32(X86::eax
), target
+ 3);
1452 emitPutJITStubArg(X86::eax
, 1);
1453 emitPutJITStubArg(X86::edx
, 2);
1454 emitCTICall(Interpreter::cti_op_jless
);
1455 emitJumpSlowToHot(jz32(X86::eax
), target
+ 3);
1457 NEXT_OPCODE(op_jnless
);
// Undo the fast path's speculative bool-tag flip before calling the stub.
1461 xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool
)), X86::eax
);
1462 emitPutJITStubArg(X86::eax
, 1);
1463 emitCTICall(Interpreter::cti_op_not
);
1464 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1465 NEXT_OPCODE(op_not
);
1469 emitPutJITStubArg(X86::eax
, 1);
1470 emitCTICall(Interpreter::cti_op_jtrue
);
1471 unsigned target
= currentInstruction
[2].u
.operand
;
1472 emitJumpSlowToHot(jz32(X86::eax
), target
+ 2); // inverted!
1473 NEXT_OPCODE(op_jfalse
);
1476 compileFastArithSlow_op_post_inc(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, iter
);
1477 NEXT_OPCODE(op_post_inc
);
1481 emitPutJITStubArg(X86::eax
, 1);
1482 emitCTICall(Interpreter::cti_op_bitnot
);
1483 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1484 NEXT_OPCODE(op_bitnot
);
1487 compileFastArithSlow_op_bitand(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, iter
);
1488 NEXT_OPCODE(op_bitand
);
1492 emitPutJITStubArg(X86::eax
, 1);
1493 emitCTICall(Interpreter::cti_op_jtrue
);
1494 unsigned target
= currentInstruction
[2].u
.operand
;
1495 emitJumpSlowToHot(jnz32(X86::eax
), target
+ 2);
1496 NEXT_OPCODE(op_jtrue
);
1499 compileFastArithSlow_op_post_dec(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, iter
);
1500 NEXT_OPCODE(op_post_dec
);
1504 emitPutJITStubArg(X86::eax
, 1);
1505 emitPutJITStubArg(X86::edx
, 2);
1506 emitCTICall(Interpreter::cti_op_bitxor
);
1507 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1508 NEXT_OPCODE(op_bitxor
);
1512 emitPutJITStubArg(X86::eax
, 1);
1513 emitPutJITStubArg(X86::edx
, 2);
1514 emitCTICall(Interpreter::cti_op_bitor
);
1515 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1516 NEXT_OPCODE(op_bitor
);
1520 emitPutJITStubArg(X86::eax
, 1);
1521 emitPutJITStubArg(X86::edx
, 2);
1522 emitCTICall(Interpreter::cti_op_eq
);
1523 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1528 emitPutJITStubArg(X86::eax
, 1);
1529 emitPutJITStubArg(X86::edx
, 2);
1530 emitCTICall(Interpreter::cti_op_neq
);
1531 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1532 NEXT_OPCODE(op_neq
);
1537 #if !USE(ALTERNATE_JSIMMEDIATE)
1540 emitPutJITStubArg(X86::eax
, 1);
1541 emitPutJITStubArg(X86::edx
, 2);
1542 emitCTICall(Interpreter::cti_op_stricteq
);
1543 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1544 NEXT_OPCODE(op_stricteq
);
1546 case op_nstricteq
: {
1549 #if !USE(ALTERNATE_JSIMMEDIATE)
1552 emitPutJITStubArg(X86::eax
, 1);
1553 emitPutJITStubArg(X86::edx
, 2);
1554 emitCTICall(Interpreter::cti_op_nstricteq
);
1555 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1556 NEXT_OPCODE(op_nstricteq
);
1558 case op_instanceof
: {
1562 emitPutJITStubArgFromVirtualRegister(currentInstruction
[2].u
.operand
, 1, X86::ecx
);
1563 emitPutJITStubArgFromVirtualRegister(currentInstruction
[3].u
.operand
, 2, X86::ecx
);
1564 emitPutJITStubArgFromVirtualRegister(currentInstruction
[4].u
.operand
, 3, X86::ecx
);
1565 emitCTICall(Interpreter::cti_op_instanceof
);
1566 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1567 NEXT_OPCODE(op_instanceof
);
1570 compileFastArithSlow_op_mod(currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, iter
);
1571 NEXT_OPCODE(op_mod
);
1574 compileFastArithSlow_op_mul(currentInstruction
, iter
);
1575 NEXT_OPCODE(op_mul
);
// op_call / op_call_eval / op_construct share the same slow-case helper,
// distinguished by the opcodeID argument; each consumes a call-link index.
1579 compileOpCallSlowCase(currentInstruction
, iter
, callLinkInfoIndex
++, opcodeID
);
1580 NEXT_OPCODE(op_call
);
1582 case op_call_eval
: {
1583 compileOpCallSlowCase(currentInstruction
, iter
, callLinkInfoIndex
++, opcodeID
);
1584 NEXT_OPCODE(op_call_eval
);
1586 case op_construct
: {
1587 compileOpCallSlowCase(currentInstruction
, iter
, callLinkInfoIndex
++, opcodeID
);
1588 NEXT_OPCODE(op_construct
);
1590 case op_to_jsnumber
: {
1591 linkSlowCaseIfNotJSCell(iter
, currentInstruction
[2].u
.operand
);
1594 emitPutJITStubArg(X86::eax
, 1);
1595 emitCTICall(Interpreter::cti_op_to_jsnumber
);
1597 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1598 NEXT_OPCODE(op_to_jsnumber
);
1602 ASSERT_NOT_REACHED();
// Verify that exactly the slow-case jumps registered for this bytecode
// index were consumed by the case above -- no fewer, no more.
1605 ASSERT_WITH_MESSAGE(iter
== m_slowCases
.end() || firstTo
!= iter
->to
,"Not enough jumps linked in slow case codegen.");
1606 ASSERT_WITH_MESSAGE(firstTo
== (iter
- 1)->to
, "Too many jumps linked in slow case codegen.");
// Return from this slow path to the corresponding hot-path continuation.
1608 emitJumpSlowToHot(jump(), 0);
1611 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1612 ASSERT(propertyAccessInstructionIndex
== m_codeBlock
->numberOfStructureStubInfos());
1614 ASSERT(callLinkInfoIndex
== m_codeBlock
->numberOfCallLinkInfos());
1617 // reset this, in order to guard its use with asserts
1618 m_bytecodeIndex
= (unsigned)-1;
// Top-level compile driver: runs the three passes (main, link, slow cases),
// copies the assembled code into an executable allocation, then patches
// switch jump tables, exception-handler targets, recorded calls, jsr sites,
// and property-access / call-link stub records with real code addresses,
// finally publishing the result on the CodeBlock via setJITCode.
// NOTE(review): line-shredded extraction -- original lines missing between
// the numbered fragments (braces, #else/#endif arms, register-file prologue
// lines); visible fragments preserved byte-for-byte.
1622 void JIT::privateCompile()
1624 sampleCodeBlock(m_codeBlock
);
1625 #if ENABLE(OPCODE_SAMPLING)
1626 sampleInstruction(m_codeBlock
->instructions().begin());
1629 // Could use a pop_m, but would need to offset the following instruction if so.
1631 emitPutToCallFrameHeader(X86::ecx
, RegisterFile::ReturnPC
);
// Function code gets a register-file overflow check before the body runs;
// the slow branch is emitted after the main passes, below.
1633 Jump slowRegisterFileCheck
;
1634 Label afterRegisterFileCheck
;
1635 if (m_codeBlock
->codeType() == FunctionCode
) {
1636 // In the case of a fast linked call, we do not set this up in the caller.
1637 emitPutImmediateToCallFrameHeader(m_codeBlock
, RegisterFile::CodeBlock
);
1639 emitGetCTIParam(STUB_ARGS_registerFile
, X86::eax
);
1640 addPtr(Imm32(m_codeBlock
->m_numCalleeRegisters
* sizeof(Register
)), callFrameRegister
, X86::edx
);
1642 slowRegisterFileCheck
= jg32(X86::edx
, Address(X86::eax
, FIELD_OFFSET(RegisterFile
, m_end
)));
1643 afterRegisterFileCheck
= label();
// The three compilation passes.
1646 privateCompileMainPass();
1647 privateCompileLinkPass();
1648 privateCompileSlowCases();
1650 if (m_codeBlock
->codeType() == FunctionCode
) {
1651 slowRegisterFileCheck
.link(this);
1652 m_bytecodeIndex
= 0; // emitCTICall will add to the map, but doesn't actually need this...
1653 emitCTICall(Interpreter::cti_register_file_check
);
1655 // reset this, in order to guard its use with asserts
1656 m_bytecodeIndex
= (unsigned)-1;
1658 jump(afterRegisterFileCheck
);
1661 ASSERT(m_jmpTable
.isEmpty());
// Materialize the assembled code into an executable pool allocation.
1663 RefPtr
<ExecutablePool
> allocator
= m_globalData
->poolForSize(m_assembler
.size());
1664 void* code
= m_assembler
.executableCopy(allocator
.get());
1665 JITCodeRef
codeRef(code
, allocator
);
1667 codeRef
.codeSize
= m_assembler
.size();
1670 PatchBuffer
patchBuffer(code
);
1672 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
1673 for (unsigned i
= 0; i
< m_switches
.size(); ++i
) {
1674 SwitchRecord record
= m_switches
[i
];
1675 unsigned bytecodeIndex
= record
.bytecodeIndex
;
1677 if (record
.type
!= SwitchRecord::String
) {
1678 ASSERT(record
.type
== SwitchRecord::Immediate
|| record
.type
== SwitchRecord::Character
);
1679 ASSERT(record
.jumpTable
.simpleJumpTable
->branchOffsets
.size() == record
.jumpTable
.simpleJumpTable
->ctiOffsets
.size());
// A branchOffset of 0 means "no explicit target" -- fall back to default.
1681 record
.jumpTable
.simpleJumpTable
->ctiDefault
= patchBuffer
.addressOf(m_labels
[bytecodeIndex
+ 3 + record
.defaultOffset
]);
1683 for (unsigned j
= 0; j
< record
.jumpTable
.simpleJumpTable
->branchOffsets
.size(); ++j
) {
1684 unsigned offset
= record
.jumpTable
.simpleJumpTable
->branchOffsets
[j
];
1685 record
.jumpTable
.simpleJumpTable
->ctiOffsets
[j
] = offset
? patchBuffer
.addressOf(m_labels
[bytecodeIndex
+ 3 + offset
]) : record
.jumpTable
.simpleJumpTable
->ctiDefault
;
1688 ASSERT(record
.type
== SwitchRecord::String
);
1690 record
.jumpTable
.stringJumpTable
->ctiDefault
= patchBuffer
.addressOf(m_labels
[bytecodeIndex
+ 3 + record
.defaultOffset
]);
1692 StringJumpTable::StringOffsetTable::iterator end
= record
.jumpTable
.stringJumpTable
->offsetTable
.end();
1693 for (StringJumpTable::StringOffsetTable::iterator it
= record
.jumpTable
.stringJumpTable
->offsetTable
.begin(); it
!= end
; ++it
) {
1694 unsigned offset
= it
->second
.branchOffset
;
1695 it
->second
.ctiOffset
= offset
? patchBuffer
.addressOf(m_labels
[bytecodeIndex
+ 3 + offset
]) : record
.jumpTable
.stringJumpTable
->ctiDefault
;
// Resolve exception-handler targets to machine-code addresses.
1700 for (size_t i
= 0; i
< m_codeBlock
->numberOfExceptionHandlers(); ++i
) {
1701 HandlerInfo
& handler
= m_codeBlock
->exceptionHandler(i
);
1702 handler
.nativeCode
= patchBuffer
.addressOf(m_labels
[handler
.target
]);
// Link every call recorded during code generation.
1705 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
1707 patchBuffer
.link(iter
->from
, iter
->to
);
// When exception info is retained, map each call's return-address offset
// back to the bytecode index that emitted it.
1710 if (m_codeBlock
->hasExceptionInfo()) {
1711 m_codeBlock
->pcVector().reserveCapacity(m_calls
.size());
1712 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
)
1713 m_codeBlock
->pcVector().append(PC(reinterpret_cast<void**>(patchBuffer
.addressOf(iter
->from
)) - reinterpret_cast<void**>(code
), iter
->bytecodeIndex
));
1716 // Link absolute addresses for jsr
1717 for (Vector
<JSRInfo
>::iterator iter
= m_jsrSites
.begin(); iter
!= m_jsrSites
.end(); ++iter
)
1718 patchBuffer
.setPtr(iter
->storeLocation
, patchBuffer
.addressOf(iter
->target
));
// Record patch locations for property-access stubs; zeroed when the
// optimization is compiled out (#if branch below).
1720 for (unsigned i
= 0; i
< m_codeBlock
->numberOfStructureStubInfos(); ++i
) {
1721 StructureStubInfo
& info
= m_codeBlock
->structureStubInfo(i
);
1722 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1723 info
.callReturnLocation
= patchBuffer
.addressOf(m_propertyAccessCompilationInfo
[i
].callReturnLocation
);
1724 info
.hotPathBegin
= patchBuffer
.addressOf(m_propertyAccessCompilationInfo
[i
].hotPathBegin
);
1726 info
.callReturnLocation
= 0;
1727 info
.hotPathBegin
= 0;
// Likewise for call-link stubs.
1730 for (unsigned i
= 0; i
< m_codeBlock
->numberOfCallLinkInfos(); ++i
) {
1731 CallLinkInfo
& info
= m_codeBlock
->callLinkInfo(i
);
1732 #if ENABLE(JIT_OPTIMIZE_CALL)
1733 info
.callReturnLocation
= patchBuffer
.addressOf(m_callStructureStubCompilationInfo
[i
].callReturnLocation
);
1734 info
.hotPathBegin
= patchBuffer
.addressOf(m_callStructureStubCompilationInfo
[i
].hotPathBegin
);
1735 info
.hotPathOther
= patchBuffer
.addressOf(m_callStructureStubCompilationInfo
[i
].hotPathOther
);
1736 info
.coldPathOther
= patchBuffer
.addressOf(m_callStructureStubCompilationInfo
[i
].coldPathOther
);
1738 info
.callReturnLocation
= 0;
1739 info
.hotPathBegin
= 0;
1740 info
.hotPathOther
= 0;
1741 info
.coldPathOther
= 0;
// Publish the finished machine code on the CodeBlock.
1745 m_codeBlock
->setJITCode(codeRef
);
// Generates the shared CTI machine-code trampolines for this VM: the fast
// array-length and string-length property-access stubs, plus the three
// op_call / op_call_eval / op_construct slow-path trampolines (pre-link,
// link, and fully-virtual). The assembled code is copied into an executable
// pool and the resulting entry points are published on m_interpreter.
// NOTE(review): this extract appears to be missing some physical lines of the
// original file (function braces, ret() emissions, and #else/#endif branches
// between the numbered statements) - confirm against the full source.
1748 void JIT::privateCompileCTIMachineTrampolines()
1750 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1751 // (1) The first function provides fast property access for array length
1752 Label arrayLengthBegin
= align();
1754 // Check eax is an array
1755 Jump array_failureCases1
= emitJumpIfNotJSCell(X86::eax
);
1756 Jump array_failureCases2
= jnePtr(Address(X86::eax
), ImmPtr(m_interpreter
->m_jsArrayVptr
));
1758 // Checks out okay! - get the length from the storage
1759 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSArray
, m_storage
)), X86::eax
);
1760 load32(Address(X86::eax
, FIELD_OFFSET(ArrayStorage
, m_length
)), X86::eax
);
// Bail to the slow case if the length cannot be represented as an immediate int.
1762 Jump array_failureCases3
= ja32(X86::eax
, Imm32(JSImmediate::maxImmediateInt
));
1764 // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
1765 emitFastArithIntToImmNoCheck(X86::eax
, X86::eax
);
1769 // (2) The second function provides fast property access for string length
1770 Label stringLengthBegin
= align();
1772 // Check eax is a string
1773 Jump string_failureCases1
= emitJumpIfNotJSCell(X86::eax
);
1774 Jump string_failureCases2
= jnePtr(Address(X86::eax
), ImmPtr(m_interpreter
->m_jsStringVptr
));
1776 // Checks out okay! - get the length from the Ustring.
1777 loadPtr(Address(X86::eax
, FIELD_OFFSET(JSString
, m_value
) + FIELD_OFFSET(UString
, m_rep
)), X86::eax
);
1778 load32(Address(X86::eax
, FIELD_OFFSET(UString::Rep
, len
)), X86::eax
);
// Bail to the slow case if the length cannot be represented as an immediate int.
1780 Jump string_failureCases3
= ja32(X86::eax
, Imm32(JSImmediate::maxImmediateInt
));
1782 // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
1783 emitFastArithIntToImmNoCheck(X86::eax
, X86::eax
);
1788 // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
// First call trampoline: used before a call site has been linked.
1790 Label virtualCallPreLinkBegin
= align();
1792 // Load the callee CodeBlock* into eax
1793 loadPtr(Address(X86::ecx
, FIELD_OFFSET(JSFunction
, m_body
)), X86::eax
);
1794 loadPtr(Address(X86::eax
, FIELD_OFFSET(FunctionBodyNode
, m_code
)), X86::eax
);
1795 Jump hasCodeBlock1
= jnzPtr(X86::eax
);
// No CodeBlock yet: call out (linked below to cti_op_call_JSFunction), then
// reload the stub arguments clobbered by the call.
1797 restoreArgumentReference();
1798 Jump callJSFunction1
= call();
1799 emitGetJITStubArg(1, X86::ecx
);
1800 emitGetJITStubArg(3, X86::edx
);
1802 hasCodeBlock1
.link(this);
1804 // Check argCount matches callee arity.
1805 Jump arityCheckOkay1
= je32(Address(X86::eax
, FIELD_OFFSET(CodeBlock
, m_numParameters
)), X86::edx
);
// Arity mismatch: stash args and call out (linked below to
// cti_op_call_arityCheck); edx then holds the adjusted call frame.
1807 emitPutJITStubArg(X86::ebx
, 2);
1808 emitPutJITStubArg(X86::eax
, 4);
1809 restoreArgumentReference();
1810 Jump callArityCheck1
= call();
1811 move(X86::edx
, callFrameRegister
);
1812 emitGetJITStubArg(1, X86::ecx
);
1813 emitGetJITStubArg(3, X86::edx
);
1815 arityCheckOkay1
.link(this);
1817 compileOpCallInitializeCallFrame();
// Tail of the pre-link trampoline: call out (linked below to
// cti_vm_dontLazyLinkCall).
1820 emitPutJITStubArg(X86::ebx
, 2);
1821 restoreArgumentReference();
1822 Jump callDontLazyLinkCall
= call();
// Second call trampoline: same shape as the first, but its tail calls out to
// cti_vm_lazyLinkCall (see links below) so the call site gets linked.
1827 Label virtualCallLinkBegin
= align();
1829 // Load the callee CodeBlock* into eax
1830 loadPtr(Address(X86::ecx
, FIELD_OFFSET(JSFunction
, m_body
)), X86::eax
);
1831 loadPtr(Address(X86::eax
, FIELD_OFFSET(FunctionBodyNode
, m_code
)), X86::eax
);
1832 Jump hasCodeBlock2
= jnzPtr(X86::eax
);
1834 restoreArgumentReference();
1835 Jump callJSFunction2
= call();
1836 emitGetJITStubArg(1, X86::ecx
);
1837 emitGetJITStubArg(3, X86::edx
);
1839 hasCodeBlock2
.link(this);
1841 // Check argCount matches callee arity.
1842 Jump arityCheckOkay2
= je32(Address(X86::eax
, FIELD_OFFSET(CodeBlock
, m_numParameters
)), X86::edx
);
1844 emitPutJITStubArg(X86::ebx
, 2);
1845 emitPutJITStubArg(X86::eax
, 4);
1846 restoreArgumentReference();
1847 Jump callArityCheck2
= call();
1848 move(X86::edx
, callFrameRegister
);
1849 emitGetJITStubArg(1, X86::ecx
);
1850 emitGetJITStubArg(3, X86::edx
);
1852 arityCheckOkay2
.link(this);
1854 compileOpCallInitializeCallFrame();
1857 emitPutJITStubArg(X86::ebx
, 2);
1858 restoreArgumentReference();
1859 Jump callLazyLinkCall
= call();
// Third trampoline: the fully-virtual call path - after the arity check it
// loads the callee's JIT code pointer directly rather than calling out to a
// linking stub.
1864 Label virtualCallBegin
= align();
1866 // Load the callee CodeBlock* into eax
1867 loadPtr(Address(X86::ecx
, FIELD_OFFSET(JSFunction
, m_body
)), X86::eax
);
1868 loadPtr(Address(X86::eax
, FIELD_OFFSET(FunctionBodyNode
, m_code
)), X86::eax
);
1869 Jump hasCodeBlock3
= jnzPtr(X86::eax
);
1871 restoreArgumentReference();
1872 Jump callJSFunction3
= call();
1873 emitGetJITStubArg(1, X86::ecx
);
1874 emitGetJITStubArg(3, X86::edx
);
1876 hasCodeBlock3
.link(this);
1878 // Check argCount matches callee arity.
1879 Jump arityCheckOkay3
= je32(Address(X86::eax
, FIELD_OFFSET(CodeBlock
, m_numParameters
)), X86::edx
);
1881 emitPutJITStubArg(X86::ebx
, 2);
1882 emitPutJITStubArg(X86::eax
, 4);
1883 restoreArgumentReference();
1884 Jump callArityCheck3
= call();
1885 move(X86::edx
, callFrameRegister
);
1886 emitGetJITStubArg(1, X86::ecx
);
1887 emitGetJITStubArg(3, X86::edx
);
1889 arityCheckOkay3
.link(this);
1891 compileOpCallInitializeCallFrame();
1893 // load ctiCode from the new codeBlock.
1894 loadPtr(Address(X86::eax
, FIELD_OFFSET(CodeBlock
, m_jitCode
)), X86::eax
);
1898 // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
1899 m_interpreter
->m_executablePool
= m_globalData
->poolForSize(m_assembler
.size());
1900 void* code
= m_assembler
.executableCopy(m_interpreter
->m_executablePool
.get());
1901 PatchBuffer
patchBuffer(code
);
1903 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
// Route every property-access failure case to the generic C fallback stubs.
1904 patchBuffer
.link(array_failureCases1
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail
));
1905 patchBuffer
.link(array_failureCases2
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail
));
1906 patchBuffer
.link(array_failureCases3
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail
));
1907 patchBuffer
.link(string_failureCases1
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail
));
1908 patchBuffer
.link(string_failureCases2
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail
));
1909 patchBuffer
.link(string_failureCases3
, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail
));
// Publish the property-access trampoline entry points on the interpreter.
1911 m_interpreter
->m_ctiArrayLengthTrampoline
= patchBuffer
.addressOf(arrayLengthBegin
);
1912 m_interpreter
->m_ctiStringLengthTrampoline
= patchBuffer
.addressOf(stringLengthBegin
);
// Bind the out-calls emitted in the three call trampolines to their C stubs.
1914 patchBuffer
.link(callArityCheck1
, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck
));
1915 patchBuffer
.link(callArityCheck2
, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck
));
1916 patchBuffer
.link(callArityCheck3
, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck
));
1917 patchBuffer
.link(callJSFunction1
, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction
));
1918 patchBuffer
.link(callJSFunction2
, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction
));
1919 patchBuffer
.link(callJSFunction3
, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction
));
1920 patchBuffer
.link(callDontLazyLinkCall
, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall
));
1921 patchBuffer
.link(callLazyLinkCall
, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall
));
// Publish the call trampoline entry points on the interpreter.
1923 m_interpreter
->m_ctiVirtualCallPreLink
= patchBuffer
.addressOf(virtualCallPreLinkBegin
);
1924 m_interpreter
->m_ctiVirtualCallLink
= patchBuffer
.addressOf(virtualCallLinkBegin
);
1925 m_interpreter
->m_ctiVirtualCall
= patchBuffer
.addressOf(virtualCallBegin
);
// Emits JIT code that reads a scope-variable slot out of a JSVariableObject:
//   dst = variableObject->d          (the object's JSVariableObjectData)
//   dst = dst->registers             (base of the Register array)
//   dst = dst[index]                 (the variable's value)
// |dst| doubles as the scratch register for the two intermediate loads, so
// |variableObject| is left untouched.
// NOTE(review): the enclosing braces of this function appear to have been
// dropped by the extraction - confirm against the full source.
1928 void JIT::emitGetVariableObjectRegister(RegisterID variableObject
, int index
, RegisterID dst
)
1930 loadPtr(Address(variableObject
, FIELD_OFFSET(JSVariableObject
, d
)), dst
);
1931 loadPtr(Address(dst
, FIELD_OFFSET(JSVariableObject::JSVariableObjectData
, registers
)), dst
);
// index is a Register-slot index; scale it to a byte offset.
1932 loadPtr(Address(dst
, index
* sizeof(Register
)), dst
);
// Emits JIT code that writes |src| into a scope-variable slot of a
// JSVariableObject (the mirror of emitGetVariableObjectRegister):
//   variableObject = variableObject->d         (the object's JSVariableObjectData)
//   variableObject = variableObject->registers (base of the Register array)
//   variableObject[index] = src
// Note: |variableObject| is clobbered - it is reused as the scratch register.
// NOTE(review): the enclosing braces of this function appear to have been
// dropped by the extraction - confirm against the full source.
1935 void JIT::emitPutVariableObjectRegister(RegisterID src
, RegisterID variableObject
, int index
)
1937 loadPtr(Address(variableObject
, FIELD_OFFSET(JSVariableObject
, d
)), variableObject
);
1938 loadPtr(Address(variableObject
, FIELD_OFFSET(JSVariableObject::JSVariableObjectData
, registers
)), variableObject
);
// index is a Register-slot index; scale it to a byte offset.
1939 storePtr(src
, Address(variableObject
, index
* sizeof(Register
)));
1944 #endif // ENABLE(JIT)