/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

namespace JSC {
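
// Inline emitter helpers shared by the JIT's code-generation paths.
// The emitPutJITStubArg* helpers below marshal arguments for calls into the C++
// JIT stubs: each argument occupies sizeof(JSValue) / sizeof(void*) stack slots,
// starting at JITSTACKFRAME_ARGS_INDEX within the JITStackFrame.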
/* Deprecated: Please use JITStubCall instead. */

// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(src, argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(Imm32(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(ImmPtr(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}
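
// emitNakedCall plants a near call and records it in m_calls so the linker can
// bind it to the target's entry point when the code is finalized.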
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}
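
// The uninterrupted-sequence helpers reserve assembler and constant-pool space up
// front so that a constant pool flush cannot be emitted in the middle of a code
// sequence that will later be repatched. They only apply to assemblers that use a
// constant pool (e.g. traditional ARM).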
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}

#endif

#if CPU(ARM)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#else // CPU(X86) || CPU(X86_64)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
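
// restoreArgumentReference makes the JITStackFrame visible to the C++ stub: it
// stores the current callFrame into the stack frame and (when stub arguments are
// passed in a register rather than via va_list) points the first argument register
// at the stack.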
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif
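
// checkStructure emits the Structure comparison used by the property-access fast
// paths; the returned jump is taken when the cell's structure does not match.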
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_FLAGS not implemented on this platform."
#endif
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif
#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)));
}
#if USE(JSVALUE32_64)
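
// In the JSVALUE32_64 value representation each virtual register slot holds a
// 32-bit tag and a 32-bit payload; tagFor/payloadFor compute the address of each
// half within the Register slot.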
inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}

inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}
inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}
inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}
inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}
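
// map()/unmap()/isMapped() maintain a one-entry cache recording which registers
// currently hold the tag and payload of a virtual register. The mapping is only
// established when the current bytecode index is not a jump target, so it cannot
// be reached with the registers in an unknown state.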
inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}
inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(payload, argumentStackOffset);
    poke(tag, argumentStackOffset + 1);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentStackOffset);
        poke(Imm32(constant.tag()), argumentStackOffset + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentStackOffset);
        poke(scratch1, argumentStackOffset + 1);
    }
}
#else // USE(JSVALUE32_64)
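
// In the non-JSVALUE32_64 configurations a JSValue fits in a single pointer-sized
// register. The helpers below additionally cache the most recently written virtual
// register in cachedResultRegister (eax) and track it via
// m_lastResultBytecodeRegister so a redundant reload can be skipped.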
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The argument we want is already stored in eax
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}
ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
#if USE(JSVALUE64)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}
inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif
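
// With the JSVALUE64 numeric encoding, immediate integers carry the full
// TagTypeNumber pattern in their high bits, so comparing against
// tagTypeNumberRegister with AboveOrEqual/Below distinguishes integers from
// doubles and cells.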
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(JSVALUE64)
    UNUSED_PARAM(reg);
#else
    rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
/* Deprecated: Please use JITStubCall instead. */

// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        poke(scratch, argumentStackOffset);
    }

    killLastResultRegister();
}
#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h