/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

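// Usage sketch (assumed, illustrative only): the JITStubCall helper referred to by the
// deprecation note above packages up stub arguments and performs the call, e.g.
//
//     JITStubCall stubCall(this, cti_op_new_object); // cti_* stub chosen purely for illustration
//     stubCall.call(dst);
//
// rather than peeking arguments out of the stub frame by hand as emitGetJITStubArg() does.
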
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}

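// The fast path above only succeeds for a non-rope JSString of length 1 (vptr check,
// zero fiber count, length == 1); every other shape is routed to the caller's slow path
// via `failures`. Usage sketch (assumed, not taken from a specific call site):
//
//     JumpList failures;
//     emitLoadCharacterString(regT0, regT0, failures); // regT0 now holds the 16-bit character
//     // ... use the character ...
//     failures.link(this); // fall back to a slow path for ropes / longer strings
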
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
    JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* There are several cases when the uninterrupted sequence is larger than the
     * maximum required offset for patching the same sequence. E.g.: if in an
     * uninterrupted sequence the last macroassembler's instruction is a stub
     * call, it emits store instruction(s) which should not be included in the
     * calculation of the length of the uninterrupted sequence. So, insnSpace and
     * constSpace should be upper limits instead of hard limits.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
    JSInterfaceJIT::endUninterruptedSequence();
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL

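// Added note (not in the original sources): begin/endUninterruptedSequence() bracket code
// that will later be repatched in place, so the assembler must not flush a constant pool
// (or otherwise change the instruction layout) in the middle of it; the space reservations
// above guarantee that the whole sequence is emitted contiguously.
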
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif

ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

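// Added note (not in the original sources): addSlowCase()/addJump() only record a jump
// together with the current bytecode offset; the jumps are actually linked later, when
// the corresponding slow-path code or bytecode target label is emitted.
// emitJumpSlowToHot() is the reverse direction, linking a slow-path jump back into the
// main (hot) instruction stream.
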
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

#if USE(JSVALUE32_64)

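// Added explanation (not in the original sources): under JSVALUE32_64 every JSValue
// occupies two 32-bit words, a tag word (JSValue::CellTag, Int32Tag, BooleanTag, ...)
// and a payload word (the pointer, integer, or boolean bits). The helpers below load
// and store the two halves separately; for example jsNumber(5) is stored as
// tag = JSValue::Int32Tag, payload = 5.
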
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

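// Added note (not in the original sources): the two-operand form below loads the operand
// cached in the last-result register first. Loading the other operand first would call
// killLastResultRegister() and forfeit the register-reuse fast path in
// emitGetVirtualRegister() for the cached one.
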
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

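// Added note (not in the original sources): under JSVALUE64, small integers are boxed by
// OR-ing in the TagTypeNumber pattern held in tagTypeNumberRegister, so an already
// zero-extended int32 becomes a complete JSValue with a single orPtr; e.g. the int32 7
// boxes to (TagTypeNumber | 7).
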
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h