/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

namespace JSC {
/* Deprecated: Please use JITStubCall instead. */

// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(src, argumentStackOffset);
}
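// Worked example of the offset math above (assuming a JSVALUE32_64 build where
// sizeof(JSValue) == 8 and sizeof(void*) == 4): each argument spans
// sizeof(JSValue) / sizeof(void*) == 2 poke slots, so argument 1 is written at
// stack slot JITSTACKFRAME_ARGS_INDEX + 2 and argument 2 at
// JITSTACKFRAME_ARGS_INDEX + 4. On a 64-bit JSVALUE64 build both sizes are 8,
// so each argument occupies a single slot.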
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(Imm32(value), argumentStackOffset);
}
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(ImmPtr(value), argumentStackOffset);
}
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}
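// The three branches appended to 'failures' above bail out unless src holds a
// JSString (vptr check), the string is not a rope (m_fiberCount == 0), and its
// length is exactly 1; only then is the single UChar loaded into dst.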
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}

#endif
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}
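// Slow-case bookkeeping sketch: addSlowCase() records each guard jump together
// with the current m_bytecodeIndex in m_slowCases; when the out-of-line (cold)
// path for that bytecode is later emitted, the recorded jumps are linked to it.
// emitJumpSlowToHot() is the reverse link, jumping from the cold path back into
// the already-emitted hot path at m_labels[m_bytecodeIndex + relativeOffset].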
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif
#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}
inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}
inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}
inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}
inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}
inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}
inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}
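// The map()/unmap()/isMapped()/getMapped*() helpers below implement a small,
// single-entry cache: they remember which registers currently hold the tag and
// payload of one virtual register at the current bytecode index, so repeated
// emitLoadTag()/emitLoadPayload() calls can reuse registers instead of reloading
// from the call frame. map() refuses to record anything at a jump target
// (isLabeled()), since another control-flow path may enter there with different
// register contents.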
inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}
inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}
inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
    }
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(payload, argumentStackOffset);
    poke(tag, argumentStackOffset + 1);
}
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentStackOffset);
        poke(Imm32(constant.tag()), argumentStackOffset + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentStackOffset);
        poke(scratch1, argumentStackOffset + 1);
    }
}
#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The argument we want is already stored in eax
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
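// Note on the fast path above: m_lastResultBytecodeRegister tracks which virtual
// register the previous op left in cachedResultRegister (eax on x86). The cached
// value is only reused when the current instruction is not a jump target; at a
// jump target another incoming path may not have left the value in that register,
// so the code falls back to reloading from the call frame.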
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
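// Tagging sketch for the two encodings above (the constants are assumptions
// based on the usual JSImmediate/JSVALUE64 scheme, not spelled out in this
// header): with USE(JSVALUE64), tagTypeNumberRegister holds the TagTypeNumber
// constant (0xFFFF000000000000), so orPtr() boxes a zero-extended int32
// directly; without it, the int32 is sign-extended, doubled so the low tag bit
// position is freed, and emitFastArithReTagImmediate() adds TagTypeNumber (0x1).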
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
/* Deprecated: Please use JITStubCall instead. */

// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        poke(scratch, argumentStackOffset);
    }

    killLastResultRegister();
}
#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h