/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlines_h
#define JITInlines_h

#if ENABLE(JIT)

namespace JSC {

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

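// The helpers below read and write slots in the CallFrame header. On
// JSVALUE32_64 an int occupies separate tag and payload words; on JSVALUE64
// it is a single 64-bit word.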
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
#else
    store64(from, addressFor(entry, callFrameRegister));
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

#if USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load64(Address(from, entry * sizeof(Register)), to);
    killLastResultRegister();
}
#endif

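// Loads the single character of a one-character JSString into dst, appending
// to the failure list if src is not a string, is not exactly one character
// long, or its StringImpl character buffer is not available.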
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    failures.append(branchTest32(Zero, dst));
    loadPtr(MacroAssembler::Address(dst, StringImpl::flagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, StringImpl::dataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}

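// Emits a near call and records it (together with the current bytecode offset
// and the target address) so it can be linked when the code is finalized.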
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}

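// On assemblers that use a constant pool (e.g. ARM traditional, SH4), an
// "uninterrupted sequence" reserves instruction and constant space up front
// so the assembler does not flush the pool in the middle of the sequence.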
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#ifndef NDEBUG
    /* There are several cases when the uninterrupted sequence is larger than
     * maximum required offset for pathing the same sequence. Eg.: if in a
     * uninterrupted sequence the last macroassembler's instruction is a stub
     * call, it emits store instruction(s) which should not be included in the
     * calculation of length of uninterrupted sequence. So, the insnSpace and
     * constSpace should be upper limit instead of hard limit.
     */

#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif

    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#else
    UNUSED_PARAM(insnSpace);
    UNUSED_PARAM(constSpace);
#endif
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL

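// Records the current code origin in the tag half of the ArgumentCount header
// slot and publishes callFrameRegister as vm->topCallFrame, so slow-path and
// host code called from here can identify the active frame.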
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
    if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#else
        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#endif
    }
    storePtr(callFrameRegister, &m_vm->topCallFrame);
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM) || CPU(ARM64)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

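// Inline allocation fast path: pop the next cell off the MarkedAllocator's
// free list (falling back to the slow case if the list is empty), then
// install the structure and clear the butterfly (property storage) pointer.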
template<typename StructureType>
inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
{
    loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
    addSlowCase(branchTestPtr(Zero, result));

    // remove the object from the free list
    loadPtr(Address(result), scratch);
    storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));

    // initialize the object's structure
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // initialize the object's property storage pointer
    storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
}

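// Writes the value currently in regT0 (and its tag in regT1 on JSVALUE32_64)
// into the ValueProfile's buckets; with more than one bucket the target slot
// is chosen pseudo-randomly via bucketCounterRegister.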
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
    ASSERT(shouldEmitProfiling());
    ASSERT(valueProfile);

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif
    const RegisterID scratch = regT3;

    if (ValueProfile::numberOfBuckets == 1) {
        // We're in a simple configuration: only one bucket, so we can just do a direct
        // store.
#if USE(JSVALUE64)
        store64(value, valueProfile->m_buckets);
#else
        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
        store32(value, &descriptor->asBits.payload);
        store32(valueTag, &descriptor->asBits.tag);
#endif
        return;
    }

    if (m_randomGenerator.getUint32() & 1)
        add32(TrustedImm32(1), bucketCounterRegister);
    else
        add32(TrustedImm32(3), bucketCounterRegister);
    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
    store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
}

inline void JIT::emitValueProfilingSite()
{
    emitValueProfilingSite(m_bytecodeOffset);
}
#endif // ENABLE(VALUE_PROFILER)

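// Array profiling: record the last seen structure and leave the indexing type
// in the incoming register; the *SpecialCase helpers below flag hole stores
// and out-of-bounds accesses so the profile can inform later code generation.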
inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
{
    UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now.

    RegisterID structure = structureAndIndexingType;
    RegisterID indexingType = structureAndIndexingType;

    if (shouldEmitProfiling())
        storePtr(structure, arrayProfile->addressOfLastSeenStructure());

    load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
}

inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
{
#if ENABLE(VALUE_PROFILER)
    emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
#else
    UNUSED_PARAM(bytecodeIndex);
    emitArrayProfilingSite(structureAndIndexingType, scratch, 0);
#endif
}

inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
#if ENABLE(VALUE_PROFILER)
    store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
#else
    UNUSED_PARAM(arrayProfile);
#endif
}

inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
{
#if ENABLE(VALUE_PROFILER)
    store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
#else
    UNUSED_PARAM(arrayProfile);
#endif
}

static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
{
#if ENABLE(VALUE_PROFILER)
    return arrayModesInclude(arrayModes, capability);
#else
    UNUSED_PARAM(arrayModes);
    UNUSED_PARAM(capability);
    return false;
#endif
}

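// Chooses a JITArrayMode from the shapes the ArrayProfile has observed so
// far, checking Double, then Int32, then ArrayStorage, and defaulting to
// Contiguous.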
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
#if ENABLE(VALUE_PROFILER)
    profile->computeUpdatedPrediction(m_codeBlock);
    ArrayModes arrayModes = profile->observedArrayModes();
    if (arrayProfileSaw(arrayModes, DoubleShape))
        return JITDouble;
    if (arrayProfileSaw(arrayModes, Int32Shape))
        return JITInt32;
    if (arrayProfileSaw(arrayModes, ArrayStorageShape))
        return JITArrayStorage;
    return JITContiguous;
#else
    UNUSED_PARAM(profile);
    return JITContiguous;
#endif
}

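// JSVALUE32_64: values live in the call frame as separate tag and payload
// words. The emitLoad*/emitStore* helpers below move them between the frame
// and register pairs, consulting the single-entry map() cache where possible.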
#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    RELEASE_ASSERT(tag != payload);

    if (base == callFrameRegister) {
        RELEASE_ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
{
    emitStoreInt32(index, payload, indexIsInt32);
    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

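// The baseline JIT keeps a one-entry cache (map/unmap/isMapped) recording
// which registers currently hold the tag and payload of a virtual register;
// the cache is not populated at jump targets so merges never see stale
// mappings.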
inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;

    ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
    ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = JSStack::ReturnPC;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(int virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek64(dst, argumentStackOffset);
}

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

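// On JSVALUE64 the baseline JIT remembers which virtual register the last
// result was written to (m_lastResultBytecodeRegister); an immediately
// following read of that temporary can reuse cachedResultRegister instead of
// reloading from the call frame.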
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImm64(JSValue::encode(value)), dst);
        else
            move(Imm64(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    load64(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    store64(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
    return branchTest64(Zero, reg, tagMaskRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    or64(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
    return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
    return branch64(Below, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    and64(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
    emitFastArithIntToImmNoCheck(src, dest);
}

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlines_h