/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
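// Fast path for fetching the single character of a one-character JSString:
// anything that is not a JSString, is still a rope (null impl pointer), or
// does not have length 1 is routed to the caller-supplied failure list.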
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    failures.append(branchTest32(Zero, dst));
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}
ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}
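// An "uninterrupted sequence" reserves assembler buffer and constant pool
// space up front so that a constant pool flush cannot land in the middle of
// code that must be patchable as a unit; the begin/end pair then asserts that
// the emitted code stayed within the reserved budget.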
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* In several cases the uninterrupted sequence is larger than the maximum
     * offset required to patch that same sequence. E.g.: if the last
     * macroassembler instruction in an uninterrupted sequence is a stub call,
     * it emits store instruction(s) that should not be included in the
     * calculated length of the uninterrupted sequence. So insnSpace and
     * constSpace are upper limits rather than hard limits.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif

    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL
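// Return-address helpers. Architectures that keep the return address in a
// link register (ARM, SH4, MIPS) move it between that register and a GPR;
// on x86/x86-64 it lives on the machine stack instead.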
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}
#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
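// restoreArgumentReference() points the first C argument register at the
// JITStackFrame on the machine stack and stores the current call frame into
// it, ready for a call to a stub function; the trampoline variant below
// additionally accounts for the return address already pushed on x86.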
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
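// Publishes the current call frame (and, once a bytecode offset is known, the
// bytecode return location) so that runtime code called from JIT code can see
// which frame was executing.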
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
    if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#else
        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#endif
    }
    storePtr(callFrameRegister, &m_globalData->topCallFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
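// Slow-case bookkeeping: each fast-path bail-out jump is recorded together
// with the bytecode offset that produced it, and slow-path generation later
// walks m_slowCases in order to link these jumps to the out-of-line code.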
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
{
    loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
    return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
}
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif
#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
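// Inline object allocation: pop the head of the appropriate MarkedAllocator
// free list (taking the slow case when the list is empty), then initialize
// the structure, class info, inheritor ID and property storage fields in line.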
template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
{
    MarkedAllocator* allocator = 0;
    if (destructor)
        allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
    else
        allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
    loadPtr(&allocator->m_freeList.head, result);
    addSlowCase(branchTestPtr(Zero, result));

    // remove the object from the free list
    loadPtr(Address(result), storagePtr);
    storePtr(storagePtr, &allocator->m_freeList.head);

    // initialize the object's structure
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // initialize the object's classInfo pointer
    storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));

    // initialize the inheritor ID
    storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));

    // initialize the object's property storage pointer
    addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
    storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
}
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
    emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}
inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
{
    emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);

    // store the function's scope chain
    storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));

    // store the function's executable member
    storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));

    // store the function's name
    ASSERT(executable->nameValue());
    int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
    storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}
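// Inline bump allocation out of the copied space: compute where an allocation
// of the requested size would end, take the slow case if that crosses the
// current block's boundary, and otherwise advance the allocator's offset past
// the newly reserved storage.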
inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
{
    CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();

    // FIXME: We need to check for wrap-around.
    // Check to make sure that the allocation will fit in the current block.
    loadPtr(&allocator->m_currentOffset, result);
    addPtr(TrustedImm32(size), result);
    loadPtr(&allocator->m_currentBlock, storagePtr);
    addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
    addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));

    // Load the original offset.
    loadPtr(&allocator->m_currentOffset, result);

    // Bump the pointer forward.
    move(result, storagePtr);
    addPtr(TrustedImm32(size), storagePtr);
    storePtr(storagePtr, &allocator->m_currentOffset);
}
inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
{
    unsigned initialLength = std::max(length, 4U);
    size_t initialStorage = JSArray::storageSize(initialLength);

    // We allocate the backing store first to ensure that garbage collection
    // doesn't happen during JSArray initialization.
    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);

    // Allocate the cell for the array.
    emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);

    // Store all the necessary info in the ArrayStorage.
    storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));

    // Store the newly allocated ArrayStorage.
    storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));

    // Store the vector length and index bias.
    store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
    store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));

    // Initialize the sparse value map.
    storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));

    // Store the values we have.
    for (unsigned i = 0; i < length; i++) {
#if USE(JSVALUE64)
        loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
#endif
    }

    // Zero out the remaining slots.
    for (unsigned i = length; i < initialLength; i++) {
#if USE(JSVALUE64)
        storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
        store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif
    }
}
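// Value profiling: the current result (regT0, plus regT1 for the tag on
// JSVALUE32_64) is written into a ValueProfile bucket. With a single bucket
// the store is direct; otherwise a weakly random bucket index is chosen so
// that repeated executions spread samples across the buckets.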
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
    ASSERT(shouldEmitProfiling());
    ASSERT(valueProfile);

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif
    const RegisterID scratch = regT3;

    if (ValueProfile::numberOfBuckets == 1) {
        // We're in a simple configuration: only one bucket, so we can just do a direct
        // store.
#if USE(JSVALUE64)
        storePtr(value, valueProfile->m_buckets);
#else
        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
        store32(value, &descriptor->asBits.payload);
        store32(valueTag, &descriptor->asBits.tag);
#endif
        return;
    }

    if (m_randomGenerator.getUint32() & 1)
        add32(TrustedImm32(1), bucketCounterRegister);
    else
        add32(TrustedImm32(3), bucketCounterRegister);
    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
    storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
}

inline void JIT::emitValueProfilingSite()
{
    emitValueProfilingSite(m_bytecodeOffset);
}
#endif // ENABLE(VALUE_PROFILER)
#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}
inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}
inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
{
    emitStoreInt32(index, payload, indexIsInt32);
    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}
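// Register "mapping": the JIT remembers which registers currently hold the
// tag and payload of one virtual register at the current bytecode offset, so
// an immediately following use can skip the reload. A mapping is never
// established at a jump target and is cleared by unmap().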
inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}
inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;

    ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
    ASSERT(!canBeOptimized() || m_mappedTag == regT1);
}
inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}
inline bool JIT::isMapped(int virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}
#else // USE(JSVALUE32_64)
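// On this path a single register (cachedResultRegister) caches the most
// recently written temporary: emitGetVirtualRegister reuses it when the
// source is that temporary and no jump target has been crossed, and
// killLastResultRegister() invalidates the cache.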
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImmPtr(JSValue::encode(value)), dst);
        else
            move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
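// Immediate integer tagging helpers. On JSVALUE64 an int32 becomes an
// immediate by or-ing in the TagTypeNumber bits (held in
// tagTypeNumberRegister); the de-tag/re-tag helpers below serve the older
// 32-bit immediate encoding and are only compiled for JSVALUE32_64.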
#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h