diff --git a/jit/JITInlineMethods.h b/jit/JITInlineMethods.h
index c22a692..cd33821 100644
--- a/jit/JITInlineMethods.h
+++ b/jit/JITInlineMethods.h
@@ -26,7 +26,6 @@
 #ifndef JITInlineMethods_h
 #define JITInlineMethods_h
 
-#include 
 
 #if ENABLE(JIT)
 
@@ -34,137 +33,262 @@ namespace JSC {
 
 /* Deprecated: Please use JITStubCall instead. */
 
-// puts an arg onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
 {
-    poke(src, argumentNumber);
+    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
+    peek(dst, argumentStackOffset);
 }
 
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
 {
-    poke(Imm32(value), argumentNumber);
+    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
 }
 
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
+ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
 {
-    poke(ImmPtr(value), argumentNumber);
+    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
+    return m_codeBlock->getConstant(src);
 }
 
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
 {
-    peek(dst, argumentNumber);
+    storePtr(from, payloadFor(entry, callFrameRegister));
 }
 
-ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
+ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
 {
-    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
-    return m_codeBlock->getConstant(src);
+#if USE(JSVALUE32_64)
+    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+#endif
+    storePtr(from, payloadFor(entry, callFrameRegister));
 }
 
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
 {
-    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
+    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
+    store32(from, intPayloadFor(entry, callFrameRegister));
 }
 
 ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
 {
-    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
 }
 
 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
 {
     loadPtr(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
     killLastResultRegister();
 #endif
 }
 
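The offset arithmetic in the new emitGetJITStubArg is worth spelling out: stub arguments are JSValues stored in machine-word stack slots, so each argument spans sizeof(JSValue) / sizeof(void*) slots (two on a 32-bit build, one on a 64-bit build) after the JITStackFrame header. A minimal sketch of the same computation, using only names from this file:

    // Sketch only: mirrors the peek() offset computed by emitGetJITStubArg above.
    unsigned stubArgumentStackOffset(unsigned argumentNumber)
    {
        const unsigned slotsPerValue = sizeof(JSValue) / sizeof(void*); // 2 on 32-bit, 1 on 64-bit
        return argumentNumber * slotsPerValue + JITSTACKFRAME_ARGS_INDEX; // skip the JITStackFrame header
    }
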
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+    failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
+    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+    failures.append(branchTest32(Zero, dst));
+    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
+    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+
+    JumpList is16Bit;
+    JumpList cont8Bit;
+    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+    load8(MacroAssembler::Address(dst, 0), dst);
+    cont8Bit.append(jump());
+    is16Bit.link(this);
+    load16(MacroAssembler::Address(dst, 0), dst);
+    cont8Bit.link(this);
+}
+
 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
 {
     load32(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
     killLastResultRegister();
 #endif
 }
 
 ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
 
     Call nakedCall = nearCall();
-    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
+    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
     return nakedCall;
 }
 
-#if PLATFORM(X86) || PLATFORM(X86_64)
+ALWAYS_INLINE bool JIT::atJumpTarget()
+{
+    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
+            return true;
+        ++m_jumpTargetsPosition;
+    }
+    return false;
+}
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+
+ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+{
+#if CPU(ARM_TRADITIONAL)
+#ifndef NDEBUG
+    // Ensure the label after the sequence can also fit
+    insnSpace += sizeof(ARMWord);
+    constSpace += sizeof(uint64_t);
+#endif
+
+    ensureSpace(insnSpace, constSpace);
+
+#elif CPU(SH4)
+#ifndef NDEBUG
+    insnSpace += sizeof(SH4Word);
+    constSpace += sizeof(uint64_t);
+#endif
+
+    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
+    m_uninterruptedInstructionSequenceBegin = label();
+    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+#endif
+#endif
+}
+
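On the constant-pool assemblers (traditional ARM and SH4) the backend may dump a constant pool between any two macro instructions, which would corrupt a sequence that is later repatched. beginUninterruptedSequence reserves worst-case space up front so no pool flush can land mid-sequence, and endUninterruptedSequence (below) asserts that the reservation was large enough. A hedged usage sketch; the space constants are invented for illustration, not JSC's real per-sequence values:

    // Hypothetical caller; real generators pass per-sequence worst-case sizes.
    const int exampleInsnSpace = 36;  // assumed worst-case instruction bytes
    const int exampleConstSpace = 4;  // assumed worst-case constant-pool bytes

    beginUninterruptedSequence(exampleInsnSpace, exampleConstSpace);
    // ... emit the patchable loads/branches; no constant pool can be dumped here,
    // so later repatching sees a fixed instruction layout ...
    endUninterruptedSequence(exampleInsnSpace, exampleConstSpace, 0);
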
+ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
+{
+    UNUSED_PARAM(dst);
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+    /* There are several cases where the uninterrupted sequence is larger than
+     * the maximum offset required for patching the same sequence. E.g.: if in
+     * an uninterrupted sequence the last macroassembler instruction is a stub
+     * call, it emits store instruction(s) which should not be included in the
+     * calculation of the length of the uninterrupted sequence. So insnSpace
+     * and constSpace should be an upper limit instead of a hard limit.
+     */
+#if CPU(SH4)
+    if ((dst > 15) || (dst < -16)) {
+        insnSpace += 8;
+        constSpace += 2;
+    }
+
+    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
+        insnSpace += 8;
+#endif
+    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
+    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
+#endif
+}
+
+#endif
+
+#if CPU(ARM)
 
 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
 {
-    pop(reg);
+    move(linkRegister, reg);
 }
 
 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
 {
-    push(reg);
+    move(reg, linkRegister);
 }
 
 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
 {
-    push(address);
+    loadPtr(address, linkRegister);
+}
+#elif CPU(SH4)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+    m_assembler.stspr(reg);
 }
-#elif PLATFORM_ARM_ARCH(7)
 
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+    m_assembler.ldspr(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+    loadPtrLinkReg(address);
+}
+
+#elif CPU(MIPS)
 
 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
 {
-    move(linkRegister, reg);
+    move(returnAddressRegister, reg);
 }
 
 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
 {
-    move(reg, linkRegister);
+    move(reg, returnAddressRegister);
 }
 
 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
 {
-    loadPtr(address, linkRegister);
+    loadPtr(address, returnAddressRegister);
 }
-#endif
+#else // CPU(X86) || CPU(X86_64)
 
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
 {
-    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+    pop(reg);
 }
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#else
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+    push(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+    push(address);
+}
+
+#endif
+
 ALWAYS_INLINE void JIT::restoreArgumentReference()
 {
     move(stackPointerRegister, firstArgumentRegister);
-    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
 }
+
+ALWAYS_INLINE void JIT::updateTopCallFrame()
+{
+    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
+    if (m_bytecodeOffset) {
+#if USE(JSVALUE32_64)
+        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+#else
+        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+#endif
+    }
+    storePtr(callFrameRegister, &m_globalData->topCallFrame);
+}
+
 ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
 {
-#if PLATFORM(X86)
+#if CPU(X86)
     // Within a trampoline the return address will be on the stack at this point.
-    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM_ARM_ARCH(7)
+    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif CPU(ARM)
+    move(stackPointerRegister, firstArgumentRegister);
+#elif CPU(SH4)
     move(stackPointerRegister, firstArgumentRegister);
 #endif
     // In the trampoline on x86-64, the first argument register is not overwritten.
 }
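updateTopCallFrame doubles as the place where the current bytecode position is published to the runtime: the tag half of the ArgumentCount header slot receives m_bytecodeOffset + 1 (a pointer into the instruction stream on JSVALUE32_64), biased by one so that zero can mean "not recorded". A sketch of how a runtime stub could undo that bias on a 64-bit build; the tag() accessor here is hypothetical:

    // Sketch, assuming a tag() accessor on Register; only the +1 bias is load-bearing.
    unsigned currentBytecodeOffset(CallFrame* callFrame)
    {
        int32_t biased = callFrame->registers()[RegisterFile::ArgumentCount].tag(); // hypothetical accessor
        return biased - 1; // undo the bias; 0 would mean "never recorded"
    }
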
-#endif
 
 ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
 {
-    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
+    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
 }
 
 ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -175,33 +299,52 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
 
 ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
 
-    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
 }
 
 ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
 
     const JumpList::JumpVector& jumpVector = jumpList.jumps();
     size_t size = jumpVector.size();
     for (size_t i = 0; i < size; ++i)
-        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase()
+{
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+    Jump emptyJump; // Doing it this way to make Windows happy.
+    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
 }
 
 ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
 
-    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
 }
 
 ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
+{
+    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+}
 
-    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
+{
+    loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
+    return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
 }
 
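The addSlowCase/linkSlowCase machinery pairs every fast-path guard with a cold-path re-entry keyed on m_bytecodeOffset: the hot path records its bailout branches, and the per-opcode slow-path generator later consumes them in order. A hedged sketch of how an opcode uses it; the opcode and stub names are invented, the helpers are the ones defined in this file:

    // Hot path: guard and fall through.
    void JIT::emit_op_example(Instruction* currentInstruction) // hypothetical opcode
    {
        emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
        addSlowCase(emitJumpIfNotImmediateInteger(regT0)); // records a SlowCaseEntry at this offset
        // ... fast path ...
    }

    // Cold path: link the recorded jump and call out to a stub.
    void JIT::emitSlow_op_example(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
    {
        linkSlowCase(iter);                         // binds the guard emitted above
        JITStubCall stubCall(this, cti_op_example); // hypothetical stub function
        stubCall.addArgument(regT0);
        stubCall.call(currentInstruction[1].u.operand);
    }
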
 #if ENABLE(SAMPLING_FLAGS)
@@ -209,80 +352,238 @@ ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
 {
     ASSERT(flag >= 1);
     ASSERT(flag <= 32);
-    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
 }
 
 ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
 {
     ASSERT(flag >= 1);
     ASSERT(flag <= 32);
-    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
 }
 #endif
 
 #if ENABLE(SAMPLING_COUNTERS)
-ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
-{
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
-    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
-    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
-    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
-    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
-#else
-#error "SAMPLING_FLAGS not implemented on this platform."
-#endif
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
+{
+    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
 }
 #endif
 
 #if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
 ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
 {
-    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
-    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
+    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
 }
 #else
 ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
 {
-    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
 }
 #endif
 #endif
 
 #if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
 ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
 {
-    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
-    storePtr(ImmPtr(codeBlock), X86::ecx);
+    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
 }
 #else
 ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
 {
-    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
 }
 #endif
 #endif
 
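The flag arithmetic above packs 32 one-bit flags into a single word: flag f maps to mask 1u << (f - 1), so flag 3 is 0x4; setting ORs the mask in, clearing ANDs its complement. The same bit twiddling in plain C++ (illustrative):

    static uint32_t s_flags; // stands in for SamplingFlags' flag word

    void setFlag(int32_t flag)   { s_flags |=  1u << (flag - 1); } // flag 3 -> 0x00000004
    void clearFlag(int32_t flag) { s_flags &= ~(1u << (flag - 1)); }
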
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
+{
+    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
+}
+
+template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
+{
+    MarkedAllocator* allocator = 0;
+    if (destructor)
+        allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
+    else
+        allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
+    loadPtr(&allocator->m_freeList.head, result);
+    addSlowCase(branchTestPtr(Zero, result));
+
+    // remove the object from the free list
+    loadPtr(Address(result), storagePtr);
+    storePtr(storagePtr, &allocator->m_freeList.head);
+
+    // initialize the object's structure
+    storePtr(structure, Address(result, JSCell::structureOffset()));
+
+    // initialize the object's classInfo pointer
+    storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));
+
+    // initialize the inheritor ID
+    storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));
+
+    // initialize the object's property storage pointer
+    addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
+    storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
+}
+
+template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
+{
+    emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
+}
+
+inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
+{
+    emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
+
+    // store the function's scope chain
+    storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));
+
+    // store the function's executable member
+    storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));
+
+    // store the function's name
+    ASSERT(executable->nameValue());
+    int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
+    storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
 #if USE(JSVALUE32_64)
+    store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+#endif
+}
+
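emitAllocateBasicJSObject compiles a free-list pop directly into the instruction stream, bailing to the slow case when the list is empty. Roughly the same logic in plain C++; FreeCell here only mirrors the link layout the JIT assumes for a free-list entry:

    struct FreeCell { FreeCell* next; }; // assumed layout of a free-list link

    void* allocateFromFreeList(MarkedAllocator* allocator)
    {
        FreeCell* cell = (FreeCell*)allocator->m_freeList.head;
        if (!cell)
            return 0; // JIT equivalent: addSlowCase(branchTestPtr(Zero, result))
        allocator->m_freeList.head = cell->next; // loadPtr(Address(result)) + storePtr
        return cell;
    }
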
+inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
+{
+    CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
+
+    // FIXME: We need to check for wrap-around.
+    // Check to make sure that the allocation will fit in the current block.
+    loadPtr(&allocator->m_currentOffset, result);
+    addPtr(TrustedImm32(size), result);
+    loadPtr(&allocator->m_currentBlock, storagePtr);
+    addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
+    addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));
 
-inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
+    // Load the original offset.
+    loadPtr(&allocator->m_currentOffset, result);
+
+    // Bump the pointer forward.
+    move(result, storagePtr);
+    addPtr(TrustedImm32(size), storagePtr);
+    storePtr(storagePtr, &allocator->m_currentOffset);
+}
+
+inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
 {
-    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+    unsigned initialLength = std::max(length, 4U);
+    size_t initialStorage = JSArray::storageSize(initialLength);
+
+    // We allocate the backing store first to ensure that garbage collection
+    // doesn't happen during JSArray initialization.
+    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
+
+    // Allocate the cell for the array.
+    emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);
+
+    // Store all the necessary info in the ArrayStorage.
+    storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
+    store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
+    store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));
+
+    // Store the newly allocated ArrayStorage.
+    storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));
+
+    // Store the vector length and index bias.
+    store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
+    store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));
+
+    // Initialize the sparse value map.
+    storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));
+
+    // Store the values we have.
+    for (unsigned i = 0; i < length; i++) {
+#if USE(JSVALUE64)
+        loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+        storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
+        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
+        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
+#endif
+    }
+
+    // Zero out the remaining slots.
+    for (unsigned i = length; i < initialLength; i++) {
+#if USE(JSVALUE64)
+        storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
+        store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+        store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+#endif
+    }
 }
 
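emitAllocateBasicStorage is a bump allocator: it bails to the slow case if advancing m_currentOffset by size would run past the end of the current block, then performs the bump. The same check in plain C++; the allocator's field types are assumed here:

    void* tryBumpAllocate(CopiedAllocator* allocator, size_t size)
    {
        char* newOffset = (char*)allocator->m_currentOffset + size;
        char* blockEnd = (char*)allocator->m_currentBlock + HeapBlock::s_blockSize;
        if (newOffset >= blockEnd)
            return 0; // JIT equivalent: addSlowCase(branchPtr(AboveOrEqual, ...))
        void* result = allocator->m_currentOffset;
        allocator->m_currentOffset = newOffset; // bump
        return result;
    }
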
-inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
+#if ENABLE(VALUE_PROFILER)
+inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
 {
-    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+    ASSERT(shouldEmitProfiling());
+    ASSERT(valueProfile);
+
+    const RegisterID value = regT0;
+#if USE(JSVALUE32_64)
+    const RegisterID valueTag = regT1;
+#endif
+    const RegisterID scratch = regT3;
+
+    if (ValueProfile::numberOfBuckets == 1) {
+        // We're in a simple configuration: only one bucket, so we can just do a direct
+        // store.
+#if USE(JSVALUE64)
+        storePtr(value, valueProfile->m_buckets);
+#else
+        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
+        store32(value, &descriptor->asBits.payload);
+        store32(valueTag, &descriptor->asBits.tag);
+#endif
+        return;
+    }
+
+    if (m_randomGenerator.getUint32() & 1)
+        add32(TrustedImm32(1), bucketCounterRegister);
+    else
+        add32(TrustedImm32(3), bucketCounterRegister);
+    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
+    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
+#if USE(JSVALUE64)
+    storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
+#elif USE(JSVALUE32_64)
+    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+#endif
 }
 
-inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
 {
-    return Address(base, (index * sizeof(Register)));
+    if (!shouldEmitProfiling())
+        return;
+    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
 }
 
-inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
+inline void JIT::emitValueProfilingSite()
+{
+    emitValueProfilingSite(m_bytecodeOffset);
+}
+#endif
+
+#if USE(JSVALUE32_64)
+
+inline void JIT::emitLoadTag(int index, RegisterID tag)
 {
     RegisterID mappedTag;
     if (getMappedTag(index, mappedTag)) {
@@ -301,7 +602,7 @@ inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
     unmap(tag);
 }
 
-inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
+inline void JIT::emitLoadPayload(int index, RegisterID payload)
 {
     RegisterID mappedPayload;
     if (getMappedPayload(index, mappedPayload)) {
@@ -326,7 +627,7 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
     move(Imm32(v.tag()), tag);
 }
 
-inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
 {
     ASSERT(tag != payload);
 
@@ -347,7 +648,7 @@ inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, Re
     load32(tagFor(index, base), tag);
 }
 
-inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
+inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
 {
     if (isMapped(index1)) {
         emitLoad(index1, tag1, payload1);
@@ -358,65 +659,71 @@ inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1
     emitLoad(index1, tag1, payload1);
 }
 
-inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+inline void JIT::emitLoadDouble(int index, FPRegisterID value)
 {
     if (m_codeBlock->isConstantRegisterIndex(index)) {
-        Register& inConstantPool = m_codeBlock->constantRegister(index);
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
         loadDouble(&inConstantPool, value);
     } else
         loadDouble(addressFor(index), value);
 }
 
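On JSVALUE32_64 every virtual register is an eight-byte Register holding a JSValue as a payload/tag pair, which is why the loads and stores in this section always come in twos. The removed tagFor/payloadFor helpers visible in this diff show the addressing; restated in isolation (little-endian layout: payload in the low word, tag in the high word):

    // Restates the removed helpers; sketch only.
    inline JIT::Address tagAddressFor(int index, JIT::RegisterID base)
    {
        return JIT::Address(base, index * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
    }
    inline JIT::Address payloadAddressFor(int index, JIT::RegisterID base)
    {
        return JIT::Address(base, index * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
    }
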
-inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
 {
     if (m_codeBlock->isConstantRegisterIndex(index)) {
-        Register& inConstantPool = m_codeBlock->constantRegister(index);
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
         char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
         convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
     } else
         convertInt32ToDouble(payloadFor(index), value);
 }
 
-inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
 {
     store32(payload, payloadFor(index, base));
     store32(tag, tagFor(index, base));
 }
 
-inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
+inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
 {
     store32(payload, payloadFor(index, callFrameRegister));
     if (!indexIsInt32)
-        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
+{
+    emitStoreInt32(index, payload, indexIsInt32);
+    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
 }
 
-inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
+inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
 {
     store32(payload, payloadFor(index, callFrameRegister));
     if (!indexIsInt32)
-        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
 }
 
-inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
+inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
 {
     store32(payload, payloadFor(index, callFrameRegister));
     if (!indexIsCell)
-        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
 }
 
-inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
+inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
 {
+    store32(payload, payloadFor(index, callFrameRegister));
     if (!indexIsBool)
-        store32(Imm32(0), payloadFor(index, callFrameRegister));
-    store32(tag, tagFor(index, callFrameRegister));
+        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
 }
 
-inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
+inline void JIT::emitStoreDouble(int index, FPRegisterID value)
 {
     storeDouble(value, addressFor(index));
 }
 
-inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
+inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
 {
     store32(Imm32(constant.payload()), payloadFor(index, base));
     store32(Imm32(constant.tag()), tagFor(index, base));
@@ -427,27 +734,30 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
     emitStore(dst, jsUndefined());
 }
 
-inline bool JIT::isLabeled(unsigned bytecodeIndex)
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
 {
     for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
         unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
-        if (jumpTarget == bytecodeIndex)
+        if (jumpTarget == bytecodeOffset)
             return true;
-        if (jumpTarget > bytecodeIndex)
+        if (jumpTarget > bytecodeOffset)
             return false;
     }
     return false;
 }
 
-inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
 {
-    if (isLabeled(bytecodeIndex))
+    if (isLabeled(bytecodeOffset))
         return;
 
-    m_mappedBytecodeIndex = bytecodeIndex;
+    m_mappedBytecodeOffset = bytecodeOffset;
     m_mappedVirtualRegisterIndex = virtualRegisterIndex;
     m_mappedTag = tag;
     m_mappedPayload = payload;
+
+    ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
+    ASSERT(!canBeOptimized() || m_mappedTag == regT1);
 }
 
 inline void JIT::unmap(RegisterID registerID)
@@ -460,24 +770,24 @@ inline void JIT::unmap(RegisterID registerID)
 
 inline void JIT::unmap()
 {
-    m_mappedBytecodeIndex = (unsigned)-1;
-    m_mappedVirtualRegisterIndex = (unsigned)-1;
+    m_mappedBytecodeOffset = (unsigned)-1;
+    m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
     m_mappedTag = (RegisterID)-1;
     m_mappedPayload = (RegisterID)-1;
 }
 
-inline bool JIT::isMapped(unsigned virtualRegisterIndex)
+inline bool JIT::isMapped(int virtualRegisterIndex)
 {
-    if (m_mappedBytecodeIndex != m_bytecodeIndex)
+    if (m_mappedBytecodeOffset != m_bytecodeOffset)
         return false;
     if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
         return false;
     return true;
 }
 
-inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
+inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
 {
-    if (m_mappedBytecodeIndex != m_bytecodeIndex)
+    if (m_mappedBytecodeOffset != m_bytecodeOffset)
         return false;
     if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
         return false;
@@ -487,9 +797,9 @@ ALWAYS_INLINE bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& pay
     return true;
 }
 
-inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
+inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
 {
-    if (m_mappedBytecodeIndex != m_bytecodeIndex)
+    if (m_mappedBytecodeOffset != m_bytecodeOffset)
         return false;
     if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
         return false;
@@ -499,22 +809,24 @@ inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
     return true;
 }
 
-inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
+inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
 {
-    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
-        addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
-}
-
-inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
-{
-    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
-        addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+            addSlowCase(jump());
+        else
+            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+    }
 }
 
-inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
+inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
 {
-    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
-        linkSlowCase(iter);
+    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+            addSlowCase(jump());
+        else
+            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
+    }
 }
 
 ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
@@ -539,26 +851,6 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
     return false;
 }
 
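map()/isMapped()/getMappedTag()/getMappedPayload() form a one-entry cache: they remember which register pair holds a given virtual register at the current bytecode offset, so the next emitLoad can skip both memory loads; isLabeled() prevents caching across jump targets, where another control path could arrive with stale registers. A sketch of the consumer side, with an invented function name and simplified aliasing handling:

    void JIT::emitLoadSketch(int index, RegisterID tag, RegisterID payload) // hypothetical
    {
        RegisterID mappedTag, mappedPayload;
        if (getMappedTag(index, mappedTag) && getMappedPayload(index, mappedPayload)) {
            move(mappedPayload, payload); // already in registers; no memory traffic
            move(mappedTag, tag);
            return;
        }
        load32(payloadFor(index, callFrameRegister), payload); // fall back to the stack slot
        load32(tagFor(index, callFrameRegister), tag);
    }
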
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
-{
-    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
-{
-    if (m_codeBlock->isConstantRegisterIndex(src)) {
-        JSValue constant = m_codeBlock->getConstant(src);
-        poke(Imm32(constant.payload()), argumentNumber);
-        poke(Imm32(constant.tag()), argumentNumber + 1);
-    } else {
-        emitLoad(src, scratch1, scratch2);
-        poke(scratch2, argumentNumber);
-        poke(scratch1, argumentNumber + 1);
-    }
-}
-
 #else // USE(JSVALUE32_64)
 
 ALWAYS_INLINE void JIT::killLastResultRegister()
@@ -569,31 +861,25 @@ ALWAYS_INLINE void JIT::killLastResultRegister()
 // get arg puts an arg from the SF register array into a h/w register
 ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
 {
-    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
 
     // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
     if (m_codeBlock->isConstantRegisterIndex(src)) {
         JSValue value = m_codeBlock->getConstant(src);
-        move(ImmPtr(JSValue::encode(value)), dst);
+        if (!value.isNumber())
+            move(TrustedImmPtr(JSValue::encode(value)), dst);
+        else
+            move(ImmPtr(JSValue::encode(value)), dst);
         killLastResultRegister();
         return;
     }
 
-    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
-        bool atJumpTarget = false;
-        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
-            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
-                atJumpTarget = true;
-            ++m_jumpTargetsPosition;
-        }
-
-        if (!atJumpTarget) {
-            // The argument we want is already stored in eax
-            if (dst != cachedResultRegister)
-                move(cachedResultRegister, dst);
-            killLastResultRegister();
-            return;
-        }
+    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
+        // The argument we want is already stored in eax
+        if (dst != cachedResultRegister)
+            move(cachedResultRegister, dst);
+        killLastResultRegister();
+        return;
     }
 
     loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
@@ -624,12 +910,12 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
 ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
 {
     storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
-    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
 }
 
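emitGetVirtualRegister and emitPutVirtualRegister cooperate through m_lastResultBytecodeRegister: a put that wrote through cachedResultRegister (eax) records which virtual register it holds, and a following get of that same temporary reuses the register instead of reloading, unless atJumpTarget() reports that another control path could land between the two. A minimal model of the bookkeeping; this is a sketch, not the JIT's actual type:

    #include <limits>

    struct LastResultCache {
        int lastResultRegister; // virtual register currently cached in eax, or INT_MAX for "none"

        void recordPut(int dst, bool wroteCachedRegister)
        {
            lastResultRegister = wroteCachedRegister ? dst : std::numeric_limits<int>::max();
        }
        bool canReuse(int src, bool isTemporary, bool atJumpTarget) const
        {
            return src == lastResultRegister && isTemporary && !atJumpTarget;
        }
    };
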
 ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
 {
-    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
 }
 
 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
@@ -637,7 +923,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
 #if USE(JSVALUE64)
     return branchTestPtr(Zero, reg, tagMaskRegister);
 #else
-    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
+    return branchTest32(Zero, reg, TrustedImm32(TagMask));
 #endif
 }
 
@@ -658,7 +944,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
 #if USE(JSVALUE64)
     return branchTestPtr(NonZero, reg, tagMaskRegister);
 #else
-    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
+    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
 #endif
 }
 
@@ -674,13 +960,23 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
 }
 
 #if USE(JSVALUE64)
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
+
+inline void JIT::emitLoadDouble(int index, FPRegisterID value)
 {
-    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+    if (m_codeBlock->isConstantRegisterIndex(index)) {
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+        loadDouble(&inConstantPool, value);
+    } else
+        loadDouble(addressFor(index), value);
 }
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+
+inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
 {
-    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+    if (m_codeBlock->isConstantRegisterIndex(index)) {
+        ASSERT(isOperandConstantImmediateInt(index));
+        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
+    } else
+        convertInt32ToDouble(addressFor(index), value);
 }
 #endif
 
@@ -689,7 +985,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
 #if USE(JSVALUE64)
     return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
 #else
-    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
+    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
 #endif
 }
 
@@ -698,7 +994,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
 #if USE(JSVALUE64)
     return branchPtr(Below, reg, tagTypeNumberRegister);
 #else
-    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
+    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
 #endif
 }
 
@@ -719,15 +1015,20 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, 
     addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
 }
 
-#if !USE(JSVALUE64)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+{
+    addSlowCase(emitJumpIfNotImmediateNumber(reg));
+}
+
+#if USE(JSVALUE32_64)
 ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
 {
-    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
+    subPtr(TrustedImm32(TagTypeNumber), reg);
 }
 
 ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
 {
-    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
+    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
 }
 #endif
 
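The JSVALUE64 branches above lean on the NaN-boxed integer encoding: boxed int32s are the only values at or above the constant kept in tagTypeNumberRegister, so a single unsigned comparison classifies them. A sketch of the encoding these branches test; the constant matches JSC's TagTypeNumber, the helper names are illustrative:

    #include <stdint.h>

    static const uint64_t TagTypeNumberConstant = 0xFFFF000000000000ull; // kept in tagTypeNumberRegister

    uint64_t boxInt32(int32_t value)     { return TagTypeNumberConstant | (uint32_t)value; }
    bool     isBoxedInt32(uint64_t bits) { return bits >= TagTypeNumberConstant; } // branchPtr(AboveOrEqual, ...)
    int32_t  unboxInt32(uint64_t bits)   { return (int32_t)bits; }                 // low 32 bits
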
@@ -738,16 +1039,7 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
 #else
     if (src != dest)
         move(src, dest);
-    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
-#endif
-}
-
-ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
-{
-#if USE(JSVALUE64)
-    UNUSED_PARAM(reg);
-#else
-    rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
+    addPtr(TrustedImm32(TagTypeNumber), dest);
 #endif
 }
 
@@ -767,24 +1059,7 @@ ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID 
 
 ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
 {
-    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
-    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
-{
-    if (m_codeBlock->isConstantRegisterIndex(src)) {
-        JSValue value = m_codeBlock->getConstant(src);
-        emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
-    } else {
-        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
-        emitPutJITStubArg(scratch, argumentNumber);
-    }
-
-    killLastResultRegister();
+    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
 }
 
 #endif // USE(JSVALUE32_64)
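The new emitTagAsBoolImmediate assumes reg already holds 0 or 1 (a comparison result) and ORs in ValueFalse, yielding the boxed false/true bit patterns in one instruction. How that works, with constants that follow JSC's JSVALUE64 encoding; illustrative only:

    #include <stdint.h>

    static const int32_t ValueFalseBits = 0x06; // TagBitTypeOther | TagBitBool
    static const int32_t ValueTrueBits  = 0x07; // ValueFalseBits | 1

    int32_t tagAsBool(int32_t zeroOrOne) { return zeroOrOne | ValueFalseBits; } // 0 -> false, 1 -> true
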