/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef AssemblyHelpers_h
#define AssemblyHelpers_h

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "JITCode.h"
#include "MacroAssembler.h"
#include "VM.h"

namespace JSC {
typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
class AssemblyHelpers : public MacroAssembler {
public:
    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
        : m_vm(vm)
        , m_codeBlock(codeBlock)
        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
    {
        if (m_codeBlock) {
            ASSERT(m_baselineCodeBlock);
            ASSERT(!m_baselineCodeBlock->alternative());
            ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
        }
    }
    CodeBlock* codeBlock() { return m_codeBlock; }
    VM* vm() { return m_vm; }
    AssemblerType_T& assembler() { return m_assembler; }
    void checkStackPointerAlignment()
    {
        // This check is both unneeded and harder to write correctly for ARM64.
#if !defined(NDEBUG) && !CPU(ARM64)
        Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
        abortWithReason(AHStackPointerMisaligned);
        stackPointerAligned.link(this);
#endif
    }
    template<typename T>
    void storeCell(T cell, Address address)
    {
#if USE(JSVALUE64)
        store64(cell, address);
#else
        store32(cell, address.withOffset(PayloadOffset));
        store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
#endif
    }
    void storeValue(JSValueRegs regs, Address address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }
    void moveTrustedValue(JSValue value, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        move(TrustedImm64(JSValue::encode(value)), regs.gpr());
#else
        move(TrustedImm32(value.tag()), regs.tagGPR());
        move(TrustedImm32(value.payload()), regs.payloadGPR());
#endif
    }
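
    // A minimal usage sketch (hypothetical; storeTrustedValueExample is not part of the
    // original interface): the helpers above hide the JSVALUE64 vs. JSVALUE32_64 encoding
    // split behind one interface, so a known-constant JSValue can be stored to memory with
    // the same pattern on both representations.
    void storeTrustedValueExample(JSValue value, Address address)
    {
#if USE(JSVALUE64)
        store64(TrustedImm64(JSValue::encode(value)), address);
#else
        store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
        store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
#endif
    }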
#if CPU(X86_64) || CPU(X86)
    static size_t prologueStackPointerDelta()
    {
        // Prologue only saves the framePointerRegister.
        return sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
    }

    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)
#if CPU(ARM) || CPU(ARM64)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and linkRegister.
        return 2 * sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        pushPair(framePointerRegister, linkRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        popPair(framePointerRegister, linkRegister);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif // CPU(ARM) || CPU(ARM64)
#if CPU(MIPS)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and returnAddressRegister.
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif // CPU(MIPS)
#if CPU(SH4)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and link register.
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        m_assembler.stspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        m_assembler.ldspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtrLinkReg(address);
    }
#endif // CPU(SH4)
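
    // A minimal usage sketch (hypothetical; emitEmptyFrameExample is not part of the original
    // interface, and it assumes the compiled target defines the prologue/epilogue pair above):
    // generated functions bracket their body with the matching platform helpers.
    void emitEmptyFrameExample()
    {
        emitFunctionPrologue();
        // ... the body of the generated function would be emitted here ...
        emitFunctionEpilogue();
        ret();
    }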
    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
    }
    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
    }
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
    }

    void emitPutReturnPCToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    // emitPutToCallFrameHeaderBeforePrologue() and related functions are used to access the
    // callee's frame header fields before the code from emitFunctionPrologue() has executed.
    // Such accesses differ in two ways. First, the access is relative to the stack pointer
    // rather than the call frame register. Second, the address calculation must take into
    // account that the stack pointer may not yet have been adjusted down for the return PC
    // and/or the caller's frame pointer; on some platforms, the callee is responsible for
    // pushing the "link register" containing the return address in the function prologue.

    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
    }

#if USE(JSVALUE32_64)
    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    }

    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    }
#endif
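
    // A minimal usage sketch (hypothetical; emitSetUpCalleeFrameExample is not part of the
    // original interface): a call site that has already moved the stack pointer down for the
    // new frame can record the callee before the callee's own prologue runs.
    void emitSetUpCalleeFrameExample(GPRReg calleeGPR)
    {
        emitPutToCallFrameHeaderBeforePrologue(calleeGPR, JSStack::Callee);
    }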
    Jump branchIfNotCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }
    static Address addressForByteOffset(ptrdiff_t byteOffset)
    {
        return Address(GPRInfo::callFrameRegister, byteOffset);
    }
    static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
    {
        ASSERT(virtualRegister.isValid());
        return Address(baseReg, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }

    static Address tagFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
    }
    static Address tagFor(int operand)
    {
        return tagFor(static_cast<VirtualRegister>(operand));
    }

    static Address payloadFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
    }
    static Address payloadFor(int operand)
    {
        return payloadFor(static_cast<VirtualRegister>(operand));
    }
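
    // A minimal usage sketch (hypothetical; loadLocalExample is not part of the original
    // interface): the addressFor()/tagFor()/payloadFor() helpers let the same caller code
    // load a virtual register under either value representation.
    void loadLocalExample(VirtualRegister local, JSValueRegs dst)
    {
#if USE(JSVALUE64)
        load64(addressFor(local), dst.gpr());
#else
        load32(tagFor(local), dst.tagGPR());
        load32(payloadFor(local), dst.payloadGPR());
#endif
    }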
    Jump branchIfCellNotObject(GPRReg cellReg)
    {
        return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }
    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
            return GPRInfo::regT0;

        if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
            return GPRInfo::regT1;

        if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
            return GPRInfo::regT2;

        if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
            return GPRInfo::regT3;

        return GPRInfo::regT4;
    }
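
    // A minimal usage sketch (hypothetical; moveThroughScratchExample is not part of the
    // original interface): pick a temporary that aliases neither the source nor the
    // destination of a value shuffle.
    void moveThroughScratchExample(GPRReg srcGPR, GPRReg dstGPR)
    {
        GPRReg scratch = selectScratchGPR(srcGPR, dstGPR);
        move(srcGPR, scratch);
        move(scratch, dstGPR);
    }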
    // Add a debug call. This call has no effect on JIT code execution state.
    void debugCall(V_DebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            store64(GPRInfo::toRegister(i), buffer + i);
#else
            store32(GPRInfo::toRegister(i), buffer + i);
#endif
        }

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
        }

        // Tell the GC mark phase how much of the scratch buffer is active during the call.
        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
        poke(GPRInfo::callFrameRegister, 0);
        poke(TrustedImmPtr(argument), 1);
        poke(TrustedImmPtr(buffer), 2);
        GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
        call(scratch);

        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(0), GPRInfo::regT0);

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
        }
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            load64(buffer + i, GPRInfo::toRegister(i));
#else
            load32(buffer + i, GPRInfo::toRegister(i));
#endif
        }
    }
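
    // Usage sketch: because debugCall() saves and restores every GPR and FPR around the call,
    // it can invoke an arbitrary host function for tracing without perturbing JIT register
    // state. The callback and context names below are hypothetical.
    //
    //     static void dumpState(ExecState* exec, void* context, void*) { /* ... */ }
    //     ...
    //     jit.debugCall(dumpState, contextPointer);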
    // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
#if !ASSERT_DISABLED
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
    void jitAssertIsNull(GPRReg);
    void jitAssertTagsInPlace();
    void jitAssertArgumentCountSane();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
    void jitAssertIsNull(GPRReg) { }
    void jitAssertTagsInPlace() { }
    void jitAssertArgumentCountSane() { }
#endif
    void purifyNaN(FPRReg);
    // These methods convert between doubles and doubles boxed as JSValues.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        moveDoubleTo64(fpr, gpr);
        sub64(GPRInfo::tagTypeNumberRegister, gpr);
        jitAssertIsJSDouble(gpr);
        return gpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        add64(GPRInfo::tagTypeNumberRegister, gpr);
        move64ToDouble(gpr, fpr);
        return fpr;
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.gpr());
    }

    // Here are possible arrangements of source, target, scratch:
    // - source, target, scratch can all be separate registers.
    // - source and target can be the same but scratch is separate.
    // - target and scratch can be the same but source is separate.
    void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
    {
        // Is the value representable as an int32?
        signExtend32ToPtr(source, scratch);
        Jump isInt32 = branch64(Equal, source, scratch);

        // It's not an int32; source still holds the full int64 value, so box it as a double.
        convertInt64ToDouble(source, fpScratch);
        boxDouble(fpScratch, target);
        Jump done = jump();

        isInt32.link(this);
        zeroExtend32ToPtr(source, target);
        or64(GPRInfo::tagTypeNumberRegister, target);

        done.link(this);
    }
#endif // USE(JSVALUE64)
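
#if USE(JSVALUE64)
    // A minimal usage sketch (hypothetical; boxInt52InPlaceExample is not part of the
    // original interface): per the register arrangements listed above, target and scratch
    // may alias, so a caller with only two free GPRs can box in place.
    void boxInt52InPlaceExample(GPRReg sourceGPR, GPRReg targetAndScratchGPR, FPRReg fpScratch)
    {
        boxInt52(sourceGPR, targetAndScratchGPR, targetAndScratchGPR, fpScratch);
    }
#endif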
#if USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
    }
#endif // USE(JSVALUE32_64)
    void callExceptionFuzz();

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck);
#if ENABLE(SAMPLING_COUNTERS)
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t flag);
#endif
    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return codeBlock()->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock();
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }
    VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock()->argumentsRegister();

        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset);
    }

    VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockFor(codeOrigin)->symbolTable();
    }
    int offsetOfLocals(const CodeOrigin& codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return 0;
        return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
        if (inlineCallFrame->arguments.size() <= 1)
            return 0;
        ValueRecovery recovery = inlineCallFrame->arguments[1];
        RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
        return (recovery.virtualRegister().offset() - 1) * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
    {
        return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame);
    }
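
    // A minimal usage sketch (hypothetical; addressOfArgumentExample is not part of the
    // original interface): combining offsetOfArgumentsIncludingThis() with
    // addressForByteOffset() yields a frame-relative address for argument #index
    // (index 0 being `this`), whether the origin is the machine frame or an inlined frame.
    Address addressOfArgumentExample(const CodeOrigin& codeOrigin, int index)
    {
        return addressForByteOffset(offsetOfArgumentsIncludingThis(codeOrigin) + index * static_cast<int>(sizeof(Register)));
    }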
    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
        loadPtr(vm()->heap.structureIDTable().base(), scratch);
        loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
#endif
    }

    static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
        jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
        jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
#endif
    }
    void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
    {
        emitStoreStructureWithTypeInfo(*this, structure, dest);
    }

    void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
        store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#else
        // Store all the info flags using a single 32-bit wide load and store.
        load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));

        // Store the StructureID.
        storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }

    static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
    Jump checkMarkByte(GPRReg cell)
    {
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::gcDataOffset()));
    }

    Jump checkMarkByte(JSCell* cell)
    {
        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::gcDataOffset();
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
    }
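
    // Usage sketch: checkMarkByte() branches when the cell's GC data byte is non-zero, so a
    // caller emits work for the zero-byte case on the fall-through path and links the jump
    // past it. The register name below is hypothetical.
    //
    //     Jump gcDataByteSet = jit.checkMarkByte(cellGPR);
    //     // ... code for the zero-byte case goes here ...
    //     gcDataByteSet.link(&jit);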
    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);

protected:
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_baselineCodeBlock;

    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
};

} // namespace JSC

#endif // ENABLE(JIT)

#endif // AssemblyHelpers_h