/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AssemblyHelpers_h
#define AssemblyHelpers_h

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "JITCode.h"
#include "MacroAssembler.h"
#include "VM.h"

namespace JSC {

typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);

class AssemblyHelpers : public MacroAssembler {
public:
    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
        : m_vm(vm)
        , m_codeBlock(codeBlock)
        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
    {
        if (m_codeBlock) {
            ASSERT(m_baselineCodeBlock);
            ASSERT(!m_baselineCodeBlock->alternative());
            ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
        }
    }

    CodeBlock* codeBlock() { return m_codeBlock; }
    VM* vm() { return m_vm; }
    AssemblerType_T& assembler() { return m_assembler; }

    void checkStackPointerAlignment()
    {
        // This check is both unneeded and harder to write correctly for ARM64
#if !defined(NDEBUG) && !CPU(ARM64)
        Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
        abortWithReason(AHStackPointerMisaligned);
        stackPointerAligned.link(this);
#endif
    }

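    // On 64-bit (USE(JSVALUE64)) a JSValue is a single 8-byte word, so a cell pointer is already
    // a complete encoded value. On 32-bit (USE(JSVALUE32_64)) a JSValue is a tag/payload pair, so
    // storing a cell means writing the payload word plus an explicit CellTag word.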
    template<typename T>
    void storeCell(T cell, Address address)
    {
#if USE(JSVALUE64)
        store64(cell, address);
#else
        store32(cell, address.withOffset(PayloadOffset));
        store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
#endif
    }

    void storeValue(JSValueRegs regs, Address address)
    {
#if USE(JSVALUE64)
        store64(regs.gpr(), address);
#else
        store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
        store32(regs.tagGPR(), address.withOffset(TagOffset));
#endif
    }

    void moveTrustedValue(JSValue value, JSValueRegs regs)
    {
#if USE(JSVALUE64)
        move(TrustedImm64(JSValue::encode(value)), regs.gpr());
#else
        move(TrustedImm32(value.tag()), regs.tagGPR());
        move(TrustedImm32(value.payload()), regs.payloadGPR());
#endif
    }

#if CPU(X86_64) || CPU(X86)
    static size_t prologueStackPointerDelta()
    {
        // Prologue only saves the framePointerRegister
        return sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        push(framePointerRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        pop(framePointerRegister);
    }

    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)

#if CPU(ARM) || CPU(ARM64)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and linkRegister
        return 2 * sizeof(void*);
    }

    void emitFunctionPrologue()
    {
        pushPair(framePointerRegister, linkRegister);
        move(stackPointerRegister, framePointerRegister);
    }

    void emitFunctionEpilogue()
    {
        move(framePointerRegister, stackPointerRegister);
        popPair(framePointerRegister, linkRegister);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif

#if CPU(MIPS)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and returnAddressRegister
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif

#if CPU(SH4)
    static size_t prologueStackPointerDelta()
    {
        // Prologue saves the framePointerRegister and link register
        return 2 * sizeof(void*);
    }

    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        m_assembler.stspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        m_assembler.ldspr(reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtrLinkReg(address);
    }
#endif

    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
    }
    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
    }
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
    }

    void emitPutReturnPCToCallFrameHeader(RegisterID from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
    {
        storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
    }

    // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
    // fields before the code from emitFunctionPrologue() has executed. Because the frame pointer
    // has not been set up yet, the access goes through the stack pointer. The address calculation
    // must also account for the fact that the stack pointer may not yet have been adjusted down
    // for the return PC and/or the caller's frame pointer: on some platforms, the callee is
    // responsible for pushing the "link register" containing the return address in the function
    // prologue.
#if USE(JSVALUE64)
    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
    }
#else
    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    }

    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    }
#endif

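    // On 64-bit, a value is a cell exactly when the bits covered by tagMaskRegister are all zero;
    // on 32-bit, cells are identified by their tag word being JSValue::CellTag.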
    Jump branchIfNotCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }

    static Address addressForByteOffset(ptrdiff_t byteOffset)
    {
        return Address(GPRInfo::callFrameRegister, byteOffset);
    }
    static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
    {
        ASSERT(virtualRegister.isValid());
        return Address(baseReg, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }

    static Address tagFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
    }
    static Address tagFor(int operand)
    {
        return tagFor(static_cast<VirtualRegister>(operand));
    }

    static Address payloadFor(VirtualRegister virtualRegister)
    {
        ASSERT(virtualRegister.isValid());
        return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
    }
    static Address payloadFor(int operand)
    {
        return payloadFor(static_cast<VirtualRegister>(operand));
    }

    Jump branchIfCellNotObject(GPRReg cellReg)
    {
        return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

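    // Picks the first of regT0..regT4 that is not among the registers the caller needs preserved.
    // For example, selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1) returns a
    // temporary register guaranteed not to alias either argument register.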
    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
            return GPRInfo::regT0;

        if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
            return GPRInfo::regT1;

        if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
            return GPRInfo::regT2;

        if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
            return GPRInfo::regT3;

        return GPRInfo::regT4;
    }

    // Add a debug call. This call has no effect on JIT code execution state.
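    // The sequence below spills every GPR and FPR into a VM-owned scratch buffer, records the
    // buffer's active length so the GC's mark phase will scan it while the call is in flight,
    // calls function(callFrame, argument, buffer), and then restores all registers from the
    // buffer, so the surrounding JIT code observes no register clobbering.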
    void debugCall(V_DebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            store64(GPRInfo::toRegister(i), buffer + i);
#else
            store32(GPRInfo::toRegister(i), buffer + i);
#endif
        }

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
        }

        // Tell GC mark phase how much of the scratch buffer is active during call.
        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
        poke(GPRInfo::callFrameRegister, 0);
        poke(TrustedImmPtr(argument), 1);
        poke(TrustedImmPtr(buffer), 2);
        GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
        call(scratch);

        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(0), GPRInfo::regT0);

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
        }
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            load64(buffer + i, GPRInfo::toRegister(i));
#else
            load32(buffer + i, GPRInfo::toRegister(i));
#endif
        }
    }

    // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
#if !ASSERT_DISABLED
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
    void jitAssertIsNull(GPRReg);
    void jitAssertTagsInPlace();
    void jitAssertArgumentCountSane();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
    void jitAssertIsNull(GPRReg) { }
    void jitAssertTagsInPlace() { }
    void jitAssertArgumentCountSane() { }
#endif

    void purifyNaN(FPRReg);

    // These methods convert between raw doubles and doubles boxed as JSValues.
#if USE(JSVALUE64)
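    // On 64-bit, doubles are boxed by offsetting their bit pattern: subtracting the value held in
    // tagTypeNumberRegister (TagTypeNumber) is the same as adding 2^48 modulo 2^64, which keeps
    // boxed doubles out of the encodings used for cells and int32s; unboxing reverses the offset.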
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        moveDoubleTo64(fpr, gpr);
        sub64(GPRInfo::tagTypeNumberRegister, gpr);
        jitAssertIsJSDouble(gpr);
        return gpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        add64(GPRInfo::tagTypeNumberRegister, gpr);
        move64ToDouble(gpr, fpr);
        return fpr;
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.gpr());
    }

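    // Int52 is the DFG's unboxed representation of integers that fit in 52 bits. Boxing one as a
    // JSValue produces an int32 immediate when the value fits in 32 bits, and a boxed double
    // otherwise.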
    // Here are possible arrangements of source, target, scratch:
    // - source, target, scratch can all be separate registers.
    // - source and target can be the same but scratch is separate.
    // - target and scratch can be the same but source is separate.
    void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
    {
        // Is it an int32?
        signExtend32ToPtr(source, scratch);
        Jump isInt32 = branch64(Equal, source, scratch);

        // It's not an int32, but 'source' still holds the full int64 value.
        convertInt64ToDouble(source, fpScratch);
        boxDouble(fpScratch, target);
        Jump done = jump();

        isInt32.link(this);
        zeroExtend32ToPtr(source, target);
        or64(GPRInfo::tagTypeNumberRegister, target);

        done.link(this);
    }
#endif

#if USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }

    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
    }
#endif

    void callExceptionFuzz();

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck);

#if ENABLE(SAMPLING_COUNTERS)
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t flag);
#endif

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return codeBlock()->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    ExecutableBase* executableFor(const CodeOrigin& codeOrigin);

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock();
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }

    VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock()->argumentsRegister();

        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset);
    }

    VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockFor(codeOrigin)->symbolTable();
    }

    int offsetOfLocals(const CodeOrigin& codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return 0;
        return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
        if (inlineCallFrame->arguments.size() <= 1)
            return 0;
        ValueRecovery recovery = inlineCallFrame->arguments[1];
        RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
        return (recovery.virtualRegister().offset() - 1) * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
    {
        return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame);
    }

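    // On 64-bit, cells store a 32-bit StructureID rather than a Structure pointer; loading the
    // Structure therefore indexes the VM's structureIDTable (8 bytes per entry). On 32-bit, the
    // cell holds the Structure pointer directly.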
    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
        loadPtr(vm()->heap.structureIDTable().base(), scratch);
        loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
#endif
    }

    static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
        jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
        jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
#else
        UNUSED_PARAM(scratch);
        jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
#endif
    }

    void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
    {
        emitStoreStructureWithTypeInfo(*this, structure, dest);
    }

    void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
    {
#if USE(JSVALUE64)
        load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
        store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#else
        // Store all the info flags using a single 32-bit wide load and store.
        load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));

        // Store the StructureID
        storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
    }

    static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);

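    // Each of these returns a jump that is taken when the cell's GC data byte (at
    // JSCell::gcDataOffset()) is non-zero; callers use it when emitting GC write barriers to test
    // the cell's mark state.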
    Jump checkMarkByte(GPRReg cell)
    {
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::gcDataOffset()));
    }

    Jump checkMarkByte(JSCell* cell)
    {
        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::gcDataOffset();
        return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
    }

    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);

protected:
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_baselineCodeBlock;

    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
};

} // namespace JSC

#endif // ENABLE(JIT)

#endif // AssemblyHelpers_h