/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>

#if ENABLE(ASSEMBLER)


#if PLATFORM(QT)
#define ENABLE_JIT_CONSTANT_BLINDING 0
#endif

#ifndef ENABLE_JIT_CONSTANT_BLINDING
#define ENABLE_JIT_CONSTANT_BLINDING 1
#endif

namespace JSC {

inline bool isARMv7s()
{
#if CPU(APPLE_ARMV7S)
    return true;
#else
    return false;
#endif
}

inline bool isARM64()
{
#if CPU(ARM64)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}
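
// These predicates are compile-time constants for any given build. Branching
// on them in shared code keeps both sides syntactically checked while letting
// the compiler fold away the untaken arm, e.g. (illustrative only; the helper
// names are hypothetical):
//
//     if (isARM64())
//         emitFixedSizeSequence();
//     else
//         emitCompactSequence();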

class JumpReplacementWatchpoint;
class LinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };
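
    // A minimal usage sketch (illustrative only; load32() is provided by the
    // concrete MacroAssembler subclasses, and the register names here are
    // hypothetical):
    //
    //     load32(Address(baseReg, 8), destReg); // destReg = *(int32_t*)(baseReg + 8)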

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent; the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };
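
    // The effective address denoted by a BaseIndex is
    // base + (index << scale) + offset. An illustrative sketch (load32() is
    // provided by the concrete subclasses; the register names are hypothetical):
    //
    //     // destReg = *(int32_t*)(arrayReg + indexReg * 4)
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour), destReg);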

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };

    struct ImmPtr :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImmPtr
#else
        public TrustedImmPtr
#endif
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };
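
    // Note on the Trusted*/Imm* split: Imm32/Imm64/ImmPtr wrap values that may
    // be attacker-influenced. With JIT_CONSTANT_BLINDING enabled they inherit
    // privately from their Trusted counterparts, so an untrusted immediate
    // cannot be passed where a trusted one is expected; call sites instead go
    // through blinding-aware overloads in the concrete MacroAssembler, which
    // may emit the constant combined with a random mask (see random() below)
    // and undo the mask at runtime.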

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };


    struct Imm32 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm32
#else
        public TrustedImm32
#endif
    {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm64
#else
        public TrustedImm64
#endif
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.


    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class JumpReplacementWatchpoint;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };
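
    // A typical backward-branch pattern built from these pieces (illustrative
    // only; jump() is provided by the concrete subclasses):
    //
    //     Label loopTop = label();      // record a point in the stream
    //     ...                           // emit the loop body
    //     jump().linkTo(loopTop, this); // link a fresh jump back to it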

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
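
    // A sketch of how a DataLabelPtr is typically produced and consumed
    // (moveWithPatch() lives in the concrete subclasses; locationOf() is a
    // LinkBuffer method, and the variable names are hypothetical):
    //
    //     DataLabelPtr slot = moveWithPatch(TrustedImmPtr(0), scratchReg);
    //     ...
    //     // after linking, the constant can be rewritten in place:
    //     // repatchPointer(linkBuffer.locationOf(slot), realPointer);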

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a
    // 32-bit constant to be patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };
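
    // Sketch of the usual Call life cycle (nearCall() is provided by the
    // concrete subclasses; LinkBuffer::link() performs the final binding, and
    // the names below are hypothetical):
    //
    //     Call c = nearCall();   // plant the call; destination not yet known
    //     ...
    //     // at link time:
    //     // linkBuffer.link(c, FunctionPtr(someHelper));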

    // Jump:
    //
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
#if CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            append(jump);
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
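
    // Typical slow-path accumulation (illustrative; branch32() is provided by
    // the concrete subclasses, and tagReg/IntTag are hypothetical):
    //
    //     JumpList slowCases;
    //     slowCases.append(branch32(NotEqual, tagReg, TrustedImm32(IntTag)));
    //     ...
    //     slowCases.link(this); // every collected jump lands here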


    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void check(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].check(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }
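
    // For example, differenceBetween() can measure the size in bytes of an
    // emitted sequence (the label names are hypothetical):
    //
    //     Label begin = label();
    //     ...                    // emit some instructions
    //     Label end = label();
    //     ptrdiff_t bytes = differenceBetween(begin, end);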

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }
protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
    }

    AssemblerType m_assembler;

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool scratchRegisterForBlinding() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return true; }
    static bool shouldBlindForSpecificArch(uint64_t) { return true; }
#endif

    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssembler<AssemblerType>* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
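
    // How a derived assembler might use CachedTempRegister to avoid reloading
    // a constant it already materialized (a sketch under assumed names;
    // m_cachedMemoryTempRegister and the surrounding logic are hypothetical):
    //
    //     intptr_t current;
    //     if (m_cachedMemoryTempRegister.value(current) && current == wanted)
    //         return m_cachedMemoryTempRegister.registerIDNoInvalidate(); // cache hit
    //     move(TrustedImmPtr(reinterpret_cast<void*>(wanted)),
    //         m_cachedMemoryTempRegister.registerIDNoInvalidate());
    //     m_cachedMemoryTempRegister.setValue(wanted); // remember the new contents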

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    unsigned m_tempRegistersValidBits;

    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h