/*
 * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include "Options.h"
#include "WeakRandom.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>

#if ENABLE(ASSEMBLER)

namespace JSC {

inline bool isARMv7s()
{
#if CPU(APPLE_ARMV7S)
    return true;
#else
    return false;
#endif
}

inline bool isARM64()
{
#if CPU(ARM64)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}

inline bool optimizeForARMv7s()
{
    return isARMv7s() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForARM64()
{
    return isARM64() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForX86()
{
    return isX86() && Options::enableArchitectureSpecificOptimizations();
}

class LinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static RegisterID lastRegister() { return AssemblerType::lastRegister(); }

    static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
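
    // Scale gives the multiplier applied to the index register of a
    // BaseIndex operand (see BaseIndex below): TimesOne, TimesTwo,
    // TimesFour and TimesEight scale the index by 1, 2, 4 and 8 bytes
    // respectively. timesPtr() selects TimesFour or TimesEight to match
    // sizeof(void*), the natural scale for indexing an array of pointers.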

    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        RegisterID base;
        int32_t offset;
    };
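
    // withOffset() is handy for addressing adjacent slots relative to the
    // same base, e.g. (a sketch; load32 is provided by a concrete
    // MacroAssembler subclass, and the register names are placeholders):
    //
    //     Address slot(baseReg, 0);
    //     load32(slot, dest0);                // word at base + 0
    //     load32(slot.withOffset(4), dest1);  // word at base + 4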

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent; the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };
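
    // The effective address denoted by a BaseIndex is:
    //
    //     base + (index << scale) + offset
    //
    // e.g. loading element i of a pointer array (a sketch; loadPtr comes
    // from a concrete MacroAssembler subclass, register names are
    // placeholders):
    //
    //     loadPtr(BaseIndex(arrayReg, indexReg, timesPtr()), destReg);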

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // it from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };

    struct ImmPtr : private TrustedImmPtr {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };

    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 : private TrustedImm64 {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };
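
    // Note on Imm32/Imm64/ImmPtr versus their Trusted* counterparts: the
    // Trusted* forms are for constants computed by the JIT itself, while
    // the unprefixed forms flag values that may derive from untrusted
    // (e.g. script-supplied) input and are therefore candidates for value
    // blinding - see canBlind() and shouldBlindForSpecificArch() below.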

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };
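
    // A backward branch can be linked straight to a previously planted
    // Label, e.g. (a sketch; jump() is provided by a concrete
    // MacroAssembler subclass):
    //
    //     Label loopTop = label();
    //     ... loop body ...
    //     jump().linkTo(loopTop, this);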

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
#if CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };
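
    // Typical forward-branch usage (a sketch; branchTest32 is provided by
    // a concrete MacroAssembler subclass, register names are placeholders):
    //
    //     Jump skip = branchTest32(Zero, regT0); // destination not yet known
    //     ... code to be skipped ...
    //     skip.link(this);                       // branch lands here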

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            if (jump.isSet())
                append(jump);
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
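
    // A JumpList lets several branches share one destination, e.g. when
    // collecting slow-path checks (a sketch; the branch operations come
    // from a concrete MacroAssembler subclass):
    //
    //     JumpList slowPath;
    //     slowPath.append(branchTest32(NonZero, tagReg));
    //     slowPath.append(branch32(Equal, valueReg, TrustedImm32(0)));
    //     ... fast path ...
    //     slowPath.link(this); // all collected jumps land here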

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }
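
    // differenceBetween() measures the distance in bytes between any two
    // code buffer handles that carry an m_label (Label, DataLabel32, Call,
    // etc.), e.g. to find the size of a planted sequence (illustrative):
    //
    //     Label start = label();
    //     ... emit code ...
    //     ptrdiff_t bytes = differenceBetween(start, label());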

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

    AssemblerType m_assembler;

protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }

    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssembler<AssemblerType>* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
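
    // Each CachedTempRegister owns one bit of m_tempRegistersValidBits;
    // planting a Label or linking a Jump clears every bit, since control
    // flow may merge in with different register contents. A derived
    // assembler can use the cache to avoid re-materializing a constant
    // (a sketch; m_memoryTempRegister is a hypothetical member of a
    // subclass, and move() comes from that subclass):
    //
    //     intptr_t cached;
    //     if (!m_memoryTempRegister.value(cached) || cached != wanted) {
    //         move(TrustedImmPtr(reinterpret_cast<void*>(wanted)), m_memoryTempRegister.registerIDNoInvalidate());
    //         m_memoryTempRegister.setValue(wanted);
    //     }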

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    unsigned m_tempRegistersValidBits;

    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h