]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/AbstractMacroAssembler.h
6e82dcc5eb2758e7ab960b022a7dbaf58ce0736f
[apple/javascriptcore.git] / assembler / AbstractMacroAssembler.h
1 /*
2 * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef AbstractMacroAssembler_h
27 #define AbstractMacroAssembler_h
28
29 #include "AbortReason.h"
30 #include "AssemblerBuffer.h"
31 #include "CodeLocation.h"
32 #include "MacroAssemblerCodeRef.h"
33 #include "Options.h"
34 #include "WeakRandom.h"
35 #include <wtf/CryptographicallyRandomNumber.h>
36 #include <wtf/Noncopyable.h>
37
38 #if ENABLE(ASSEMBLER)
39
40 namespace JSC {
41
42 inline bool isARMv7IDIVSupported()
43 {
44 #if HAVE(ARM_IDIV_INSTRUCTIONS)
45 return true;
46 #else
47 return false;
48 #endif
49 }
50
51 inline bool isARM64()
52 {
53 #if CPU(ARM64)
54 return true;
55 #else
56 return false;
57 #endif
58 }
59
60 inline bool isX86()
61 {
62 #if CPU(X86_64) || CPU(X86)
63 return true;
64 #else
65 return false;
66 #endif
67 }
68
69 inline bool optimizeForARMv7IDIVSupported()
70 {
71 return isARMv7IDIVSupported() && Options::enableArchitectureSpecificOptimizations();
72 }
73
74 inline bool optimizeForARM64()
75 {
76 return isARM64() && Options::enableArchitectureSpecificOptimizations();
77 }
78
79 inline bool optimizeForX86()
80 {
81 return isX86() && Options::enableArchitectureSpecificOptimizations();
82 }
83
84 class LinkBuffer;
85 class RepatchBuffer;
86 class Watchpoint;
87 namespace DFG {
88 struct OSRExit;
89 }
90
91 template <class AssemblerType, class MacroAssemblerType>
92 class AbstractMacroAssembler {
93 public:
94 friend class JITWriteBarrierBase;
95 typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
96 typedef AssemblerType AssemblerType_T;
97
98 typedef MacroAssemblerCodePtr CodePtr;
99 typedef MacroAssemblerCodeRef CodeRef;
100
101 class Jump;
102
103 typedef typename AssemblerType::RegisterID RegisterID;
104 typedef typename AssemblerType::FPRegisterID FPRegisterID;
105
// Bounds of the general-purpose register file, as exposed by the underlying
// target-specific assembler.
static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
static RegisterID lastRegister() { return AssemblerType::lastRegister(); }

// Bounds of the floating-point register file.
static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
111
112 // Section 1: MacroAssembler operand types
113 //
114 // The following types are used as operands to MacroAssembler operations,
115 // describing immediate and memory operands to the instructions to be planted.
116
// Multiplier applied to the index register of a BaseIndex operand.
enum Scale {
    TimesOne,
    TimesTwo,
    TimesFour,
    TimesEight,
};
123
124 static Scale timesPtr()
125 {
126 if (sizeof(void*) == 4)
127 return TimesFour;
128 return TimesEight;
129 }
130
131 // Address:
132 //
133 // Describes a simple base-offset address.
134 struct Address {
135 explicit Address(RegisterID base, int32_t offset = 0)
136 : base(base)
137 , offset(offset)
138 {
139 }
140
141 Address withOffset(int32_t additionalOffset)
142 {
143 return Address(base, offset + additionalOffset);
144 }
145
146 RegisterID base;
147 int32_t offset;
148 };
149
// ExtendedAddress:
//
// Like Address, but carrying a pointer-width (intptr_t) offset instead of int32_t.
struct ExtendedAddress {
    explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
        : base(base)
        , offset(offset)
    {
    }

    RegisterID base;
    intptr_t offset;
};
160
161 // ImplicitAddress:
162 //
163 // This class is used for explicit 'load' and 'store' operations
164 // (as opposed to situations in which a memory operand is provided
165 // to a generic operation, such as an integer arithmetic instruction).
166 //
167 // In the case of a load (or store) operation we want to permit
168 // addresses to be implicitly constructed, e.g. the two calls:
169 //
170 // load32(Address(addrReg), destReg);
171 // load32(addrReg, destReg);
172 //
173 // Are equivalent, and the explicit wrapping of the Address in the former
174 // is unnecessary.
struct ImplicitAddress {
    // Implicit conversion from a bare base register: address is base + 0.
    ImplicitAddress(RegisterID base)
        : base(base)
        , offset(0)
    {
    }

    // Implicit conversion from an explicit Address.
    ImplicitAddress(Address address)
        : base(address.base)
        , offset(address.offset)
    {
    }

    RegisterID base;
    int32_t offset;
};
191
192 // BaseIndex:
193 //
194 // Describes a complex addressing mode.
195 struct BaseIndex {
196 BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
197 : base(base)
198 , index(index)
199 , scale(scale)
200 , offset(offset)
201 {
202 }
203
204 RegisterID base;
205 RegisterID index;
206 Scale scale;
207 int32_t offset;
208
209 BaseIndex withOffset(int32_t additionalOffset)
210 {
211 return BaseIndex(base, index, scale, offset + additionalOffset);
212 }
213 };
214
215 // AbsoluteAddress:
216 //
217 // Describes an memory operand given by a pointer. For regular load & store
218 // operations an unwrapped void* will be used, rather than using this.
struct AbsoluteAddress {
    explicit AbsoluteAddress(const void* ptr)
        : m_ptr(ptr)
    {
    }

    // The absolute memory location being addressed.
    const void* m_ptr;
};
227
228 // TrustedImmPtr:
229 //
230 // A pointer sized immediate operand to an instruction - this is wrapped
231 // in a class requiring explicit construction in order to differentiate
232 // from pointers used as absolute addresses to memory operations
233 struct TrustedImmPtr {
234 TrustedImmPtr() { }
235
236 explicit TrustedImmPtr(const void* value)
237 : m_value(value)
238 {
239 }
240
241 // This is only here so that TrustedImmPtr(0) does not confuse the C++
242 // overload handling rules.
243 explicit TrustedImmPtr(int value)
244 : m_value(0)
245 {
246 ASSERT_UNUSED(value, !value);
247 }
248
249 explicit TrustedImmPtr(size_t value)
250 : m_value(reinterpret_cast<void*>(value))
251 {
252 }
253
254 intptr_t asIntptr()
255 {
256 return reinterpret_cast<intptr_t>(m_value);
257 }
258
259 const void* m_value;
260 };
261
// ImmPtr:
//
// A pointer-sized immediate that may originate from an untrusted source.
// Private inheritance hides the trusted interface; callers must go through
// asTrustedImmPtr() explicitly (presumably so the MacroAssembler can apply
// value blinding first — see the blinding helpers in this class).
struct ImmPtr : private TrustedImmPtr
{
    explicit ImmPtr(const void* value)
        : TrustedImmPtr(value)
    {
    }

    TrustedImmPtr asTrustedImmPtr() { return *this; }
};
271
272 // TrustedImm32:
273 //
274 // A 32bit immediate operand to an instruction - this is wrapped in a
275 // class requiring explicit construction in order to prevent RegisterIDs
276 // (which are implemented as an enum) from accidentally being passed as
277 // immediate values.
struct TrustedImm32 {
    TrustedImm32() { }

    explicit TrustedImm32(int32_t value)
        : m_value(value)
    {
    }

#if !CPU(X86_64)
    // Allows a pointer immediate to be used where a 32-bit immediate is
    // expected. NOTE(review): the guard only excludes X86_64; ARM64 (also a
    // 64-bit target) appears to compile this truncating constructor too —
    // confirm this is intended.
    explicit TrustedImm32(TrustedImmPtr ptr)
        : m_value(ptr.asIntptr())
    {
    }
#endif

    int32_t m_value;
};
295
296
// Imm32:
//
// Like TrustedImm32, but for values that may come from untrusted sources;
// private inheritance forces an explicit asTrustedImm32() call.
struct Imm32 : private TrustedImm32 {
    explicit Imm32(int32_t value)
        : TrustedImm32(value)
    {
    }
#if !CPU(X86_64)
    explicit Imm32(TrustedImmPtr ptr)
        : TrustedImm32(ptr)
    {
    }
#endif
    const TrustedImm32& asTrustedImm32() const { return *this; }

};
311
312 // TrustedImm64:
313 //
314 // A 64bit immediate operand to an instruction - this is wrapped in a
315 // class requiring explicit construction in order to prevent RegisterIDs
316 // (which are implemented as an enum) from accidentally being passed as
317 // immediate values.
struct TrustedImm64 {
    TrustedImm64() { }

    explicit TrustedImm64(int64_t value)
        : m_value(value)
    {
    }

#if CPU(X86_64) || CPU(ARM64)
    // On 64-bit targets a pointer is representable as a 64-bit immediate.
    explicit TrustedImm64(TrustedImmPtr ptr)
        : m_value(ptr.asIntptr())
    {
    }
#endif

    int64_t m_value;
};
335
// Imm64:
//
// Like TrustedImm64, but for values that may come from untrusted sources;
// private inheritance forces an explicit asTrustedImm64() call.
struct Imm64 : private TrustedImm64
{
    explicit Imm64(int64_t value)
        : TrustedImm64(value)
    {
    }
#if CPU(X86_64) || CPU(ARM64)
    explicit Imm64(TrustedImmPtr ptr)
        : TrustedImm64(ptr)
    {
    }
#endif
    const TrustedImm64& asTrustedImm64() const { return *this; }
};
350
351 // Section 2: MacroAssembler code buffer handles
352 //
353 // The following types are used to reference items in the code buffer
354 // during JIT code generation. For example, the type Jump is used to
355 // track the location of a jump instruction so that it may later be
356 // linked to a label marking its destination.
357
358
359 // Label:
360 //
361 // A Label records a point in the generated instruction stream, typically such that
362 // it may be used as a destination for a jump.
class Label {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend struct DFG::OSRExit;
    friend class Jump;
    friend class MacroAssemblerCodeRef;
    friend class LinkBuffer;
    friend class Watchpoint;

public:
    // An unset label; isSet() returns false until it is assigned.
    Label()
    {
    }

    // Records the current position in masm's instruction stream. A label is a
    // potential branch target, so any cached temp-register values are
    // invalidated here.
    Label(AbstractMacroAssemblerType* masm)
        : m_label(masm->m_assembler.label())
    {
        masm->invalidateAllTempRegisters();
    }

    bool isSet() const { return m_label.isSet(); }
private:
    AssemblerLabel m_label;
};
387
388 // ConvertibleLoadLabel:
389 //
390 // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
391 // so that:
392 //
393 // loadPtr(Address(a, i), b)
394 //
395 // becomes:
396 //
397 // addPtr(TrustedImmPtr(i), a, b)
class ConvertibleLoadLabel {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend class LinkBuffer;

public:
    // An unset label.
    ConvertibleLoadLabel()
    {
    }

    // Records the current position (ignoring any watchpoint padding) so the
    // load planted here can later be rewritten — see replaceWithLoad() and
    // replaceWithAddressComputation().
    ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
        : m_label(masm->m_assembler.labelIgnoringWatchpoints())
    {
    }

    bool isSet() const { return m_label.isSet(); }
private:
    AssemblerLabel m_label;
};
417
418 // DataLabelPtr:
419 //
420 // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
421 // patched after the code has been generated.
class DataLabelPtr {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend class LinkBuffer;
public:
    // An unset label.
    DataLabelPtr()
    {
    }

    // Marks the current position as holding a pointer-sized constant to be
    // patched after code generation.
    DataLabelPtr(AbstractMacroAssemblerType* masm)
        : m_label(masm->m_assembler.label())
    {
    }

    bool isSet() const { return m_label.isSet(); }

private:
    AssemblerLabel m_label;
};
441
442 // DataLabel32:
443 //
444 // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
445 // patched after the code has been generated.
class DataLabel32 {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend class LinkBuffer;
public:
    // An unset label.
    DataLabel32()
    {
    }

    // Marks the current position as holding a 32-bit constant to be patched
    // after code generation.
    DataLabel32(AbstractMacroAssemblerType* masm)
        : m_label(masm->m_assembler.label())
    {
    }

    AssemblerLabel label() const { return m_label; }

private:
    AssemblerLabel m_label;
};
465
466 // DataLabelCompact:
467 //
468 // A DataLabelCompact is used to refer to a location in the code containing a
469 // compact immediate to be patched after the code has been generated.
class DataLabelCompact {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend class LinkBuffer;
public:
    // An unset label.
    DataLabelCompact()
    {
    }

    // Marks the current position as holding a compact immediate to be patched
    // after code generation.
    DataLabelCompact(AbstractMacroAssemblerType* masm)
        : m_label(masm->m_assembler.label())
    {
    }

    // Wraps an already-taken assembler label.
    DataLabelCompact(AssemblerLabel label)
        : m_label(label)
    {
    }

    AssemblerLabel label() const { return m_label; }

private:
    AssemblerLabel m_label;
};
494
495 // Call:
496 //
497 // A Call object is a reference to a call instruction that has been planted
498 // into the code buffer - it is typically used to link the call, setting the
499 // relative offset such that when executed it will call to the desired
500 // destination.
class Call {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;

public:
    // Bitfield describing the planted call; Linkable and Near combine
    // (LinkableNear == Linkable | Near).
    enum Flags {
        None = 0x0,
        Linkable = 0x1,
        Near = 0x2,
        LinkableNear = 0x3,
    };

    // An unset call with no flags.
    Call()
        : m_flags(None)
    {
    }

    Call(AssemblerLabel jmp, Flags flags)
        : m_label(jmp)
        , m_flags(flags)
    {
    }

    bool isFlagSet(Flags flag)
    {
        return m_flags & flag;
    }

    // Reinterprets a planted jump as a linkable (tail) call.
    static Call fromTailJump(Jump jump)
    {
        return Call(jump.m_label, Linkable);
    }

    AssemblerLabel m_label;
private:
    Flags m_flags;
};
538
539 // Jump:
540 //
541 // A jump object is a reference to a jump instruction that has been planted
542 // into the code buffer - it is typically used to link the jump, setting the
543 // relative offset such that when executed it will jump to the desired
544 // destination.
class Jump {
    template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
    friend class AbstractMacroAssembler;
    friend class Call;
    friend struct DFG::OSRExit;
    friend class LinkBuffer;
public:
    // An unset jump.
    Jump()
    {
    }

#if CPU(ARM_THUMB2)
    // Fixme: this information should be stored in the instruction stream, not in the Jump object.
    Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
        : m_label(jmp)
        , m_type(type)
        , m_condition(condition)
    {
    }
#elif CPU(ARM64)
    Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
        : m_label(jmp)
        , m_type(type)
        , m_condition(condition)
    {
    }

    // Compare-and-branch form: additionally records the compared register and
    // whether the comparison is 64-bit.
    Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
        : m_label(jmp)
        , m_type(type)
        , m_condition(condition)
        , m_is64Bit(is64Bit)
        , m_compareRegister(compareRegister)
    {
        ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
    }

    // Test-bit-and-branch form: additionally records the bit number tested
    // and the register it was tested in.
    Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
        : m_label(jmp)
        , m_type(type)
        , m_condition(condition)
        , m_bitNumber(bitNumber)
        , m_compareRegister(compareRegister)
    {
        ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
    }
#elif CPU(SH4)
    Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
        : m_label(jmp)
        , m_type(type)
    {
    }
#else
    Jump(AssemblerLabel jmp)
        : m_label(jmp)
    {
    }
#endif

    // Exposes the position of the jump instruction itself as a Label.
    Label label() const
    {
        Label result;
        result.m_label = m_label;
        return result;
    }

    // Links this jump to the current end of masm's instruction stream.
    // Linking creates a control-flow merge point, so cached temp-register
    // values are invalidated first.
    void link(AbstractMacroAssemblerType* masm) const
    {
        masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
        masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
        // Dispatch on the recorded jump kind: compare-and-branch and
        // test-bit forms carry extra operands.
        if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
        else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
        else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
        masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
        masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
    }

    // Links this jump to the given label. Note: unlike link(), this does not
    // call invalidateAllTempRegisters().
    // NOTE(review): there is no CPU(SH4) case here, unlike in link(); on SH4
    // this takes the generic two-argument linkJump path — confirm
    // SH4Assembler provides that overload (e.g. with a defaulted jump type).
    void linkTo(Label label, AbstractMacroAssemblerType* masm) const
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
        masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
        if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
        else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
        else
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
        masm->m_assembler.linkJump(m_label, label.m_label);
#endif
    }

    bool isSet() const { return m_label.isSet(); }

private:
    AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
    ARMv7Assembler::JumpType m_type;
    ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
    ARM64Assembler::JumpType m_type;
    ARM64Assembler::Condition m_condition;
    bool m_is64Bit;
    unsigned m_bitNumber;
    ARM64Assembler::RegisterID m_compareRegister;
#endif
#if CPU(SH4)
    SH4Assembler::JumpType m_type;
#endif
};
673
// PatchableJump:
//
// A Jump wrapper marking a jump that was planted so it can be patched later.
struct PatchableJump {
    PatchableJump()
    {
    }

    explicit PatchableJump(Jump jump)
        : m_jump(jump)
    {
    }

    // Allows a PatchableJump to be used wherever a Jump is expected.
    operator Jump&() { return m_jump; }

    Jump m_jump;
};
688
689 // JumpList:
690 //
691 // A JumpList is a set of Jump objects.
692 // All jumps in the set will be linked to the same destination.
693 class JumpList {
694 friend class LinkBuffer;
695
696 public:
697 typedef Vector<Jump, 2> JumpVector;
698
699 JumpList() { }
700
701 JumpList(Jump jump)
702 {
703 if (jump.isSet())
704 append(jump);
705 }
706
707 void link(AbstractMacroAssemblerType* masm)
708 {
709 size_t size = m_jumps.size();
710 for (size_t i = 0; i < size; ++i)
711 m_jumps[i].link(masm);
712 m_jumps.clear();
713 }
714
715 void linkTo(Label label, AbstractMacroAssemblerType* masm)
716 {
717 size_t size = m_jumps.size();
718 for (size_t i = 0; i < size; ++i)
719 m_jumps[i].linkTo(label, masm);
720 m_jumps.clear();
721 }
722
723 void append(Jump jump)
724 {
725 m_jumps.append(jump);
726 }
727
728 void append(const JumpList& other)
729 {
730 m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
731 }
732
733 bool empty()
734 {
735 return !m_jumps.size();
736 }
737
738 void clear()
739 {
740 m_jumps.clear();
741 }
742
743 const JumpVector& jumps() const { return m_jumps; }
744
745 private:
746 JumpVector m_jumps;
747 };
748
749
750 // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
// Returns a Label at the current position, skipping any padding reserved for
// watchpoints at this location.
Label labelIgnoringWatchpoints()
{
    Label result;
    result.m_label = m_assembler.labelIgnoringWatchpoints();
    return result;
}
#else
// Without the DFG there is no watchpoint padding, so this is just label().
Label labelIgnoringWatchpoints()
{
    return label();
}
#endif

// Returns a Label marking the current position in the instruction stream.
Label label()
{
    return Label(this);
}
769
// Emits any padding required before a patchable instruction.
void padBeforePatch()
{
    // Rely on the fact that asking for a label already does the padding.
    (void)label();
}

// Returns a label positioned for installing a Watchpoint, using the
// assembler's watchpoint-specific label.
Label watchpointLabel()
{
    Label result;
    result.m_label = m_assembler.labelForWatchpoint();
    return result;
}

// Aligns the instruction stream to a 16-byte boundary and returns a label there.
Label align()
{
    m_assembler.align(16);
    return Label(this);
}
788
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
// Records the code offset at which a register allocation took place, so that
// branches can later be validated against it.
class RegisterAllocationOffset {
public:
    RegisterAllocationOffset(unsigned offset)
        : m_offset(offset)
    {
    }

    // Release-asserts that this allocation point does not lie within the
    // inclusive offset range [low, high].
    void checkOffsets(unsigned low, unsigned high)
    {
        RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
    }

private:
    unsigned m_offset;
};

void addRegisterAllocationAtOffset(unsigned offset)
{
    m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
}

void clearRegisterAllocationOffsets()
{
    m_registerAllocationForOffsets.clear();
}

// Verifies that no recorded register allocation lies between the two code
// offsets; the offsets may be given in either order.
void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
{
    if (offset1 > offset2)
        std::swap(offset1, offset2);

    size_t size = m_registerAllocationForOffsets.size();
    for (size_t i = 0; i < size; ++i)
        m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
}
#endif
826
// Returns the byte distance between two code-buffer handles (Label, Jump,
// Call, DataLabel*, ...), delegating to the underlying assembler.
template<typename T, typename U>
static ptrdiff_t differenceBetween(T from, U to)
{
    return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}

// Returns b - a in bytes, computed on executable addresses.
static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
{
    return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
}

// Current offset into the code buffer, for debugging.
unsigned debugOffset() { return m_assembler.debugOffset(); }

// Flushes the instruction cache for the given range so freshly written code
// is safe to execute.
ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
{
    AssemblerType::cacheFlush(code, size);
}
844
845 #if ENABLE(MASM_PROBE)
846
// CPUState:
//
// A snapshot of the CPU registers captured by probe(). The fields are
// generated from FOR_EACH_CPU_REGISTER so the member list mirrors the
// target's register file.
struct CPUState {
#define DECLARE_REGISTER(_type, _regName) \
    _type _regName;
    FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
#undef DECLARE_REGISTER

    // Printable name of a general-purpose register. The switch deliberately
    // has no default so the compiler can diagnose unhandled register IDs.
    static const char* registerName(RegisterID regID)
    {
        switch (regID) {
#define DECLARE_REGISTER(_type, _regName) \
        case RegisterID::_regName: \
            return #_regName;
        FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
#undef DECLARE_REGISTER
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Printable name of a floating-point register.
    static const char* registerName(FPRegisterID regID)
    {
        switch (regID) {
#define DECLARE_REGISTER(_type, _regName) \
        case FPRegisterID::_regName: \
            return #_regName;
        FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
#undef DECLARE_REGISTER
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Captured value of a general-purpose register.
    void* registerValue(RegisterID regID)
    {
        switch (regID) {
#define DECLARE_REGISTER(_type, _regName) \
        case RegisterID::_regName: \
            return _regName;
        FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
#undef DECLARE_REGISTER
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Captured value of a floating-point register.
    double registerValue(FPRegisterID regID)
    {
        switch (regID) {
#define DECLARE_REGISTER(_type, _regName) \
        case FPRegisterID::_regName: \
            return _regName;
        FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
#undef DECLARE_REGISTER
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

};
902
903 struct ProbeContext;
904 typedef void (*ProbeFunction)(struct ProbeContext*);
905
906 struct ProbeContext {
907 ProbeFunction probeFunction;
908 void* arg1;
909 void* arg2;
910 CPUState cpu;
911
912 void print(int indentation = 0)
913 {
914 #define INDENT MacroAssemblerType::printIndent(indentation)
915
916 INDENT, dataLogF("ProbeContext %p {\n", this);
917 indentation++;
918 {
919 INDENT, dataLogF("probeFunction: %p\n", probeFunction);
920 INDENT, dataLogF("arg1: %p %llu\n", arg1, reinterpret_cast<int64_t>(arg1));
921 INDENT, dataLogF("arg2: %p %llu\n", arg2, reinterpret_cast<int64_t>(arg2));
922 MacroAssemblerType::printCPU(cpu, indentation);
923 }
924 indentation--;
925 INDENT, dataLog("}\n");
926
927 #undef INDENT
928 }
929 };
930
931 static void printIndent(int indentation)
932 {
933 for (; indentation > 0; indentation--)
934 dataLog(" ");
935 }
936
// Prints the captured CPU state as an indented block, delegating the
// per-register listing to the target-specific printCPURegisters().
static void printCPU(CPUState& cpu, int indentation = 0)
{
#define INDENT printIndent(indentation)

    INDENT, dataLog("cpu: {\n");
    MacroAssemblerType::printCPURegisters(cpu, indentation + 1);
    INDENT, dataLog("}\n");

#undef INDENT
}

// This is a marker type only used with print(). See print() below for details.
struct AllRegisters { };
950
951 // Emits code which will print debugging info at runtime. The type of values that
952 // can be printed is encapsulated in the PrintArg struct below. Here are some
953 // examples:
954 //
955 // print("Hello world\n"); // Emits code to print the string.
956 //
957 // CodeBlock* cb = ...;
958 // print(cb); // Emits code to print the pointer value.
959 //
960 // RegisterID regID = ...;
961 // print(regID); // Emits code to print the register value (not the id).
962 //
963 // // Emits code to print all registers. Unlike other items, this prints
964 // // multiple lines as follows:
965 // // cpu {
966 // // eax: 0x123456789
967 // // ebx: 0x000000abc
968 // // ...
969 // // }
970 // print(AllRegisters());
971 //
972 // // Print multiple things at once. This incurs the probe overhead only once
973 // // to print all the items.
974 // print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());
975
// Emits code that prints the given arguments at runtime (see the comment
// above for supported argument kinds). Forwards to the target-specific
// MacroAssembler's printInternal, which plants the probe.
template<typename... Arguments>
void print(Arguments... args)
{
    printInternal(static_cast<MacroAssemblerType*>(this), args...);
}
981
982 // This function will be called by printCPU() to print the contents of the
983 // target specific registers which are saved away in the CPUState struct.
984 // printCPURegisters() should make use of printIndentation() to print the
985 // registers with the appropriate amount of indentation.
986 //
987 // Note: printCPURegisters() should be implemented by the target specific
988 // MacroAssembler. This prototype is only provided here to document the
989 // interface.
990
991 static void printCPURegisters(CPUState&, int indentation = 0);
992
993 // This function will be called by print() to print the contents of a
994 // specific register (from the CPUState) in line with other items in the
995 // print stream. Hence, no indentation is needed.
996 //
997 // Note: printRegister() should be implemented by the target specific
998 // MacroAssembler. These prototypes are only provided here to document their
999 // interface.
1000
1001 static void printRegister(CPUState&, RegisterID);
1002 static void printRegister(CPUState&, FPRegisterID);
1003
1004 // This function emits code to preserve the CPUState (e.g. registers),
1005 // call a user supplied probe function, and restore the CPUState before
1006 // continuing with other JIT generated code.
1007 //
1008 // The user supplied probe function will be called with a single pointer to
1009 // a ProbeContext struct (defined above) which contains, among other things,
1010 // the preserved CPUState. This allows the user probe function to inspect
1011 // the CPUState at that point in the JIT generated code.
1012 //
1013 // If the user probe function alters the register values in the ProbeContext,
1014 // the altered values will be loaded into the CPU registers when the probe
1015 // returns.
1016 //
1017 // The ProbeContext is stack allocated and is only valid for the duration
1018 // of the call to the user probe function.
1019 //
1020 // Note: probe() should be implemented by the target specific MacroAssembler.
1021 // This prototype is only provided here to document the interface.
1022
1023 void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
1024
1025 #endif // ENABLE(MASM_PROBE)
1026
// The underlying target-specific assembler that owns the code buffer.
AssemblerType m_assembler;

protected:
// Seeds the random source from a cryptographically random number; all cached
// temp registers start out invalid.
AbstractMacroAssembler()
    : m_randomSource(cryptographicallyRandomNumber())
{
    invalidateAllTempRegisters();
}

// Cheap pseudo-random value — presumably used for immediate-value blinding
// (see the blinding helpers below); confirm at call sites.
uint32_t random()
{
    return m_randomSource.getUint32();
}

WeakRandom m_randomSource;
1042
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

// Default (disabled) immediate-blinding policy. Target MacroAssemblers that
// support blinding shadow these statics — TODO confirm in the subclasses.
static bool haveScratchRegisterForBlinding()
{
    return false;
}
// Must not be called when haveScratchRegisterForBlinding() is false.
static RegisterID scratchRegisterForBlinding()
{
    UNREACHABLE_FOR_PLATFORM();
    return firstRegister();
}
static bool canBlind() { return false; }
static bool shouldBlindForSpecificArch(uint32_t) { return false; }
static bool shouldBlindForSpecificArch(uint64_t) { return false; }
1059
// CachedTempRegister:
//
// Tracks the last value recorded for a scratch register so redundant
// re-materialization can be avoided. Validity is cleared whenever a label is
// taken or a jump is linked (see Label and Jump), presumably because control
// flow may merge with code that left a different value in the register.
class CachedTempRegister {
    friend class DataLabelPtr;
    friend class DataLabel32;
    friend class DataLabelCompact;
    friend class Jump;
    friend class Label;

public:
    CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
        : m_masm(masm)
        , m_registerID(registerID)
        , m_value(0)
        , m_validBit(1 << static_cast<unsigned>(registerID))
    {
        // Validity is one bit per register in an unsigned mask, so the
        // register ID must fit within the bit width of unsigned.
        ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
    }

    // Returns the register and invalidates its cached value (caller is about
    // to clobber it).
    ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

    // Returns the register without touching the cache.
    ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

    // Reads the cached value; the return value says whether it is still valid.
    bool value(intptr_t& value)
    {
        value = m_value;
        return m_masm->isTempRegisterValid(m_validBit);
    }

    // Records a new cached value and marks it valid.
    void setValue(intptr_t value)
    {
        m_value = value;
        m_masm->setTempRegisterValid(m_validBit);
    }

    ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

private:
    AbstractMacroAssemblerType* m_masm;
    RegisterID m_registerID;
    intptr_t m_value;
    unsigned m_validBit;
};
1101
// Bit-mask bookkeeping backing CachedTempRegister: bit N set means the cached
// value for register N is still accurate.
ALWAYS_INLINE void invalidateAllTempRegisters()
{
    m_tempRegistersValidBits = 0;
}

ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
{
    return (m_tempRegistersValidBits & registerMask);
}

ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
{
    m_tempRegistersValidBits &= ~registerMask;
}

ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
{
    m_tempRegistersValidBits |= registerMask;
}

unsigned m_tempRegistersValidBits;
1123
1124 friend class LinkBuffer;
1125 friend class RepatchBuffer;
1126
1127 static void linkJump(void* code, Jump jump, CodeLocationLabel target)
1128 {
1129 AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
1130 }
1131
1132 static void linkPointer(void* code, AssemblerLabel label, void* value)
1133 {
1134 AssemblerType::linkPointer(code, label, value);
1135 }
1136
1137 static void* getLinkerAddress(void* code, AssemblerLabel label)
1138 {
1139 return AssemblerType::getRelocatedAddress(code, label);
1140 }
1141
1142 static unsigned getLinkerCallReturnOffset(Call call)
1143 {
1144 return AssemblerType::getCallReturnOffset(call.m_label);
1145 }
1146
1147 static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
1148 {
1149 AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
1150 }
1151
    // Re-links an already-emitted near call at |nearCall| to call
    // |destination|. Note: unlike repatchJump, the target here is taken via
    // executableAddress() rather than dataLocation().
    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }
1156
    // Rewrites the compact (small-immediate) data label at |dataLabelCompact|
    // to hold |value|.
    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }
1161
    // Rewrites the 32-bit datum at |dataLabel32| to hold |value|.
    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }
1166
    // Rewrites the pointer-sized datum at |dataLabelPtr| to hold |value|.
    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }
1171
    // Reads back the pointer-sized datum currently stored at |dataLabelPtr|.
    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }
1176
    // Converts the convertible instruction at |label| into its load form.
    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }
1181
    // Converts the convertible instruction at |label| into its
    // address-computation form (the counterpart of replaceWithLoad).
    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
1186
1187 private:
1188
1189 #if ENABLE(MASM_PROBE)
1190
1191 struct PrintArg {
1192
1193 enum class Type {
1194 AllRegisters,
1195 RegisterID,
1196 FPRegisterID,
1197 ConstCharPtr,
1198 ConstVoidPtr,
1199 IntptrValue,
1200 UintptrValue,
1201 };
1202
1203 PrintArg(AllRegisters&)
1204 : type(Type::AllRegisters)
1205 {
1206 }
1207
1208 PrintArg(RegisterID regID)
1209 : type(Type::RegisterID)
1210 {
1211 u.gpRegisterID = regID;
1212 }
1213
1214 PrintArg(FPRegisterID regID)
1215 : type(Type::FPRegisterID)
1216 {
1217 u.fpRegisterID = regID;
1218 }
1219
1220 PrintArg(const char* ptr)
1221 : type(Type::ConstCharPtr)
1222 {
1223 u.constCharPtr = ptr;
1224 }
1225
1226 PrintArg(const void* ptr)
1227 : type(Type::ConstVoidPtr)
1228 {
1229 u.constVoidPtr = ptr;
1230 }
1231
1232 PrintArg(int value)
1233 : type(Type::IntptrValue)
1234 {
1235 u.intptrValue = value;
1236 }
1237
1238 PrintArg(unsigned value)
1239 : type(Type::UintptrValue)
1240 {
1241 u.intptrValue = value;
1242 }
1243
1244 PrintArg(intptr_t value)
1245 : type(Type::IntptrValue)
1246 {
1247 u.intptrValue = value;
1248 }
1249
1250 PrintArg(uintptr_t value)
1251 : type(Type::UintptrValue)
1252 {
1253 u.uintptrValue = value;
1254 }
1255
1256 Type type;
1257 union {
1258 RegisterID gpRegisterID;
1259 FPRegisterID fpRegisterID;
1260 const char* constCharPtr;
1261 const void* constVoidPtr;
1262 intptr_t intptrValue;
1263 uintptr_t uintptrValue;
1264 } u;
1265 };
1266
1267 typedef Vector<PrintArg> PrintArgsList;
1268
    // Recursively unpacks the variadic print() arguments, wrapping each one
    // in a PrintArg and appending it to |argsList|.
    template<typename FirstArg, typename... Arguments>
    static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
    {
        argsList->append(PrintArg(firstArg));
        appendPrintArg(argsList, otherArgs...);
    }

    // Recursion base case: no arguments left to append.
    static void appendPrintArg(PrintArgsList*) { }
1277
1278
    // Packages |args| into a heap-allocated PrintArgsList and installs a
    // probe that prints them when the emitted code runs. Ownership of the
    // list is handed off via release(); presumably the probe machinery is
    // responsible for freeing it after printCallback runs — not visible here.
    template<typename... Arguments>
    static void printInternal(MacroAssemblerType* masm, Arguments... args)
    {
        auto argsList = std::make_unique<PrintArgsList>();
        appendPrintArg(argsList.get(), args...);
        masm->probe(printCallback, argsList.release());
    }
1286
1287 static void printCallback(ProbeContext* context)
1288 {
1289 typedef PrintArg Arg;
1290 PrintArgsList& argsList =
1291 *reinterpret_cast<PrintArgsList*>(context->arg1);
1292 for (size_t i = 0; i < argsList.size(); i++) {
1293 auto& arg = argsList[i];
1294 switch (arg.type) {
1295 case Arg::Type::AllRegisters:
1296 MacroAssemblerType::printCPU(context->cpu);
1297 break;
1298 case Arg::Type::RegisterID:
1299 MacroAssemblerType::printRegister(context->cpu, arg.u.gpRegisterID);
1300 break;
1301 case Arg::Type::FPRegisterID:
1302 MacroAssemblerType::printRegister(context->cpu, arg.u.fpRegisterID);
1303 break;
1304 case Arg::Type::ConstCharPtr:
1305 dataLog(arg.u.constCharPtr);
1306 break;
1307 case Arg::Type::ConstVoidPtr:
1308 dataLogF("%p", arg.u.constVoidPtr);
1309 break;
1310 case Arg::Type::IntptrValue:
1311 dataLog(arg.u.intptrValue);
1312 break;
1313 case Arg::Type::UintptrValue:
1314 dataLog(arg.u.uintptrValue);
1315 break;
1316 }
1317 }
1318 }
1319
1320 #endif // ENABLE(MASM_PROBE)
1321
1322 }; // class AbstractMacroAssembler
1323
1324 } // namespace JSC
1325
1326 #endif // ENABLE(ASSEMBLER)
1327
1328 #endif // AbstractMacroAssembler_h