]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerARMv7.h
JavaScriptCore-7600.1.4.9.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerARMv7.h
1 /*
2 * Copyright (C) 2009, 2010, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
// ARMv7 (Thumb-2) macro assembler backend. The class body continues past this
// chunk; this section declares the scratch registers, construction, and the
// branch-compaction/link-time metadata types.
class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
    // Scratch registers reserved by the macro assembler itself. Many of the
    // operations below clobber these, so callers must not keep live values in them.
    static const RegisterID dataTempRegister = ARMRegisters::ip;
    static const RegisterID addressTempRegister = ARMRegisters::r6;

    // FP scratch register; d7 can also be viewed as a single-precision register
    // via fpTempRegisterAsSingle().
    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }

public:
    MacroAssemblerARMv7()
        : m_makeJumpPatchable(false)
    {
    }

    // Types re-exported from the underlying assembler for the shared
    // branch-compaction and linking machinery.
    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
    typedef ARMv7Assembler::Condition Condition;

    static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
    static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
57
    // True if |value| fits the compact 8-bit load/store offset form used by
    // load32WithCompactAddressOffsetPatch below.
    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -255 && value <= 255;
    }

    // Thin forwarders exposing the assembler's branch-compaction hooks to the
    // shared linker.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); }
70
    // Internal addressing form used by the private load/store helpers: either
    // base + signed 32-bit offset, or base + (index << scale).
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };
100
public:
    // Pointers are 4 bytes on this target.
    static const Scale ScalePtr = TimesFour;

    // Integer comparison conditions, mapped directly onto ARM condition codes.
    enum RelationalCondition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE
    };

    // Conditions tested against the flags set by a flag-setting ALU operation.
    enum ResultCondition {
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        PositiveOrZero = ARMv7Assembler::ConditionPL,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };

    // Double-precision comparison conditions, interpreted against the flags
    // transferred from the FP status register after a compare.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID framePointerRegister = ARMRegisters::fp;
    static const RegisterID linkRegister = ARMRegisters::lr;
145
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be an TrustedImm32, the srcDst operand
    // may often be a memory location (explictly described using an Address
    // object).

    // dest += src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    // dest += imm.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    // dest += *src.m_ptr. Clobbers dataTempRegister.
    void add32(AbsoluteAddress src, RegisterID dest)
    {
        load32(src.m_ptr, dataTempRegister);
        add32(dataTempRegister, dest);
    }
168
169 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
170 {
171 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
172
173 // For adds with stack pointer destination, moving the src first to sp is
174 // needed to avoid unpredictable instruction
175 if (dest == ARMRegisters::sp && src != dest) {
176 move(src, ARMRegisters::sp);
177 src = ARMRegisters::sp;
178 }
179
180 if (armImm.isValid())
181 m_assembler.add(dest, src, armImm);
182 else {
183 move(imm, dataTempRegister);
184 m_assembler.add(dest, src, dataTempRegister);
185 }
186 }
187
    // Read-modify-write add of imm into memory at |address|. Clobbers
    // dataTempRegister (the value) and possibly addressTempRegister (the immediate).
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest += *src. Clobbers dataTempRegister.
    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }
210
    // Read-modify-write add of imm into the absolute address. Clobbers
    // dataTempRegister and possibly addressTempRegister.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    // Pointer-sized add; uses the non-flag-setting add32 above.
    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }
232
    // 64-bit add of a sign-extended 32-bit immediate into the two words at
    // |address| (low word at +0, high word at +4), propagating the carry from
    // the low half with adc. Clobbers both temp registers.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            // addressTempRegister was clobbered by the immediate; recompute the address.
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        }
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
        // The high word adds the immediate's sign extension (0 or -1) plus carry.
        m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
    }
252
    // dest = op1 & op2.
    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.ARM_and(dest, op1, op2);
    }
257
258 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
259 {
260 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
261 if (armImm.isValid())
262 m_assembler.ARM_and(dest, src, armImm);
263 else {
264 move(imm, dataTempRegister);
265 m_assembler.ARM_and(dest, src, dataTempRegister);
266 }
267 }
268
    // dest &= src.
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    // dest &= imm.
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    // dest &= *src. Clobbers dataTempRegister.
    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    // dest = count of leading zero bits in src.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }
289
    // dest = src << (shiftAmount & 0x1f). The masked shift count is placed in
    // dataTempRegister, which is therefore clobbered.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsl(dest, src, dataTempRegister);
    }

    // dest = src << (imm & 0x1f).
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, src, imm.m_value & 0x1f);
    }

    // dest <<= shiftAmount (masked to 0..31).
    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    // dest <<= imm (masked to 0..31).
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }
314
    // dest = low 32 bits of dest * src. smull produces the full 64-bit product;
    // the high half lands in dataTempRegister and is discarded.
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    // dest = low 32 bits of src * imm. Clobbers dataTempRegister.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }
325
    // srcDest = -srcDest (two's-complement negate).
    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }

    // dest |= src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }

    // *dest.m_ptr |= src. Clobbers both temp registers.
    void or32(RegisterID src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        load32(addressTempRegister, dataTempRegister);
        or32(src, dataTempRegister);
        store32(dataTempRegister, addressTempRegister);
    }
343
    // *address |= imm. Clobbers dataTempRegister.
    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    // dest |= imm.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    // dest = op1 | op2.
    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr(dest, op1, op2);
    }
360
361 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
362 {
363 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
364 if (armImm.isValid())
365 m_assembler.orr(dest, src, armImm);
366 else {
367 move(imm, dataTempRegister);
368 m_assembler.orr(dest, src, dataTempRegister);
369 }
370 }
371
    // dest = src >> (shiftAmount & 0x1f), arithmetic (sign-propagating).
    // The masked shift count clobbers dataTempRegister.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.asr(dest, src, dataTempRegister);
    }

    // dest = src >> (imm & 0x1f), arithmetic.
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    // dest = src >> (shiftAmount & 0x1f), logical (zero-filling).
    // The masked shift count clobbers dataTempRegister.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsr(dest, src, dataTempRegister);
    }

    // dest = src >> (imm & 0x1f), logical.
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }
421
    // dest -= src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }
426
427 void sub32(TrustedImm32 imm, RegisterID dest)
428 {
429 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
430 if (armImm.isValid())
431 m_assembler.sub(dest, dest, armImm);
432 else {
433 move(imm, dataTempRegister);
434 m_assembler.sub(dest, dest, dataTempRegister);
435 }
436 }
437
    // Read-modify-write subtract of imm from memory at |address|. Clobbers
    // dataTempRegister and possibly addressTempRegister.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest -= *src. Clobbers dataTempRegister.
    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    // Read-modify-write subtract of imm from the absolute address. Clobbers
    // dataTempRegister and possibly addressTempRegister.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
477
    // dest = op1 ^ op2.
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor(dest, op1, op2);
    }
482
483 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
484 {
485 if (imm.m_value == -1) {
486 m_assembler.mvn(dest, src);
487 return;
488 }
489
490 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
491 if (armImm.isValid())
492 m_assembler.eor(dest, src, armImm);
493 else {
494 move(imm, dataTempRegister);
495 m_assembler.eor(dest, src, dataTempRegister);
496 }
497 }
498
    // dest ^= src.
    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }
503
504 void xor32(TrustedImm32 imm, RegisterID dest)
505 {
506 if (imm.m_value == -1)
507 m_assembler.mvn(dest, dest);
508 else
509 xor32(imm, dest, dest);
510 }
511
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be an TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.

private:
    // Word load from an ArmAddress. Non-negative offsets use the 12-bit
    // immediate form; negative offsets must fit the 8-bit (>= -255) form.
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }
534
    // Halfword (zero-extending) load; same offset-encoding rules as load32.
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending halfword load; only the base+index form is supported here.
    void load16Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
    }

    // Byte (zero-extending) load; same offset-encoding rules as load32.
    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending byte load; only the base+index form is supported here.
    void load8Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
    }
574
protected:
    // Word store to an ArmAddress; same offset-encoding rules as load32.
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }

private:
    // Byte store; same offset-encoding rules as load32.
    void store8(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strb(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strb(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strb(src, address.base, address.u.offset, true, false);
        }
    }

    // Halfword store; same offset-encoding rules as load32.
    void store16(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strh(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strh(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strh(src, address.base, address.u.offset, true, false);
        }
    }
618
public:
    // Public load32 entry points; all route through setupArmAddress, which may
    // clobber a temp register to form the effective address.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // Unaligned word access is handled by the same ldr on this backend.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(setupArmAddress(address), dest);
    }

    // Load a word from an absolute address; the pointer is materialized in
    // addressTempRegister.
    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }
645
    // Deliberate crash, leaving |reason| in dataTempRegister so it is visible
    // in the crashed register state.
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    // As above, with an extra word of context left in addressTempRegister.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm32(misc), addressTempRegister);
        abortWithReason(reason);
    }

    // Emits a wide-form load with an 8-bit immediate so the instruction can
    // later be converted in place; the offset must be in 0..255.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
        return result;
    }
665
    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    // Sign-extending byte load only supports the base+index form on this
    // backend (see load8Signed(ArmAddress, ...)).
    void load8Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        load8Signed(setupArmAddress(address), dest);
    }

    // Absolute-address byte load; uses dest itself as the address register,
    // so no temp register is clobbered.
    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }
691
    // Load whose 32-bit offset is patchable after linking; the offset is
    // materialized into dataTempRegister and used as an index.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    // Compact-offset variant: the offset must already satisfy
    // isCompactPtrAlignedAddressOffset, and is encoded directly in the load.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();

        RegisterID base = address.base;

        DataLabelCompact label(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));

        m_assembler.ldr(dest, base, address.offset, true, false);
        return label;
    }
711
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        load16Signed(setupArmAddress(address), dest);
    }

    // Halfword load with immediate offset; offsets that don't fit the 12-bit
    // encoding are materialized in dataTempRegister and used as an index.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }

    // Sign-extending halfword load only supports the base+index form here.
    void load16Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
737
    // Store whose 32-bit offset is patchable after linking.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    // Immediate stores stage the value in dataTempRegister.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    // Absolute-address store; the pointer goes through addressTempRegister.
    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }
778
    void store8(RegisterID src, Address address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, BaseIndex address)
    {
        store8(src, setupArmAddress(address));
    }

    // Absolute-address byte store; the pointer goes through addressTempRegister.
    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        store8(src, ArmAddress(addressTempRegister, 0));
    }

    // Immediate byte stores stage the value in dataTempRegister.
    void store8(TrustedImm32 imm, void* address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        store16(src, setupArmAddress(address));
    }
811
    // Possibly clobbers src, but not on this architecture.
    // Splits a double register into its two 32-bit halves (vmov d -> r, r).
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    // Packs two 32-bit registers into a double register; the scratch FP
    // register is unused on this backend.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        UNUSED_PARAM(scratch);
        m_assembler.vmov(dest, src1, src2);
    }
823
824 static bool shouldBlindForSpecificArch(uint32_t value)
825 {
826 ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
827
828 // Couldn't be encoded as an immediate, so assume it's untrusted.
829 if (!immediate.isValid())
830 return true;
831
832 // If we can encode the immediate, we have less than 16 attacker
833 // controlled bits.
834 if (immediate.isEncodedImm())
835 return false;
836
837 // Don't let any more than 12 bits of an instruction word
838 // be controlled by an attacker.
839 return !immediate.isUInt12();
840 }
841
    // Floating-point operations:

    // VFP is always available on the ARMv7 targets this backend supports.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
848
    // Double load. If the offset cannot be encoded by VLDR (must be a multiple
    // of 4 within +/-1020), the effective address is computed into
    // addressTempRegister first.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    // Single-precision load; same offset-encoding constraints as loadDouble.
    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
    }

    // BaseIndex forms compute base + (index << scale) into addressTempRegister.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadDouble(Address(addressTempRegister, address.offset), dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadFloat(Address(addressTempRegister, address.offset), dest);
    }
894
    // Register-to-register double move; no-op when src == dest.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov(dest, src);
    }

    // Load a double from an absolute address via addressTempRegister.
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        move(address, addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }
906
    // Double store; same VSTR offset-encoding constraints as loadDouble.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }

    // Single-precision store; same offset-encoding constraints.
    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
    }

    // Store a double to an absolute address via addressTempRegister.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        move(address, addressTempRegister);
        storeDouble(src, addressTempRegister);
    }

    // BaseIndex forms compute base + (index << scale) into addressTempRegister.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeDouble(src, Address(addressTempRegister, address.offset));
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeFloat(src, Address(addressTempRegister, address.offset));
    }
958
    // dest += src (double).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd(dest, dest, src);
    }

    // dest += *src (double). Clobbers fpTempRegister.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest = op1 + op2 (double).
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd(dest, op1, op2);
    }

    // dest += *address.m_ptr (double). Clobbers fpTempRegister and
    // addressTempRegister.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        m_assembler.vadd(dest, dest, fpTempRegister);
    }

    // dest /= src (double).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, dest, src);
    }

    // dest = op1 / op2 (double).
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, op1, op2);
    }

    // dest -= src (double).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub(dest, dest, src);
    }

    // dest -= *src (double). Clobbers fpTempRegister.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // dest = op1 - op2 (double).
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub(dest, op1, op2);
    }

    // dest *= src (double).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul(dest, dest, src);
    }

    // dest *= *src (double). Clobbers fpTempRegister.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // dest = op1 * op2 (double).
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul(dest, op1, op2);
    }
1022
    // dest = sqrt(src) (VSQRT).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt(dest, src);
    }

    // dest = |src| (VABS).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs(dest, src);
    }

    // dest = -src (VNEG; flips the sign bit, so -0.0 is produced for +0.0).
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg(dest, src);
    }
1037
    // dest = (double)src. The 32-bit value is first transferred into the
    // FP register file (vmov writes src into both halves of fpTempRegister;
    // only the low single-precision half is used), then converted with VCVT.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegister, src, src);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    // dest = (double)*(int32_t*)address. Clobbers dataTempRegister and fpTempRegister.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // FIXME: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    // As above, but for an absolute address.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // FIXME: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }
1059
    // Widen a single-precision value (held in the low half of src) to double.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
    }

    // Narrow a double to single precision (result in the low half of dst).
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
    }
1069
    // Compare two doubles and branch on the given condition. VCMP sets the
    // FPSCR flags; VMRS copies them to the APSR so integer condition codes
    // can test them. ConditionVS (V set) indicates an unordered compare (NaN).
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        // All other DoubleConditions map directly onto a single ARM condition.
        return makeBranch(cond);
    }
1093
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    // Truncate src to a signed 32-bit integer in dest, branching on
    // success/failure per branchType. VCVT saturates out-of-range values to
    // INT32_MIN/INT32_MAX, which is what the doubling trick below detects.
    // Clobbers fpTempRegister and dataTempRegister.
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Convert into dest.
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Calculate 2x dest. If the value potentially underflowed, it will have
        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
        // overflow the result will be equal to -2.
        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));

        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
        underflow.link(this);
        if (branchType == BranchIfTruncateSuccessful)
            return noOverflow;

        // We'll reach the current point in the code on failure, so plant a
        // jump here & link the success case.
        Jump failure = jump();
        noOverflow.link(this);
        return failure;
    }
1118
    // Result is undefined if the value is outside of the integer range.
    // (VCVT actually saturates, but callers must not rely on that here.)
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }

    // As above, but converts to an unsigned 32-bit integer.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }
1131
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    // The unused FPRegisterID parameter exists for cross-port signature
    // compatibility; fpTempRegister is used as scratch instead.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
1149
    // Branch if reg is a non-zero, non-NaN double. vcmpz compares against 0.0;
    // the unordered (NaN) case is explicitly filtered out so it does not jump.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }
1159
    // Branch if reg is zero (either sign) or NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
1172
1173 // Stack manipulation operations:
1174 //
1175 // The ABI is assumed to provide a stack abstraction to memory,
1176 // containing machine word sized units of data. Push and pop
1177 // operations add and remove a single register sized unit of data
1178 // to or from the stack. Peek and poke operations read or write
1179 // values on the stack, without moving the current stack position.
1180
    // Pop one machine word from the stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    // Push one register onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    // Push the word at 'address'; goes via dataTempRegister.
    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    // Push an immediate; goes via dataTempRegister.
    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }

    // Pop two registers with a single instruction (register-list encoding).
    // Note: POP with a register list writes in ascending register-number
    // order, regardless of the argument order given here.
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.pop(1 << dest1 | 1 << dest2);
    }

    // Push two registers with a single instruction (see ordering note above
    // popPair; PUSH also operates in register-number order).
    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.push(1 << src1 | 1 << src2);
    }
1212
1213 // Register move operations:
1214 //
1215 // Move values in registers.
1216
    // Load a 32-bit immediate into dest using the shortest available
    // encoding: a Thumb-2 modified immediate MOV, a MVN of the complement,
    // or a MOVW/MOVT pair (MOVT only when the high half is non-zero).
    void move(TrustedImm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

        if (armImm.isValid())
            m_assembler.mov(dest, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
            m_assembler.mvn(dest, armImm);
        else {
            m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
            if (value & 0xffff0000)
                m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
        }
    }
1233
    // Register-to-register move; elided when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    // Pointer immediates are 32-bit on this target.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    // Exchange two registers via dataTempRegister.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Registers are already pointer-width (32-bit), so both extensions are
    // plain moves.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
1261
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Relies on ARM condition codes encoding each condition/inverse pair as
    // adjacent values differing only in the low bit.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    // Full system data memory barrier (DMB SY).
    void memoryFence()
    {
        m_assembler.dmbSY();
    }

    // Patch an already-emitted instruction sequence into a jump to 'destination'.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    // Worst-case byte size of the sequence replaceWithJump may emit.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARMv7Assembler::maxJumpReplacementSize();
    }
1287
1288 // Forwards / external control flow operations:
1289 //
1290 // This set of jump and conditional branch operations return a Jump
1291 // object which may linked at a later point, allow forwards jump,
1292 // or jumps that will require external linkage (after the code has been
1293 // relocated).
1294 //
1295 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
1297 // used (representing the names 'below' and 'above').
1298 //
    // Operands to the comparison are provided in the expected order, e.g.
1300 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1301 // treated as a signed 32bit value, is less than or equal to 5.
1302 //
1303 // jz and jnz test whether the first operand is equal to zero, and take
1304 // an optional second operand of a mask under which to perform the test.
1305 private:
1306
    // Should we be using TEQ for equal/not-equal?
    // Sets the condition flags for 'left <op> right'. Uses CMP when the
    // immediate encodes, CMN (compare-negative) when its negation encodes,
    // otherwise materializes the immediate in dataTempRegister.
    void compare32(RegisterID left, TrustedImm32 right)
    {
        int32_t imm = right.m_value;
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
        if (armImm.isValid())
            m_assembler.cmp(left, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
            m_assembler.cmn(left, armImm);
        else {
            move(TrustedImm32(imm), dataTempRegister);
            m_assembler.cmp(left, dataTempRegister);
        }
    }
1321
    // Sets the condition flags for 'reg & mask'. A mask of -1 degenerates to
    // TST reg, reg. TST cannot take SP as an operand, so SP is first copied
    // into addressTempRegister. Non-encodable masks go via dataTempRegister.
    void test32(RegisterID reg, TrustedImm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid()) {
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, armImm);
                } else
                    m_assembler.tst(reg, armImm);
            } else {
                move(mask, dataTempRegister);
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, dataTempRegister);
                } else
                    m_assembler.tst(reg, dataTempRegister);
            }
        }
    }
1346
1347 public:
    // Public flag-setting test; the condition argument is unused here (the
    // caller follows up with branch(cond) or similar).
    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask)
    {
        test32(reg, mask);
    }

    // Branch on flags previously set by a compare/test.
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }
1357
    // Compare-and-branch family. Memory operands are loaded into a temp
    // register first; addressTempRegister is used when the subsequent
    // branch32 overload itself needs dataTempRegister.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    // Same as the BaseIndex overload, but safe for unaligned addresses
    // (loads via two halfword accesses).
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1415
    // Pointer compare-and-branch; pointers are 32-bit here so this is just
    // a branch32 on the loaded word.
    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }
1421
    // 8-bit compare-and-branch family. The memory overloads zero-extend the
    // loaded byte, so the immediate must fit in 8 bits (asserted below).
    Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch8(cond, addressTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
    {
        // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1451
    // Test-and-branch family: branch on the flags set by 'operand & mask'.
    // A default mask of -1 tests the whole value against zero.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    // 8-bit variants: load (and zero-extend) a byte, then test as above.
    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
1499
    // Indirect jump to the address in 'target'.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    // As above, for an absolute memory location.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
        load32(Address(dataTempRegister), dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }
1518
1519
1520 // Arithmetic control flow operations:
1521 //
1522 // This set of conditional branch operations branch based
1523 // on the result of an arithmetic operation. The operation
1524 // is performed as normal, storing the result.
1525 //
1526 // * jz operations branch if the result is zero.
1527 // * jo operations branch if the (signed) arithmetic
1528 // operation caused an overflow to occur.
1529
    // Add-and-branch family: perform the flag-setting add (ADDS), store the
    // result, and branch on the requested result condition (Zero, Overflow, ...).
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    // Non-encodable immediates go via dataTempRegister.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        return branchAdd32(cond, dest, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }
1563
    // Read-modify-write add at an absolute address, branching on the result
    // condition. Clobbers both temp registers.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        // Move the high bits of the address into addressTempRegister,
        // and load the value into dataTempRegister.
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        // Do the add.
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            // If the operand does not fit into an immediate then load it temporarily
            // into addressTempRegister; since we're overwriting addressTempRegister
            // we'll need to reload it with the high bits of the address afterwards.
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        }

        // Store the result.
        // Note: the str/move above do not affect the flags set by add_S, so
        // the branch below still tests the add's result.
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        return Jump(makeBranch(cond));
    }
1589
    // Multiply-and-branch. SMULL produces the full 64-bit product
    // (low word in dest, high word in dataTempRegister). Signed overflow
    // occurred iff the high word differs from the sign-extension of the low
    // word, which is what the ASR #31 comparison checks.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, src1, src2);

        if (cond == Overflow) {
            m_assembler.asr(addressTempRegister, dest, 31);
            return branch32(NotEqual, addressTempRegister, dataTempRegister);
        }

        return branchTest32(cond, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        return branchMul32(cond, dataTempRegister, src, dest);
    }
1612
    // Negate (as 0 - srcDest, flag-setting) and branch on the result.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        m_assembler.sub_S(srcDest, zero, srcDest);
        return Jump(makeBranch(cond));
    }

    // dest |= src (flag-setting ORRS), then branch on the result.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
1625
    // Subtract-and-branch family (flag-setting SUBS).
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    // Non-encodable immediates go via dataTempRegister.
    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }
1653
    // Jump to (address after this sequence) + (index << scale), i.e. index 0
    // lands immediately after the emitted jump.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        // (In Thumb, reading PC yields the instruction address + 4; the +9
        // presumably accounts for the remaining instruction bytes plus the
        // Thumb interworking bit needed by the bx — verify against the
        // encodings if changing this sequence.)
        move(ARMRegisters::pc, dataTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }
1666
1667 // Miscellaneous operations:
1668
    // Emit a BKPT instruction (software breakpoint).
    void breakpoint(uint8_t imm = 0)
    {
        m_assembler.bkpt(imm);
    }

    // Linkable near call: emits a fixed-width (patchable) immediate move of a
    // placeholder into dataTempRegister followed by BLX, so the linker can
    // later write the real target into the move.
    ALWAYS_INLINE Call nearCall()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }

    // Linkable (far) call; same placeholder mechanism as nearCall.
    ALWAYS_INLINE Call call()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }

    // Direct indirect call through a register; not linkable.
    ALWAYS_INLINE Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    // Call the function pointer stored at 'address'; not linkable.
    ALWAYS_INLINE Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }

    // Return via the link register.
    ALWAYS_INLINE void ret()
    {
        m_assembler.bx(linkRegister);
    }
1701
    // dest = (left <cond> right) ? 1 : 0. The IT (if-then-else) block makes
    // exactly one of the two immediate moves execute.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // As above, with the left operand loaded from memory.
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        compare32(cond, dataTempRegister, right, dest);
    }

    // 8-bit variant: loads (and zero-extends) a byte, then compares against
    // the immediate. Uses addressTempRegister since compare32 may use
    // dataTempRegister.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, addressTempRegister);
        compare32(cond, addressTempRegister, right, dest);
    }

    // dest = (left <cond> right) ? 1 : 0 for an immediate right operand.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1729
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    //
    // dest = ([address] & mask <cond>) ? 1 : 0, via an IT-selected pair of moves.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // 8-bit variant of the above.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1751
    // Emit a fixed-width immediate move whose value can be repatched later;
    // returns a label identifying the patchable location.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }

    // Pointer variant; pointers are 32-bit on this target.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }
1765
    // Compare 'left' against a patchable constant (initially initialRightValue)
    // and branch; 'dataLabel' is set to the patchable move so the constant can
    // be rewritten later.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    // As above, with the left operand loaded from memory (into
    // addressTempRegister, since the patchable move occupies dataTempRegister).
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    // 32-bit-immediate flavor of the above.
    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
1785
    // The patchable* family wraps the corresponding branch emitters while
    // m_makeJumpPatchable is set, which forces makeBranch/jump to emit
    // fixed-size sequences that can later be repointed in place.
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableJump()
    {
        padBeforePatch();
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
1834
    // Store a patchable pointer constant to 'address'; the returned label
    // marks the move whose immediate can later be rewritten.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    // Convenience overload storing a null placeholder.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1842
1843
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        // (Uses BX rather than BLX, so the link register is left untouched
        // and the callee returns directly to our caller.)
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }

    // Redirect a previously-emitted jump into a tail call emitted here.
    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
1856
1857
    // Decode the target a previously-linked call currently points at.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
    }

    // This port patches the register form only; address-form patching is
    // unsupported (see the UNREACHABLE_FOR_PLATFORM stubs below).
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }

    // The patchable sequence is two 32-bit Thumb-2 instructions (movT3 + movt)
    // preceding the label, hence the -8 byte offset.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const unsigned twoWordOpSize = 4;
        return label.labelAtOffset(-twoWordOpSize * 2);
    }
1871
    // Undo a jump replacement, restoring the original patchable
    // compare sequence with 'initialValue' as the constant. The Linux and
    // non-Linux builds restore different instruction shapes (movT3/movt/cmp
    // vs. a single movT3) — presumably matching what each platform's
    // branchPtrWithPatch emits; confirm against ARMv7Assembler before changing.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
    {
#if OS(LINUX)
        ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
#else
        UNUSED_PARAM(rd);
        ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
#endif
    }
1881
    // Address-form jump replacement is unsupported on this port (see
    // canJumpReplacePatchableBranchPtrWithPatch above); these stubs crash if
    // reached.
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
1903
#if USE(MASM_PROBE)
    // Snapshot of every CPU register, expanded from the per-port register
    // list macro.
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    // Passed to a probe callback: the function, its two user arguments, and
    // the captured register state at the probe site.
    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        // Debugging aid: print the context (optionally indented).
        void dump(const char* indentation = 0);
    private:
        void dumpCPURegisters(const char* indentation);
    };

    // For details about probe(), see comment in MacroAssemblerX86_64.h.
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // USE(MASM_PROBE)
1929
1930 protected:
    // Emits an unconditional jump as a fixed-width movT3/movt immediate load
    // (placeholder value 0, filled in when the Jump is linked) followed by a
    // bx through dataTempRegister.
    ALWAYS_INLINE Jump jump()
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        // When m_makeJumpPatchable is set, request the fixed-size jump variant
        // so the jump can later be repatched in place.
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }
1937
    // Emits a conditional jump: an IT block predicates the following
    // fixed-width immediate load + bx on cond, so the branch is only taken
    // when the condition holds. The immediate starts as a placeholder (0)
    // and is filled in when the Jump is linked.
    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    // Convenience overloads: translate the portable condition enums to ARM
    // condition codes and delegate to the primary overload above.
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1948
1949 ArmAddress setupArmAddress(BaseIndex address)
1950 {
1951 if (address.offset) {
1952 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1953 if (imm.isValid())
1954 m_assembler.add(addressTempRegister, address.base, imm);
1955 else {
1956 move(TrustedImm32(address.offset), addressTempRegister);
1957 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1958 }
1959
1960 return ArmAddress(addressTempRegister, address.index, address.scale);
1961 } else
1962 return ArmAddress(address.base, address.index, address.scale);
1963 }
1964
1965 ArmAddress setupArmAddress(Address address)
1966 {
1967 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1968 return ArmAddress(address.base, address.offset);
1969
1970 move(TrustedImm32(address.offset), addressTempRegister);
1971 return ArmAddress(address.base, addressTempRegister);
1972 }
1973
1974 ArmAddress setupArmAddress(ImplicitAddress address)
1975 {
1976 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1977 return ArmAddress(address.base, address.offset);
1978
1979 move(TrustedImm32(address.offset), addressTempRegister);
1980 return ArmAddress(address.base, addressTempRegister);
1981 }
1982
1983 RegisterID makeBaseIndexBase(BaseIndex address)
1984 {
1985 if (!address.offset)
1986 return address.base;
1987
1988 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1989 if (imm.isValid())
1990 m_assembler.add(addressTempRegister, address.base, imm);
1991 else {
1992 move(TrustedImm32(address.offset), addressTempRegister);
1993 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1994 }
1995
1996 return addressTempRegister;
1997 }
1998
1999 void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
2000 {
2001 uint32_t value = imm.m_value;
2002 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
2003 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
2004 }
2005
2006 ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
2007 {
2008 return static_cast<ARMv7Assembler::Condition>(cond);
2009 }
2010
2011 ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
2012 {
2013 return static_cast<ARMv7Assembler::Condition>(cond);
2014 }
2015
2016 ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
2017 {
2018 return static_cast<ARMv7Assembler::Condition>(cond);
2019 }
2020
2021 private:
2022 friend class LinkBuffer;
2023 friend class RepatchBuffer;
2024
2025 static void linkCall(void* code, Call call, FunctionPtr function)
2026 {
2027 ARMv7Assembler::linkCall(code, call.m_label, function.value());
2028 }
2029
2030 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2031 {
2032 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
2033 }
2034
2035 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2036 {
2037 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
2038 }
2039
2040 #if USE(MASM_PROBE)
2041 inline TrustedImm32 trustedImm32FromPtr(void* ptr)
2042 {
2043 return TrustedImm32(TrustedImmPtr(ptr));
2044 }
2045
2046 inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
2047 {
2048 return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2049 }
2050
2051 inline TrustedImm32 trustedImm32FromPtr(void (*function)())
2052 {
2053 return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2054 }
2055 #endif
2056
2057 bool m_makeJumpPatchable;
2058 };
2059
2060 } // namespace JSC
2061
2062 #endif // ENABLE(ASSEMBLER)
2063
2064 #endif // MacroAssemblerARMv7_h