/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;
    static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;

    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };

    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

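    // The immediate forms of and32 below ask getImm() for an invertible operand2:
    // if the mask itself cannot be encoded as an ARM immediate but its bitwise
    // complement can, the returned value is tagged with OP2_INV_IMM and the AND is
    // emitted as a BIC with the complemented mask, avoiding a scratch-register load.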
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, src, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, src, w);
    }

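    // The register-controlled shifts below first mask the shift amount to its low
    // five bits in S0, so shifts by 32 or more behave like the immediate forms
    // (native ARM register shifts would otherwise use the whole low byte).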
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orrs_r(dest, op1, op2);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(src, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns_r(dest, dest);
        else
            m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns_r(dest, src);
        else
            m_assembler.eors_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

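    // CLZ is only available on ARMv5 and later; on older cores this helper is not
    // expected to be reached and simply asserts.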
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz_r(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
#endif
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

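    // load32WithAddressOffsetPatch (and store32WithAddressOffsetPatch below) load a
    // placeholder offset of 0 from the constant pool into S0 and then use a
    // register-offset transfer; the returned DataLabel32 marks the constant so the
    // real offset can be patched in later.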
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        load32WithAddressOffsetPatch(address, dest);
        return dataLabel;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
        load16(Address(ARMRegisters::S1, address.offset), dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
        else
            m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

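    // For a plain (non-pointer) immediate, the comparison below first tries to
    // encode the negated value and use CMN, which avoids loading the constant into
    // a scratch register; 0x80000000 is excluded because its negation cannot be
    // represented in 32 bits.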
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else {
            ARMWord tmp = (right.m_value == 0x80000000) ? ARMAssembler::INVALID_IMM : m_assembler.getOp2(-right.m_value);
            if (tmp != ARMAssembler::INVALID_IMM)
                m_assembler.cmn_r(left, tmp);
            else
                m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        }
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

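    // mull32 does a signed 32x32->64 multiply (SMULL) with the high word in S1 and
    // the low word in dest, then compares the high word against the low word shifted
    // arithmetically right by 31; the two only match when the product fits in 32
    // bits, which is how branchMul32 detects overflow.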
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.subs_r(dest, op1, op2);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, ARMRegisters::S1);
        compare32(cond, ARMRegisters::S1, right, dest);
    }

    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmp_r(0, reg);
        else
            m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
    }

    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

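    // moveWithPatch always materialises the pointer with a constant-pool load
    // (ldr_un_imm) rather than an inline immediate, so the returned DataLabelPtr
    // marks a constant that can be repatched with a different pointer later.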
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }

    // Floating point operators
    static bool supportsFloatingPoint()
    {
        return s_isVFPPresent;
    }

    static bool supportsFloatingPointTruncate()
    {
        return false;
    }

    static bool supportsFloatingPointSqrt()
    {
        return s_isVFPPresent;
    }
    static bool supportsFloatingPointAbs() { return false; }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64_r(dest, src);
    }

    void absDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT_NOT_REACHED();
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp_r(dest << 1, src);
        m_assembler.vcvt_f64_s32_r(dest, dest << 1);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds is not worth the effort here
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

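    // branchDouble copies the VFP comparison flags into the APSR; for conditions
    // carrying DoubleConditionBitSpecial an extra CMP of S0 with itself is executed
    // only when V is set (the unordered case), forcing the flags to "equal" so that
    // NE-based tests reject NaN operands and EQ-based tests accept them.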
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64_r(left, right);
        m_assembler.vmrs_apsr();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals -0.0.
        failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void nop()
    {
        m_assembler.nop();
    }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
    }

protected:
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

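    // Pre-ARMv5 cores have no BLX, so prepareCall() sets up the return address by
    // copying pc into lr just before the branch that performs the call (reading pc
    // yields the current instruction plus 8, i.e. the instruction after the branch).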
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }

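    // call32 loads the callee address from base + offset. On ARMv5 and later the
    // address goes into S1 and the call is made with BLX; on older cores the load
    // writes pc directly, with prepareCall() having set up lr beforehand.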
    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, tmpReg);
            }
        } else {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, tmpReg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static const bool s_isVFPPresent;
};

}

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // MacroAssemblerARM_h