]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerSH4.h
JavaScriptCore-1097.13.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerSH4.h
1 /*
2 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
3 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerSH4_h
28 #define MacroAssemblerSH4_h
29
30 #if ENABLE(ASSEMBLER) && CPU(SH4)
31
32 #include "SH4Assembler.h"
33 #include "AbstractMacroAssembler.h"
34 #include <wtf/Assertions.h>
35
36 namespace JSC {
37
38 class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
39 public:
40 typedef SH4Assembler::FPRegisterID FPRegisterID;
41
42 static const Scale ScalePtr = TimesFour;
43 static const FPRegisterID fscratch = SH4Registers::fr10;
44 static const RegisterID stackPointerRegister = SH4Registers::sp;
45 static const RegisterID linkRegister = SH4Registers::pr;
46 static const RegisterID scratchReg3 = SH4Registers::r13;
47
48 static const int MaximumCompactPtrAlignedAddressOffset = 60;
49
50 enum RelationalCondition {
51 Equal = SH4Assembler::EQ,
52 NotEqual = SH4Assembler::NE,
53 Above = SH4Assembler::HI,
54 AboveOrEqual = SH4Assembler::HS,
55 Below = SH4Assembler::LI,
56 BelowOrEqual = SH4Assembler::LS,
57 GreaterThan = SH4Assembler::GT,
58 GreaterThanOrEqual = SH4Assembler::GE,
59 LessThan = SH4Assembler::LT,
60 LessThanOrEqual = SH4Assembler::LE
61 };
62
63 enum ResultCondition {
64 Overflow = SH4Assembler::OF,
65 Signed = SH4Assembler::SI,
66 Zero = SH4Assembler::EQ,
67 NonZero = SH4Assembler::NE
68 };
69
70 enum DoubleCondition {
71 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
72 DoubleEqual = SH4Assembler::EQ,
73 DoubleNotEqual = SH4Assembler::NE,
74 DoubleGreaterThan = SH4Assembler::GT,
75 DoubleGreaterThanOrEqual = SH4Assembler::GE,
76 DoubleLessThan = SH4Assembler::LT,
77 DoubleLessThanOrEqual = SH4Assembler::LE,
78 // If either operand is NaN, these conditions always evaluate to true.
79 DoubleEqualOrUnordered = SH4Assembler::EQU,
80 DoubleNotEqualOrUnordered = SH4Assembler::NEU,
81 DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
82 DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
83 DoubleLessThanOrUnordered = SH4Assembler::LTU,
84 DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
85 };
86
    // Acquire a temporary GPR from the assembler's scratch-register pool.
    // Must be paired with a releaseScratch() call; the pool is finite.
    RegisterID claimScratch()
    {
        return m_assembler.claimScratch();
    }

    // Return a register previously obtained via claimScratch() to the pool.
    void releaseScratch(RegisterID reg)
    {
        m_assembler.releaseScratch(reg);
    }
96
97 // Integer arithmetic operations
98
    // dest += src
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addlRegReg(src, dest);
    }

    // dest += imm. Uses the single-instruction add #imm8 form when the
    // constant fits, otherwise materializes it in a scratch register.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (m_assembler.isImmediate(imm.m_value)) {
            m_assembler.addlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.addlRegReg(scr, dest);
        releaseScratch(scr);
    }

    // dest = src + imm
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movlRegReg(src, dest);
        add32(imm, dest);
    }

    // [address] += imm (read-modify-write through a scratch register)
    void add32(TrustedImm32 imm, Address address)
    {
        RegisterID scr = claimScratch();
        load32(address, scr);
        add32(imm, scr);
        store32(scr, address);
        releaseScratch(scr);
    }

    // dest += [src]
    void add32(Address src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        m_assembler.addlRegReg(scr, dest);
        releaseScratch(scr);
    }
140
    // dest &= src
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andlRegReg(src, dest);
    }

    // dest &= imm. The and #imm8 instruction only targets r0 and takes an
    // unsigned 8-bit immediate; otherwise go through a scratch register.
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
            m_assembler.andlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant((imm.m_value), scr);
        m_assembler.andlRegReg(scr, dest);
        releaseScratch(scr);
    }
158
159 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
160 {
161 if (src != dest) {
162 move(imm, dest);
163 and32(src, dest);
164 return;
165 }
166
167 and32(imm, dest);
168 }
169
    // dest <<= (shiftamount & 0x1f)
    // NOTE(review): clobbers the shiftamount register — it is masked in
    // place before the shift; callers must not rely on it afterwards.
    void lshift32(RegisterID shiftamount, RegisterID dest)
    {
        if (shiftamount == SH4Registers::r0)
            m_assembler.andlImm8r(0x1f, shiftamount);
        else {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(0x1f, scr);
            m_assembler.andlRegReg(scr, shiftamount);
            releaseScratch(scr);
        }
        m_assembler.shllRegReg(dest, shiftamount);
    }

    // Arithmetic right shift by a constant: the dynamic shift instruction
    // shifts right when given a negative count, hence the negated immediate.
    void rshift32(int imm, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(-imm, scr);
        m_assembler.shaRegReg(dest, scr);
        releaseScratch(scr);
    }
190
    // dest <<= imm
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            return; // shift by zero: emit nothing

        // shll/shll2/shll8/shll16 cover these counts in one instruction.
        if ((imm.m_value == 1) || (imm.m_value == 2) || (imm.m_value == 8) || (imm.m_value == 16)) {
            m_assembler.shllImm8r(imm.m_value, dest);
            return;
        }

        // General case: dynamic shift by the count taken mod 32.
        RegisterID scr = claimScratch();
        m_assembler.loadConstant((imm.m_value & 0x1f) , scr);
        m_assembler.shllRegReg(dest, scr);
        releaseScratch(scr);
    }

    // dest = src << shiftamount
    void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);

        lshift32(shiftamount, dest);
    }
214
    // dest *= src. mul.l leaves the 32-bit product in MACL; sts copies it
    // back into dest.
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imullRegReg(src, dest);
        m_assembler.stsmacl(dest);
    }

    // dest = src * imm
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        move(imm, scr);
        if (src != dest)
            move(src, dest);
        mul32(scr, dest);
        releaseScratch(scr);
    }
230
    // dest |= src
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orlRegReg(src, dest);
    }

    // dest |= imm. The or #imm8 instruction only targets r0 with an
    // unsigned 8-bit immediate; otherwise go through a scratch register.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
            m_assembler.orlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.orlRegReg(scr, dest);
        releaseScratch(scr);
    }
248
249 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
250 {
251 if (op1 == op2)
252 move(op1, dest);
253 else if (op1 == dest)
254 or32(op2, dest);
255 else {
256 move(op2, dest);
257 or32(op1, dest);
258 }
259 }
260
261
262 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
263 {
264 if (src != dest) {
265 move(imm, dest);
266 or32(src, dest);
267 return;
268 }
269
270 or32(imm, dest);
271 }
272
273 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
274 {
275 if (src != dest) {
276 move(imm, dest);
277 xor32(src, dest);
278 return;
279 }
280
281 xor32(imm, dest);
282 }
283
    // dest >>= (shiftamount & 0x1f), arithmetic.
    // NOTE(review): clobbers shiftamount — it is masked and then negated
    // in place (the dynamic shift shifts right for negative counts).
    void rshift32(RegisterID shiftamount, RegisterID dest)
    {
        if (shiftamount == SH4Registers::r0)
            m_assembler.andlImm8r(0x1f, shiftamount);
        else {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(0x1f, scr);
            m_assembler.andlRegReg(scr, shiftamount);
            releaseScratch(scr);
        }
        m_assembler.neg(shiftamount, shiftamount);
        m_assembler.shaRegReg(dest, shiftamount);
    }

    // dest >>= (imm & 0x1f), arithmetic. Counts that are multiples of 32
    // emit nothing (shift amounts are taken mod 32).
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value & 0x1f)
            rshift32(imm.m_value & 0x1f, dest);
    }
303
304 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
305 {
306 if (src != dest)
307 move(src, dest);
308 rshift32(imm, dest);
309 }
310
    // dest -= src
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sublRegReg(src, dest);
    }

    // [address] -= imm, using the caller-supplied register to hold the
    // address. Falls back to the dedicated scratchReg3 (r13) when -imm
    // does not fit the add #imm8 form.
    void sub32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
    {
        RegisterID result = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(-imm.m_value))
            m_assembler.addlImm8r(-imm.m_value, result); // subtract as add of -imm
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.sublRegReg(scratchReg3, result);
        }

        store32(result, scratchReg); // scratchReg still holds the address
        releaseScratch(result);
    }

    // [address] -= imm (claims its own address register)
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        RegisterID result = claimScratch();
        RegisterID scratchReg = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(-imm.m_value))
            m_assembler.addlImm8r(-imm.m_value, result);
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.sublRegReg(scratchReg3, result);
        }

        store32(result, scratchReg);
        releaseScratch(result);
        releaseScratch(scratchReg);
    }
353
    // [address] += imm, using the caller-supplied register to hold the
    // address; large immediates go through the dedicated scratchReg3.
    void add32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
    {
        RegisterID result = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(imm.m_value))
            m_assembler.addlImm8r(imm.m_value, result);
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.addlRegReg(scratchReg3, result);
        }

        store32(result, scratchReg); // scratchReg still holds the address
        releaseScratch(result);
    }

    // [address] += imm (claims its own address register)
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        RegisterID result = claimScratch();
        RegisterID scratchReg = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(imm.m_value))
            m_assembler.addlImm8r(imm.m_value, result);
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.addlRegReg(scratchReg3, result);
        }

        store32(result, scratchReg);
        releaseScratch(result);
        releaseScratch(scratchReg);
    }
391
    // dest -= imm. Prefers a single add #-imm8 when the negated constant
    // fits; otherwise subtracts a materialized constant.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (m_assembler.isImmediate(-imm.m_value)) {
            m_assembler.addlImm8r(-imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.sublRegReg(scr, dest);
        releaseScratch(scr);
    }

    // dest -= [src]
    void sub32(Address src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        m_assembler.sublRegReg(scr, dest);
        releaseScratch(scr);
    }
412
    // dest ^= src
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorlRegReg(src, dest);
    }

    // srcDest ^= imm. XOR with -1 collapses to a single not; the
    // xor #imm8 form is only usable on r0 with an unsigned 8-bit value.
    void xor32(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == -1) {
            m_assembler.notlReg(srcDest, srcDest);
            return;
        }

        if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant((imm.m_value), scr);
            m_assembler.xorlRegReg(scr, srcDest);
            releaseScratch(scr);
            return;
        }

        m_assembler.xorlImm8r(imm.m_value, srcDest);
    }
435
    // Compare dst against imm under cond (sets the T bit for later
    // branch/set). cmp/eq #imm8,r0 covers the r0 equality fast path.
    void compare32(int imm, RegisterID dst, RelationalCondition cond)
    {
        if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
            m_assembler.cmpEqImmR0(imm, dst);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm, scr);
        m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
        releaseScratch(scr);
    }
448
    // Compare left against the word at [base + offset]. The short
    // mov.l @(disp,Rn) form reaches offsets 0..60 (disp4 scaled by 4);
    // anything else computes the address in a scratch register.
    void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
    {
        RegisterID scr = claimScratch();
        if (!offset) {
            m_assembler.movlMemReg(base, scr);
            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        if ((offset < 0) || (offset >= 64)) {
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
        releaseScratch(scr);
    }
472
    // tst of imm against the word at [base + offset] (sets T if the
    // bitwise AND is zero).
    void testImm(int imm, int offset, RegisterID base)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();

        // Load the memory operand, picking the cheapest addressing form.
        if ((offset < 0) || (offset >= 64)) {
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
        } else if (offset)
            m_assembler.movlMemReg(offset >> 2, base, scr);
        else
            m_assembler.movlMemReg(base, scr);
        // Materialize the immediate (single mov #imm8 when it fits).
        if (m_assembler.isImmediate(imm))
            m_assembler.movImm8(imm, scr1);
        else
            m_assembler.loadConstant(imm, scr1);

        m_assembler.testlRegReg(scr, scr1);
        releaseScratch(scr);
        releaseScratch(scr1);
    }
495
    // tst of imm against dst. The tst #imm8,r0 form is only usable for
    // r0 with an unsigned 8-bit immediate.
    void testlImm(int imm, RegisterID dst)
    {
        if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
            m_assembler.testlImm8r(imm, dst);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm, scr);
        m_assembler.testlRegReg(scr, dst);
        releaseScratch(scr);
    }
508
    // Compare the word at [base + offset] against right under cond.
    void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
    {
        if (!offset) {
            RegisterID scr = claimScratch();
            m_assembler.movlMemReg(base, scr);
            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        // Offsets outside the 0..60 short-form window: compute the
        // address explicitly.
        if ((offset < 0) || (offset >= 64)) {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
        releaseScratch(scr);
    }
534
    // Compare the word at [base + offset] against the constant imm.
    void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
    {
        if (!offset) {
            RegisterID scr = claimScratch();
            RegisterID scr1 = claimScratch();
            m_assembler.movlMemReg(base, scr);
            m_assembler.loadConstant(imm, scr1);
            m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
            releaseScratch(scr1);
            releaseScratch(scr);
            return;
        }

        // Offsets outside the 0..60 short-form window.
        if ((offset < 0) || (offset >= 64)) {
            RegisterID scr = claimScratch();
            RegisterID scr1 = claimScratch();
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.loadConstant(imm, scr1);
            m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
            releaseScratch(scr1);
            releaseScratch(scr);
            return;
        }

        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.loadConstant(imm, scr1);
        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
        releaseScratch(scr1);
        releaseScratch(scr);
    }
569
570 // Memory access operation
571
    // dest = word at [address.base + address.offset]
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(address.base, address.offset, dest);
    }

    // dest = zero-extended byte at [address.base + address.offset]
    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(address.base, address.offset, dest);
    }
581
    // dest = zero-extended byte at [base + (index << scale) + offset].
    // The effective address is folded into a scratch register first.
    void load8(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        load8(scr, address.offset, dest);
        releaseScratch(scr);
    }

    // dest = word at [base + (index << scale) + offset]
    void load32(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        load32(scr, address.offset, dest);
        releaseScratch(scr);
    }
601
    // dest = word at the absolute address (address constant is
    // materialized into dest, then dereferenced in place).
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)), dest);
        m_assembler.movlMemReg(dest, dest);
    }
607
    // dest = word at [base + offset], picking the cheapest form:
    // @Rn, @(disp4*4,Rn) for 0..60, @(r0,Rn) when dest is r0, or an
    // explicit address computation (scratch needed only if dest == base).
    void load32(RegisterID base, int offset, RegisterID dest)
    {
        if (!offset) {
            m_assembler.movlMemReg(base, dest);
            return;
        }

        if ((offset >= 0) && (offset < 64)) {
            m_assembler.movlMemReg(offset >> 2, base, dest);
            return;
        }

        if ((dest == SH4Registers::r0) && (dest != base)) {
            m_assembler.loadConstant((offset), dest);
            m_assembler.movlR0mr(base, dest);
            return;
        }

        RegisterID scr;
        if (dest == base)
            scr = claimScratch();
        else
            scr = dest; // dest is free to use as the address temp
        m_assembler.loadConstant((offset), scr);
        m_assembler.addlRegReg(base, scr);
        m_assembler.movlMemReg(scr, dest);

        if (dest == base)
            releaseScratch(scr);
    }
638
639 void load8(RegisterID base, int offset, RegisterID dest)
640 {
641 if (!offset) {
642 m_assembler.movbMemReg(base, dest);
643 m_assembler.extub(dest, dest);
644 return;
645 }
646
647 if ((offset > 0) && (offset < 64) && (dest == SH4Registers::r0)) {
648 m_assembler.movbMemReg(offset, base, dest);
649 m_assembler.extub(dest, dest);
650 return;
651 }
652
653 if (base != dest) {
654 m_assembler.loadConstant((offset), dest);
655 m_assembler.addlRegReg(base, dest);
656 m_assembler.movbMemReg(dest, dest);
657 m_assembler.extub(dest, dest);
658 return;
659 }
660
661 RegisterID scr = claimScratch();
662 m_assembler.loadConstant((offset), scr);
663 m_assembler.addlRegReg(base, scr);
664 m_assembler.movbMemReg(scr, dest);
665 m_assembler.extub(dest, dest);
666 releaseScratch(scr);
667 }
668
    // dest = word at [r0 + src] (r0-indexed form; first argument must be r0)
    void load32(RegisterID r0, RegisterID src, RegisterID dst)
    {
        ASSERT(r0 == SH4Registers::r0);
        m_assembler.movlR0mr(src, dst);
    }

    // dst = word at [src]
    void load32(RegisterID src, RegisterID dst)
    {
        m_assembler.movlMemReg(src, dst);
    }
679
    // dest = zero-extended halfword at [address.base + address.offset].
    // NOTE(review): mov.w @(disp,Rm),R0 encodes a 4-bit displacement
    // scaled by 2 (byte offsets 0..30, even), yet the fast path below
    // accepts offsets up to 63 and passes the raw byte offset — verify
    // against SH4Assembler::movwMemReg's expected operand and range.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (!address.offset) {
            m_assembler.movwMemReg(address.base, dest);
            extuw(dest, dest);
            return;
        }

        if ((address.offset > 0) && (address.offset < 64) && (dest == SH4Registers::r0)) {
            m_assembler.movwMemReg(address.offset, address.base, dest);
            extuw(dest, dest);
            return;
        }

        // dest can double as the address temp when it is not the base.
        if (address.base != dest) {
            m_assembler.loadConstant((address.offset), dest);
            m_assembler.addlRegReg(address.base, dest);
            m_assembler.movwMemReg(dest, dest);
            extuw(dest, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant((address.offset), scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movwMemReg(scr, dest);
        extuw(dest, dest);
        releaseScratch(scr);
    }
709
    // dest = 16-bit value at a possibly unaligned address, assembled
    // byte-by-byte (little-endian: low byte first, high byte << 8).
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {

        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();

        // scr = base + (index << scale) + offset
        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        add32(address.base, scr);
        load8(scr, scr1);          // low byte
        add32(TrustedImm32(1), scr);
        load8(scr, dest);          // high byte
        m_assembler.shllImm8r(8, dest);
        or32(scr1, dest);

        releaseScratch(scr);
        releaseScratch(scr1);
    }
732
    // dest = zero-extended halfword at [src]
    void load16(RegisterID src, RegisterID dest)
    {
        m_assembler.movwMemReg(src, dest);
        extuw(dest, dest);
    }

    // dest = zero-extended halfword at [r0 + src] (first argument must be r0)
    void load16(RegisterID r0, RegisterID src, RegisterID dest)
    {
        ASSERT(r0 == SH4Registers::r0);
        m_assembler.movwR0mr(src, dest);
        extuw(dest, dest);
    }
745
    // dest = zero-extended halfword at [base + (index << scale) + offset].
    // When the base is r0 the r0-indexed form is used with scr holding
    // the scaled index + offset; otherwise the full address is folded
    // into scr.
    void load16(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);
        if (address.base == SH4Registers::r0)
            load16(address.base, scr, dest);
        else {
            add32(address.base, scr);
            load16(scr, dest);
        }

        releaseScratch(scr);
    }
764
    // [address.base + address.offset] = src
    void store32(RegisterID src, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        store32(src, address.offset, address.base, scr);
        releaseScratch(scr);
    }

    // [base + offset] = src; scr is a caller-supplied temp for the
    // address. When scr happens to be r0 the r0-indexed store form is
    // used directly (r0 already holds the offset).
    void store32(RegisterID src, int offset, RegisterID base, RegisterID scr)
    {
        if (!offset) {
            m_assembler.movlRegMem(src, base);
            return;
        }

        if ((offset >=0) && (offset < 64)) {
            m_assembler.movlRegMem(src, offset >> 2, base);
            return;
        }

        m_assembler.loadConstant((offset), scr);
        if (scr == SH4Registers::r0) {
            m_assembler.movlRegMemr0(src, base);
            return;
        }

        m_assembler.addlRegReg(base, scr);
        m_assembler.movlRegMem(src, scr);
    }
793
    // [base + r0] = src (r0-indexed store; offset register must be r0)
    void store32(RegisterID src, RegisterID offset, RegisterID base)
    {
        ASSERT(offset == SH4Registers::r0);
        m_assembler.movlRegMemr0(src, base);
    }

    // [dst] = src
    void store32(RegisterID src, RegisterID dst)
    {
        m_assembler.movlRegMem(src, dst);
    }
804
    // [address.base + address.offset] = imm (constant goes through scr,
    // scr1 is the address temp for the delegated store)
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        m_assembler.loadConstant((imm.m_value), scr);
        store32(scr, address.offset, address.base, scr1);
        releaseScratch(scr);
        releaseScratch(scr1);
    }

    // [base + (index << scale) + offset] = src
    void store32(RegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        store32(src, Address(scr, address.offset));

        releaseScratch(scr);
    }
826
    // *address = imm (absolute store of a constant)
    void store32(TrustedImm32 imm, void* address)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        m_assembler.loadConstant((imm.m_value), scr);
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr1);
        m_assembler.movlRegMem(scr, scr1);
        releaseScratch(scr);
        releaseScratch(scr1);
    }

    // *address = src (absolute store of a register)
    void store32(RegisterID src, void* address)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
        m_assembler.movlRegMem(src, scr);
        releaseScratch(scr);
    }
845
    // Patchable load: the offset constant is emitted via
    // loadConstantUnReusable so the JIT can rewrite it later through the
    // returned DataLabel32.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        DataLabel32 label(this);
        m_assembler.loadConstantUnReusable(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlMemReg(scr, dest);
        releaseScratch(scr);
        return label;
    }

    // Patchable store counterpart of the above.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        RegisterID scr = claimScratch();
        DataLabel32 label(this);
        m_assembler.loadConstantUnReusable(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlRegMem(src, scr);
        releaseScratch(scr);
        return label;
    }

    // Compact patchable load: single-instruction form, so the offset must
    // already fit the short displacement window (asserted below).
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
        ASSERT(address.offset >= 0);
        m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
        return dataLabel;
    }
876
877 // Floating-point operations
878
    // Capability queries for the JIT tiers: SH4 has an FPU with truncate
    // and sqrt, but no abs implementation here.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return false; }
883
    // Load a double as two 32-bit halves; the half loaded into dest + 1
    // comes from the lower address (see the matching storeDouble).
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();

        m_assembler.loadConstant(address.offset, scr);
        if (address.base == SH4Registers::r0) {
            m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
            m_assembler.addlImm8r(4, scr);
            m_assembler.fmovsReadr0r(scr, dest);
            releaseScratch(scr);
            return;
        }

        m_assembler.addlRegReg(address.base, scr);
        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1)); // post-increment read
        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    // Load a double from an absolute address (same two-half scheme).
    void loadDouble(const void* address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    // Store a double as two 32-bit halves: src + 1 to the lower address,
    // src to the higher one.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.fmovsWriterm((FPRegisterID)(src + 1), scr);
        m_assembler.addlImm8r(4, scr);
        m_assembler.fmovsWriterm(src, scr);
        releaseScratch(scr);
    }
922
    // dest += src (double)
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.daddRegReg(src, dest);
    }

    // dest += [address] (double), via the dedicated fscratch register
    void addDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        addDouble(fscratch, dest);
    }

    // dest -= src (double)
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.dsubRegReg(src, dest);
    }

    // dest -= [address] (double)
    void subDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        subDouble(fscratch, dest);
    }

    // dest *= src (double)
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.dmulRegReg(src, dest);
    }

    // dest *= [address] (double)
    void mulDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        mulDouble(fscratch, dest);
    }

    // dest /= src (double)
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.ddivRegReg(src, dest);
    }
960
    // dest = (double)src: route the integer through FPUL, then convert.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.ldsrmfpul(src);
        m_assembler.floatfpulDreg(dest);
    }
966
967 void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
968 {
969 RegisterID scr = claimScratch();
970 m_assembler.loadConstant(reinterpret_cast<uint32_t>(src.m_ptr), scr);
971 convertInt32ToDouble(scr, dest);
972 releaseScratch(scr);
973 }
974
    // dest = (double) word at [src]
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        convertInt32ToDouble(scr, dest);
        releaseScratch(scr);
    }
982
    // Load a 32-bit value from a possibly unaligned address. Dispatches
    // at runtime on (address & 3): word-aligned -> single load32;
    // halfword-aligned -> two load16 combined; otherwise byte-by-byte
    // (little-endian assembly). r0 is used for the alignment test and is
    // saved/restored via scr1 when it is not the destination. The
    // ensureSpace call keeps the whole sequence within near-jump range.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        Jump m_jump;
        JumpList end;

        if (dest != SH4Registers::r0)
            move(SH4Registers::r0, scr1); // preserve r0

        // scr = base + (index << scale) + offset
        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 68, sizeof(uint32_t));
        move(scr, SH4Registers::r0);
        m_assembler.andlImm8r(0x3, SH4Registers::r0);
        m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
        if (dest != SH4Registers::r0)
            move(scr1, SH4Registers::r0);

        // Aligned path: one word load.
        load32(scr, dest);
        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
        m_assembler.nop();
        m_jump.link(this);
        m_assembler.andlImm8r(0x1, SH4Registers::r0);
        m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);

        if (dest != SH4Registers::r0)
            move(scr1, SH4Registers::r0);

        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
        // Halfword-aligned path: low half, then high half << 16.
        load16(scr, scr1);
        add32(TrustedImm32(2), scr);
        load16(scr, dest);
        m_assembler.shllImm8r(16, dest);
        or32(scr1, dest);
        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
        m_assembler.nop();
        m_jump.link(this);
        // Odd-address path: byte + halfword + byte.
        load8(scr, scr1);
        add32(TrustedImm32(1), scr);
        load16(scr, dest);
        m_assembler.shllImm8r(8, dest);
        or32(dest, scr1);
        add32(TrustedImm32(2), scr);
        load8(scr, dest);
        m_assembler.shllImm8r(8, dest);
        m_assembler.shllImm8r(16, dest);
        or32(scr1, dest);
        end.link(this);

        releaseScratch(scr);
        releaseScratch(scr1);
    }
1042
    // Branch on comparing an unaligned 32-bit load against a constant.
    // Uses the dedicated scratchReg3 so claimScratch state is untouched;
    // tst reg,reg is the cheap form for (in)equality against zero.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        RegisterID scr = scratchReg3;
        load32WithUnalignedHalfWords(left, scr);
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testlRegReg(scr, scr);
        else
            compare32(right.m_value, scr, cond);

        if (cond == NotEqual)
            return branchFalse(); // T bit is set for equality; invert
        return branchTrue();
    }
1056
    // Branch if reg != 0.0 (ordered): compare against a zero double built
    // in the caller-provided FP scratch. Clobbers scratchReg3.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.movImm8(0, scratchReg3);
        convertInt32ToDouble(scratchReg3, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch if reg == 0.0 or reg is NaN. Clobbers scratchReg3.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.movImm8(0, scratchReg3);
        convertInt32ToDouble(scratchReg3, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
1070
    // Emit a double compare-and-branch. Ordered conditions map directly onto
    // the SH4 double-compare instructions (dcmppeq / dcmppgt set the T bit).
    // DoubleNotEqual and the *OrUnordered conditions have no direct hardware
    // support: each operand is converted to single precision (dcnvds -> FPUL)
    // and compared against 0x7fbfffff — presumably the bit pattern a NaN
    // converts down to (TODO confirm against the SH-4 FPU manual) — so NaN
    // operands can be detected first and routed to the correct outcome.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond == DoubleEqual) {
            m_assembler.dcmppeq(right, left);
            return branchTrue();
        }

        if (cond == DoubleNotEqual) {
            RegisterID scr = claimScratch();
            JumpList end;
            // NaN checks: an ordered "not equal" is false for unordered
            // operands, so either operand being NaN skips the branch.
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            // Reserve contiguous space so the near jumps below stay in range.
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, left);
            releaseScratch(scr);
            Jump m_jump = branchFalse();
            end.link(this);
            return m_jump;
        }

        if (cond == DoubleGreaterThan) {
            m_assembler.dcmppgt(right, left);
            return branchTrue();
        }

        if (cond == DoubleGreaterThanOrEqual) {
            // left >= right  <=>  !(right > left)
            m_assembler.dcmppgt(left, right);
            return branchFalse();
        }

        if (cond == DoubleLessThan) {
            m_assembler.dcmppgt(left, right);
            return branchTrue();
        }

        if (cond == DoubleLessThanOrEqual) {
            // left <= right  <=>  !(left > right)
            m_assembler.dcmppgt(right, left);
            return branchFalse();
        }

        if (cond == DoubleEqualOrUnordered) {
            // Taken when equal OR either operand is NaN; the NaN jumps land
            // on the branch-taken path.
            RegisterID scr = claimScratch();
            JumpList end;
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(left, right);
            Jump m_jump = Jump(m_assembler.je());
            end.link(this);
            m_assembler.extraInstrForBranch(scr);
            releaseScratch(scr);
            return m_jump;
        }

        if (cond == DoubleGreaterThanOrUnordered) {
            RegisterID scr = claimScratch();
            JumpList end;
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(right, left);
            Jump m_jump = Jump(m_assembler.je());
            end.link(this);
            m_assembler.extraInstrForBranch(scr);
            releaseScratch(scr);
            return m_jump;
        }

        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
            RegisterID scr = claimScratch();
            JumpList end;
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            // left >= right via inverted (right > left): branch when T clear.
            m_assembler.dcmppgt(left, right);
            Jump m_jump = Jump(m_assembler.jne());
            end.link(this);
            m_assembler.extraInstrForBranch(scr);
            releaseScratch(scr);
            return m_jump;
        }

        if (cond == DoubleLessThanOrUnordered) {
            RegisterID scr = claimScratch();
            JumpList end;
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(left, right);
            Jump m_jump = Jump(m_assembler.je());
            end.link(this);
            m_assembler.extraInstrForBranch(scr);
            releaseScratch(scr);
            return m_jump;
        }

        if (cond == DoubleLessThanOrEqualOrUnordered) {
            RegisterID scr = claimScratch();
            JumpList end;
            m_assembler.loadConstant(0x7fbfffff, scratchReg3);
            m_assembler.dcnvds(right);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            m_assembler.dcnvds(left);
            m_assembler.stsfpulReg(scr);
            m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
            end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
            // left <= right via inverted (left > right): branch when T clear.
            m_assembler.dcmppgt(right, left);
            Jump m_jump = Jump(m_assembler.jne());
            end.link(this);
            m_assembler.extraInstrForBranch(scr);
            releaseScratch(scr);
            return m_jump;
        }

        ASSERT(cond == DoubleNotEqualOrUnordered);
        RegisterID scr = claimScratch();
        JumpList end;
        m_assembler.loadConstant(0x7fbfffff, scratchReg3);
        m_assembler.dcnvds(right);
        m_assembler.stsfpulReg(scr);
        m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
        end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
        m_assembler.dcnvds(left);
        m_assembler.stsfpulReg(scr);
        m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
        end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
        m_assembler.dcmppeq(right, left);
        Jump m_jump = Jump(m_assembler.jne());
        end.link(this);
        m_assembler.extraInstrForBranch(scr);
        releaseScratch(scr);
        return m_jump;
    }
1243
1244 Jump branchTrue()
1245 {
1246 m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
1247 Jump m_jump = Jump(m_assembler.je());
1248 m_assembler.extraInstrForBranch(scratchReg3);
1249 return m_jump;
1250 }
1251
1252 Jump branchFalse()
1253 {
1254 m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
1255 Jump m_jump = Jump(m_assembler.jne());
1256 m_assembler.extraInstrForBranch(scratchReg3);
1257 return m_jump;
1258 }
1259
1260 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1261 {
1262 RegisterID scr = claimScratch();
1263 move(left.index, scr);
1264 lshift32(TrustedImm32(left.scale), scr);
1265 add32(left.base, scr);
1266 load32(scr, left.offset, scr);
1267 compare32(right.m_value, scr, cond);
1268 releaseScratch(scr);
1269
1270 if (cond == NotEqual)
1271 return branchFalse();
1272 return branchTrue();
1273 }
1274
1275 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1276 {
1277 if (dest != src)
1278 m_assembler.dmovRegReg(src, dest);
1279 m_assembler.dsqrt(dest);
1280 }
1281
    // Not implemented on SH4; callers must not reach this (presumably the
    // port reports no floating-point abs support — TODO confirm).
    void absDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT_NOT_REACHED();
    }
1286
1287 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1288 {
1289 RegisterID addressTempRegister = claimScratch();
1290 load8(address, addressTempRegister);
1291 Jump jmp = branchTest32(cond, addressTempRegister, mask);
1292 releaseScratch(addressTempRegister);
1293 return jmp;
1294 }
1295
1296 void signExtend32ToPtr(RegisterID src, RegisterID dest)
1297 {
1298 if (src != dest)
1299 move(src, dest);
1300 }
1301
1302 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1303 {
1304 RegisterID addressTempRegister = claimScratch();
1305 load8(left, addressTempRegister);
1306 Jump jmp = branch32(cond, addressTempRegister, right);
1307 releaseScratch(addressTempRegister);
1308 return jmp;
1309 }
1310
1311 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1312 {
1313 RegisterID addressTempRegister = claimScratch();
1314 load8(left, addressTempRegister);
1315 compare32(cond, addressTempRegister, right, dest);
1316 releaseScratch(addressTempRegister);
1317 }
1318
    // Truncate src to int32 in dest; the returned jump is taken on possible
    // overflow, i.e. when the result equals either saturation value
    // (0x7fffffff or, after the increment wraps scratchReg3, 0x80000000).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.ftrcdrmfpul(src);
        m_assembler.stsfpulReg(dest);
        m_assembler.loadConstant(0x7fffffff, scratchReg3);
        m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 14, sizeof(uint32_t));
        // If already equal to INT_MAX (T set), skip the INT_MIN test so T
        // survives into branchTrue().
        m_assembler.branch(BT_OPCODE, 2);
        m_assembler.addlImm8r(1, scratchReg3);
        m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
        return branchTrue();
    }
1331
1332 // Stack manipulation operations
1333
    // Pop the top of the stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.popReg(dest);
    }
1338
    // Push the contents of src onto the stack.
    void push(RegisterID src)
    {
        m_assembler.pushReg(src);
    }
1343
1344 void push(Address address)
1345 {
1346 if (!address.offset) {
1347 push(address.base);
1348 return;
1349 }
1350
1351 if ((address.offset < 0) || (address.offset >= 64)) {
1352 RegisterID scr = claimScratch();
1353 m_assembler.loadConstant(address.offset, scr);
1354 m_assembler.addlRegReg(address.base, scr);
1355 m_assembler.movlMemReg(scr, SH4Registers::sp);
1356 m_assembler.addlImm8r(-4, SH4Registers::sp);
1357 releaseScratch(scr);
1358 return;
1359 }
1360
1361 m_assembler.movlMemReg(address.offset >> 2, address.base, SH4Registers::sp);
1362 m_assembler.addlImm8r(-4, SH4Registers::sp);
1363 }
1364
1365 void push(TrustedImm32 imm)
1366 {
1367 RegisterID scr = claimScratch();
1368 m_assembler.loadConstant(imm.m_value, scr);
1369 push(scr);
1370 releaseScratch(scr);
1371 }
1372
1373 // Register move operations
1374
    // Load a 32-bit constant into dest.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.loadConstant(imm.m_value, dest);
    }
1379
    // Emit a patchable (non-pooled/unreusable) constant load and return a
    // label addressing it for later repatching. ensureSpace keeps the load
    // sequence contiguous so the label stays valid.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
        DataLabelPtr dataLabel(this);
        m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
        return dataLabel;
    }
1387
1388 void move(RegisterID src, RegisterID dest)
1389 {
1390 if (src != dest)
1391 m_assembler.movlRegReg(src, dest);
1392 }
1393
    // Load a pointer-sized constant into dest (pointers are 32-bit on SH4).
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.loadConstant(imm.asIntptr(), dest);
    }
1398
    // Zero-extend the low 16 bits of src into dst (extu.w).
    void extuw(RegisterID src, RegisterID dst)
    {
        m_assembler.extuw(src, dst);
    }
1403
    // dest = 1 if (left cond right), else 0.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
        if (cond != NotEqual) {
            // T bit already holds the answer; copy it out.
            m_assembler.movt(dest);
            return;
        }

        // NotEqual shares the Equal encoding, so invert: preset dest to 0 and
        // skip the "mov #1" when T (equal) is set.
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
        m_assembler.movImm8(0, dest);
        m_assembler.branch(BT_OPCODE, 0);
        m_assembler.movImm8(1, dest);
    }
1417
1418 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1419 {
1420 if (left != dest) {
1421 move(right, dest);
1422 compare32(cond, left, dest, dest);
1423 return;
1424 }
1425
1426 RegisterID scr = claimScratch();
1427 move(right, scr);
1428 compare32(cond, left, scr, dest);
1429 releaseScratch(scr);
1430 }
1431
    // dest = 1 if (byte at address & mask) satisfies cond (Zero/NonZero), else 0.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        ASSERT((cond == Zero) || (cond == NonZero));

        load8(address, dest);
        if (mask.m_value == -1)
            // Full mask: compare the byte directly against zero.
            compare32(0, dest, static_cast<RelationalCondition>(cond));
        else
            testlImm(mask.m_value, dest);
        if (cond != NonZero) {
            // Zero case: T bit already holds the answer.
            m_assembler.movt(dest);
            return;
        }

        // NonZero: invert T — preset 0, skip the "mov #1" when T is set.
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
        m_assembler.movImm8(0, dest);
        m_assembler.branch(BT_OPCODE, 0);
        m_assembler.movImm8(1, dest);
    }
1451
1452 void loadPtrLinkReg(ImplicitAddress address)
1453 {
1454 RegisterID scr = claimScratch();
1455 load32(address, scr);
1456 m_assembler.ldspr(scr);
1457 releaseScratch(scr);
1458 }
1459
1460 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1461 {
1462 m_assembler.cmplRegReg(right, left, SH4Condition(cond));
1463 /* BT label => BF off
1464 nop LDR reg
1465 nop braf @reg
1466 nop nop
1467 */
1468 if (cond == NotEqual)
1469 return branchFalse();
1470 return branchTrue();
1471 }
1472
1473 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1474 {
1475 if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
1476 m_assembler.testlRegReg(left, left);
1477 else
1478 compare32(right.m_value, left, cond);
1479
1480 if (cond == NotEqual)
1481 return branchFalse();
1482 return branchTrue();
1483 }
1484
1485 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1486 {
1487 compare32(right.offset, right.base, left, cond);
1488 if (cond == NotEqual)
1489 return branchFalse();
1490 return branchTrue();
1491 }
1492
1493 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1494 {
1495 compare32(right, left.offset, left.base, cond);
1496 if (cond == NotEqual)
1497 return branchFalse();
1498 return branchTrue();
1499 }
1500
1501 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1502 {
1503 compare32(right.m_value, left.offset, left.base, cond);
1504 if (cond == NotEqual)
1505 return branchFalse();
1506 return branchTrue();
1507 }
1508
1509 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1510 {
1511 RegisterID scr = claimScratch();
1512
1513 move(TrustedImm32(reinterpret_cast<uint32_t>(left.m_ptr)), scr);
1514 m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
1515 releaseScratch(scr);
1516
1517 if (cond == NotEqual)
1518 return branchFalse();
1519 return branchTrue();
1520 }
1521
1522 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1523 {
1524 RegisterID addressTempRegister = claimScratch();
1525
1526 m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
1527 m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
1528 compare32(right.m_value, addressTempRegister, cond);
1529 releaseScratch(addressTempRegister);
1530
1531 if (cond == NotEqual)
1532 return branchFalse();
1533 return branchTrue();
1534 }
1535
1536 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1537 {
1538 ASSERT(!(right.m_value & 0xFFFFFF00));
1539 RegisterID scr = claimScratch();
1540
1541 move(left.index, scr);
1542 lshift32(TrustedImm32(left.scale), scr);
1543
1544 if (left.offset)
1545 add32(TrustedImm32(left.offset), scr);
1546 add32(left.base, scr);
1547 load8(scr, scr);
1548 RegisterID scr1 = claimScratch();
1549 m_assembler.loadConstant(right.m_value, scr1);
1550 releaseScratch(scr);
1551 releaseScratch(scr1);
1552
1553 return branch32(cond, scr, scr1);
1554 }
1555
    // Branch on (reg & mask) being zero / non-zero.
    // NOTE(review): cond is a ResultCondition but is compared against the
    // RelationalCondition NotEqual — this presumably relies on NonZero and
    // NotEqual sharing the same underlying encoding (both convert through
    // SH4Condition()); confirm against the condition enum definitions.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));

        m_assembler.testlRegReg(reg, mask);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1566
1567 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1568 {
1569 ASSERT((cond == Zero) || (cond == NonZero));
1570
1571 if (mask.m_value == -1)
1572 m_assembler.testlRegReg(reg, reg);
1573 else
1574 testlImm(mask.m_value, reg);
1575
1576 if (cond == NotEqual)
1577 return branchFalse();
1578 return branchTrue();
1579 }
1580
1581 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1582 {
1583 ASSERT((cond == Zero) || (cond == NonZero));
1584
1585 if (mask.m_value == -1)
1586 compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
1587 else
1588 testImm(mask.m_value, address.offset, address.base);
1589
1590 if (cond == NotEqual)
1591 return branchFalse();
1592 return branchTrue();
1593 }
1594
1595 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1596 {
1597 RegisterID scr = claimScratch();
1598
1599 move(address.index, scr);
1600 lshift32(TrustedImm32(address.scale), scr);
1601 add32(address.base, scr);
1602 load32(scr, address.offset, scr);
1603
1604 if (mask.m_value == -1)
1605 m_assembler.testlRegReg(scr, scr);
1606 else
1607 testlImm(mask.m_value, scr);
1608
1609 releaseScratch(scr);
1610
1611 if (cond == NotEqual)
1612 return branchFalse();
1613 return branchTrue();
1614 }
1615
    // Unconditional jump; target bound when the returned Jump is linked.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
1620
    // Indirect jump to the address held in target.
    void jump(RegisterID target)
    {
        m_assembler.jmpReg(target);
    }
1625
    // Indirect jump through a code pointer stored at base + offset.
    void jump(Address address)
    {
        RegisterID scr = claimScratch();

        // Offsets outside the scaled 4-bit displacement range (0..60) need
        // the full effective address computed in the scratch register first.
        if ((address.offset < 0) || (address.offset >= 64)) {
            m_assembler.loadConstant(address.offset, scr);
            m_assembler.addlRegReg(address.base, scr);
            m_assembler.movlMemReg(scr, scr);
        } else if (address.offset)
            m_assembler.movlMemReg(address.offset >> 2, address.base, scr);
        else
            m_assembler.movlMemReg(address.base, scr);
        m_assembler.jmpReg(scr);

        releaseScratch(scr);
    }
1642
1643 // Arithmetic control flow operations
1644
    // dest += src, branching when cond holds on the result.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            // addv sets T on signed overflow.
            m_assembler.addvlRegReg(src, dest);
            return branchTrue();
        }

        if (cond == Signed) {
            m_assembler.addlRegReg(src, dest);
            // cmp/pz sets T for dest >= 0, so a negative result branches via
            // the inverted (T clear) path.
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        // Zero / NonZero: compare the result against zero.
        m_assembler.addlRegReg(src, dest);
        compare32(0, dest, Equal);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1668
    // dest += imm, branching when cond holds; the immediate is staged in
    // scratchReg3 and the register/register form does the work.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        return branchAdd32(cond, scratchReg3, dest);
    }
1676
    // dest = src + imm, branching when cond holds on the result.
    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (src != dest)
            move(src, dest);

        if (cond == Overflow) {
            // addv sets T on signed overflow.
            move(imm, scratchReg3);
            m_assembler.addvlRegReg(scratchReg3, dest);
            return branchTrue();
        }

        add32(imm, dest);

        if (cond == Signed) {
            // cmp/pz sets T for dest >= 0; negative result takes branchFalse.
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        compare32(0, dest, Equal);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1703
    // dest *= src, branching when cond holds on the result.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            RegisterID scr1 = claimScratch();
            RegisterID scr = claimScratch();
            // Full 64-bit signed multiply; overflow occurred unless MACH
            // equals the sign extension of the low 32 bits (arithmetic shift
            // of the low word by -31).
            m_assembler.dmullRegReg(src, dest);
            m_assembler.stsmacl(dest);
            m_assembler.movImm8(-31, scr);
            m_assembler.movlRegReg(dest, scr1);
            m_assembler.shaRegReg(scr1, scr);
            m_assembler.stsmach(scr);
            m_assembler.cmplRegReg(scr, scr1, SH4Condition(Equal));
            releaseScratch(scr1);
            releaseScratch(scr);
            // T set <=> no overflow, so branch when T is clear.
            return branchFalse();
        }

        // 32-bit multiply; result is read back from MACL.
        m_assembler.imullRegReg(src, dest);
        m_assembler.stsmacl(dest);
        if (cond == Signed) {
            // cmp/pz sets T for dest >= 0; negative result takes branchFalse.
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        compare32(0, dest, static_cast<RelationalCondition>(cond));

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1737
    // dest = src * imm, branching when cond holds; imm staged in scratchReg3.
    // NOTE(review): if dest aliased scratchReg3 the staged immediate would be
    // clobbered by the move below — callers appear never to pass scratchReg3.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        if (src != dest)
            move(src, dest);

        return branchMul32(cond, scratchReg3, dest);
    }
1748
    // dest -= src, branching when cond holds on the result.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            // subv sets T on signed overflow/underflow.
            m_assembler.subvlRegReg(src, dest);
            return branchTrue();
        }

        if (cond == Signed) {
            // Branch when the result is negative.
            m_assembler.sublRegReg(src, dest);
            compare32(0, dest, LessThan);
            return branchTrue();
        }

        // Zero / NonZero: compare the result against zero.
        sub32(src, dest);
        compare32(0, dest, static_cast<RelationalCondition>(cond));

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1772
    // dest -= imm, branching when cond holds; the immediate is staged in
    // scratchReg3 and the register/register form does the work.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        return branchSub32(cond, scratchReg3, dest);
    }
1780
1781 Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
1782 {
1783 move(imm, scratchReg3);
1784 if (src != dest)
1785 move(src, dest);
1786 return branchSub32(cond, scratchReg3, dest);
1787 }
1788
1789 Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1790 {
1791 if (src1 != dest)
1792 move(src1, dest);
1793 return branchSub32(cond, src2, dest);
1794 }
1795
1796 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1797 {
1798 ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
1799
1800 if (cond == Signed) {
1801 or32(src, dest);
1802 compare32(0, dest, static_cast<RelationalCondition>(LessThan));
1803 return branchTrue();
1804 }
1805
1806 or32(src, dest);
1807 compare32(0, dest, static_cast<RelationalCondition>(cond));
1808
1809 if (cond == NotEqual)
1810 return branchFalse();
1811 return branchTrue();
1812 }
1813
    // Truncate src to int32 in dest, appending to failureCases when the value
    // is not exactly representable (round-trip mismatch) or when the result is
    // zero (presumably so callers can reject -0.0 — TODO confirm). fpTemp is
    // unused; the member fscratch register is used for the round-trip instead.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.ftrcdrmfpul(src);
        m_assembler.stsfpulReg(dest);
        // Round-trip: convert back and compare against the original double.
        convertInt32ToDouble(dest, fscratch);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));

        // Zero test; r0 has a compact cmp/eq #imm, r0 encoding.
        if (dest == SH4Registers::r0)
            m_assembler.cmpEqImmR0(0, dest);
        else {
            m_assembler.movImm8(0, scratchReg3);
            m_assembler.cmplRegReg(scratchReg3, dest, SH4Condition(Equal));
        }
        failureCases.append(branchTrue());
    }
1829
    // dst = -dst (two's-complement negate).
    void neg32(RegisterID dst)
    {
        m_assembler.neg(dst, dst);
    }
1834
    // Logical right shift of dest by (shiftamount & 0x1f).
    // NOTE: destructive on shiftamount — it is masked and negated in place.
    void urshift32(RegisterID shiftamount, RegisterID dest)
    {
        if (shiftamount == SH4Registers::r0)
            m_assembler.andlImm8r(0x1f, shiftamount);
        else {
            // and #imm only targets r0; other registers mask via a scratch.
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(0x1f, scr);
            m_assembler.andlRegReg(scr, shiftamount);
            releaseScratch(scr);
        }
        // The shift instruction shifts right when its amount is negative.
        m_assembler.neg(shiftamount, shiftamount);
        m_assembler.shllRegReg(dest, shiftamount);
    }
1848
1849 void urshift32(TrustedImm32 imm, RegisterID dest)
1850 {
1851 RegisterID scr = claimScratch();
1852 m_assembler.loadConstant(-(imm.m_value & 0x1f), scr);
1853 m_assembler.shaRegReg(dest, scr);
1854 releaseScratch(scr);
1855 }
1856
1857 void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
1858 {
1859 if (src != dest)
1860 move(src, dest);
1861
1862 urshift32(shiftamount, dest);
1863 }
1864
    // Emit a patchable call; the target is bound at link time.
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }
1869
    // Near variant of call(); linked within the current code range.
    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }
1874
    // Indirect call through target; nothing to link afterwards.
    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }
1879
    // Load a function pointer from address into target and call through it.
    void call(Address address, RegisterID target)
    {
        load32(address.base, address.offset, target);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
        m_assembler.branch(JSR_OPCODE, target);
        // jsr has a branch delay slot; fill it with a nop.
        m_assembler.nop();
    }
1887
    // Emit a software breakpoint (trap) followed by a nop.
    void breakpoint()
    {
        m_assembler.bkpt();
        m_assembler.nop();
    }
1893
1894 Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1895 {
1896 RegisterID dataTempRegister = claimScratch();
1897
1898 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1899 m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
1900 releaseScratch(dataTempRegister);
1901
1902 if (cond == NotEqual)
1903 return branchFalse();
1904 return branchTrue();
1905 }
1906
    // Compare the word stored at left against a patchable pointer constant;
    // dataLabel receives the constant's location for later repatching.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        RegisterID scr = claimScratch();

        // Load the 32-bit value at left.base + left.offset.
        m_assembler.loadConstant(left.offset, scr);
        m_assembler.addlRegReg(left.base, scr);
        m_assembler.movlMemReg(scr, scr);
        RegisterID scr1 = claimScratch();
        dataLabel = moveWithPatch(initialRightValue, scr1);
        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
        releaseScratch(scr);
        releaseScratch(scr1);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }
1924
    // Function return (rts); the nop fills the branch delay slot.
    void ret()
    {
        m_assembler.ret();
        m_assembler.nop();
    }
1930
1931 DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1932 {
1933 RegisterID scr = claimScratch();
1934 DataLabelPtr label = moveWithPatch(initialValue, scr);
1935 store32(scr, address);
1936 releaseScratch(scr);
1937 return label;
1938 }
1939
    // Convenience overload: patchable store seeded with a null pointer.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1941
    // Current size of the assembler's constant (literal) pool, in bytes.
    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }
1946
    // Emit an absolute jump whose target constant (placeholder 0) is filled
    // in at link time, packaged as a tail call.
    Call tailRecursiveCall()
    {
        RegisterID scr = claimScratch();

        m_assembler.loadConstantUnReusable(0x0, scr, true);
        Jump m_jump = Jump(m_assembler.jmp(scr));
        releaseScratch(scr);

        return Call::fromTailJump(m_jump);
    }
1957
    // Bind oldJump to the current location, then emit a fresh tail call there.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
1963
    // Emit a single no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }
1968
    // Decode the destination of a previously linked call at 'call'.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
    }
1973
1974 protected:
    // RelationalCondition values mirror SH4Assembler::Condition encodings,
    // so the conversion is a plain cast.
    SH4Assembler::Condition SH4Condition(RelationalCondition cond)
    {
        return static_cast<SH4Assembler::Condition>(cond);
    }
1979
    // ResultCondition values likewise mirror SH4Assembler::Condition encodings.
    SH4Assembler::Condition SH4Condition(ResultCondition cond)
    {
        return static_cast<SH4Assembler::Condition>(cond);
    }
1984 private:
1985 friend class LinkBuffer;
1986 friend class RepatchBuffer;
1987
1988 static void linkCall(void*, Call, FunctionPtr);
1989 static void repatchCall(CodeLocationCall, CodeLocationLabel);
1990 static void repatchCall(CodeLocationCall, FunctionPtr);
1991 };
1992
1993 } // namespace JSC
1994
1995 #endif // ENABLE(ASSEMBLER)
1996
1997 #endif // MacroAssemblerSH4_h