/*
 * Copyright (C) 2008, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC { typedef MacroAssemblerMIPS MacroAssemblerBase; };

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC { typedef MacroAssemblerSH4 MacroAssemblerBase; };

#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:

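    // The register-arithmetic helpers below assume that RegisterID and
    // FPRegisterID values are contiguous integers. Illustrative use: a dense
    // table keyed by every register could be laid out as
    //     int slots[MacroAssembler::totalNumberOfRegisters()];
    //     slots[MacroAssembler::registerIndex(gpr)]; // GPRs occupy the low indices,
    //     slots[MacroAssembler::registerIndex(fpr)]; // FPRs follow after them.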
    static RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static unsigned numberOfRegisters()
    {
        return lastRegister() - firstRegister() + 1;
    }

    static unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static unsigned numberOfFPRegisters()
    {
        return lastFPRegister() - firstFPRegister() + 1;
    }

    static unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;

    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }
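    // (Despite the "aligned" in the name, this checks representability: the
    // offset must fit in an int32_t, which is what the address generators
    // require.)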

    static const double twoToThe32; // This is super useful for some double code.

    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

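    // Inverting a floating-point condition is not just flipping the
    // relational operator: a NaN operand makes every ordered comparison
    // false, so the logical negation of an ordered condition is the opposite
    // condition *or unordered*. E.g. !(a == b) must hold when either operand
    // is NaN, hence DoubleEqual inverts to DoubleNotEqualOrUnordered.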
    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return DoubleEqual; // make compiler happy
        }
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
#endif

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
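    // Illustrative usage (register names are hypothetical):
    //     masm.poke(regT0, 2); // store regT0 at [sp + 2 * sizeof(void*)]
    //     masm.peek(regT1, 2); // reload the same slot into regT1
    //     masm.pop();          // discard one machine word from the stack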
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }

#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)

#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif

#if CPU(MIPS)
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }
#endif

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
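    // E.g. Imm32(33) becomes TrustedImm32(1); masking to 5 bits guarantees
    // the emitted shift amount stays in [0, 31], so an attacker-influenced
    // shift count can never widen the shift.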

    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }

#if !CPU(ARM_TRADITIONAL)
    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif
#endif

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition: returns a new condition that will produce
    // the same results given the same inputs, but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
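    // E.g. branch32(LessThan, a, b) and branch32(commute(LessThan), b, a),
    // i.e. branch32(GreaterThan, b, a), take the same path for all inputs;
    // Equal and NotEqual are symmetric and commute to themselves.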

    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
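    // With BlindingModulus == 64 this opts in roughly 1 of every 64
    // otherwise-blindable constants, keeping the average code-size and speed
    // cost of blinding low.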

    // Ptr methods
    // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // !CPU(X86_64) && !CPU(ARM64)

    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }

    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
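    // Net effect: a finite double counts as "safe" (not blinded) only if it
    // is a multiple of 1/8 with magnitude no greater than 0xff, e.g. 0.5,
    // 3.25 or 255.0; anything else is a blinding candidate.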

    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }

    bool shouldBlind(ImmPtr imm)
    {
        if (!canBlind())
            return false;

#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(value);
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }
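    // The raw pointer never appears in the instruction stream: we emit its
    // left-rotated bits, then rotate them back in the destination register
    // (loadRotationBlindedConstant below undoes the rotation with a right
    // rotate by the same amount).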

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }

    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // !CPU(X86_64) && !CPU(ARM64)

    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }
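    // The key is masked to the byte-width of the constant, so the key (and
    // hence the blinded halves derived from it) never has bits set above the
    // constant's highest occupied byte.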

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
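    // E.g. (with a hypothetical key) blinding 0x1234 with key 0x0f0f yields
    // the pair (0x1d3b, 0x0f0f); emitting 0x1d3b ^ 0x0f0f reconstructs
    // 0x1234 without the raw constant appearing in the instruction stream.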

    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
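    // maskTable is indexed by the low two bits of the immediate: a
    // 4-byte-aligned offset gets a 4-byte-aligned key, a 2-byte-aligned
    // offset a 2-byte-aligned key, so value1 + value2 preserves the
    // alignment of the original offset.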

    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }
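    // Identity: for each bit, one half carries the value where the key is
    // set and the other where it is clear, so and-ing both halves restores
    // baseValue: ((v & k) | ~k) & ((v & ~k) | k) == v.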

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
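    // Identity: (v & k) | (v & ~k) == v, so or-ing the two halves into the
    // destination rebuilds the original constant.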

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
                return branch32(cond, left, scratchRegisterForBlinding());
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }
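    // The nop padding doesn't hide the constant; it randomizes the code
    // offset at which the raw immediate lands, which is the fallback defence
    // when no scratch register is available.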

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes an explicit scratch register because 32-bit platforms
    // make use of this with src == dest, and on x86-32 there is no platform
    // scratch register available.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
};

} // namespace JSC

#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:
    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h