]> git.saurik.com Git - apple/javascriptcore.git/blame - assembler/MacroAssembler.h
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssembler.h
CommitLineData
9dae56ea 1/*
ed1e77d3 2 * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
9dae56ea
A
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef MacroAssembler_h
27#define MacroAssembler_h
28
9dae56ea
A
29#if ENABLE(ASSEMBLER)
30
f9bf01c6 31#if CPU(ARM_THUMB2)
ba379fdc
A
32#include "MacroAssemblerARMv7.h"
33namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
9dae56ea 34
93a37866
A
35#elif CPU(ARM64)
36#include "MacroAssemblerARM64.h"
37namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
38
f9bf01c6
A
39#elif CPU(ARM_TRADITIONAL)
40#include "MacroAssemblerARM.h"
41namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
42
4e4e5a6f
A
43#elif CPU(MIPS)
44#include "MacroAssemblerMIPS.h"
45namespace JSC {
46typedef MacroAssemblerMIPS MacroAssemblerBase;
47};
48
f9bf01c6 49#elif CPU(X86)
ba379fdc
A
50#include "MacroAssemblerX86.h"
51namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
9dae56ea 52
f9bf01c6 53#elif CPU(X86_64)
ba379fdc
A
54#include "MacroAssemblerX86_64.h"
55namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
9dae56ea 56
14957cd0
A
57#elif CPU(SH4)
58#include "MacroAssemblerSH4.h"
59namespace JSC {
60typedef MacroAssemblerSH4 MacroAssemblerBase;
61};
62
ba379fdc
A
63#else
64#error "The MacroAssembler is not supported on this platform."
9dae56ea
A
65#endif
66
ba379fdc
A
67namespace JSC {
68
69class MacroAssembler : public MacroAssemblerBase {
9dae56ea 70public:
ba379fdc 71
81345200
A
72 static RegisterID nextRegister(RegisterID reg)
73 {
74 return static_cast<RegisterID>(reg + 1);
75 }
76
77 static FPRegisterID nextFPRegister(FPRegisterID reg)
78 {
79 return static_cast<FPRegisterID>(reg + 1);
80 }
81
82 static unsigned numberOfRegisters()
83 {
84 return lastRegister() - firstRegister() + 1;
85 }
86
87 static unsigned registerIndex(RegisterID reg)
88 {
89 return reg - firstRegister();
90 }
91
92 static unsigned numberOfFPRegisters()
93 {
94 return lastFPRegister() - firstFPRegister() + 1;
95 }
96
97 static unsigned fpRegisterIndex(FPRegisterID reg)
98 {
99 return reg - firstFPRegister();
100 }
101
102 static unsigned registerIndex(FPRegisterID reg)
103 {
104 return fpRegisterIndex(reg) + numberOfRegisters();
105 }
106
107 static unsigned totalNumberOfRegisters()
108 {
109 return numberOfRegisters() + numberOfFPRegisters();
110 }
111
ba379fdc
A
112 using MacroAssemblerBase::pop;
113 using MacroAssemblerBase::jump;
114 using MacroAssemblerBase::branch32;
6fe7ccc8 115 using MacroAssemblerBase::move;
6fe7ccc8
A
116 using MacroAssemblerBase::add32;
117 using MacroAssemblerBase::and32;
118 using MacroAssemblerBase::branchAdd32;
119 using MacroAssemblerBase::branchMul32;
81345200
A
120#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
121 using MacroAssemblerBase::branchPtr;
122#endif
6fe7ccc8
A
123 using MacroAssemblerBase::branchSub32;
124 using MacroAssemblerBase::lshift32;
125 using MacroAssemblerBase::or32;
126 using MacroAssemblerBase::rshift32;
127 using MacroAssemblerBase::store32;
128 using MacroAssemblerBase::sub32;
129 using MacroAssemblerBase::urshift32;
130 using MacroAssemblerBase::xor32;
81345200 131
93a37866
A
132 static bool isPtrAlignedAddressOffset(ptrdiff_t value)
133 {
81345200 134 return value == static_cast<int32_t>(value);
93a37866
A
135 }
136
137 static const double twoToThe32; // This is super useful for some double code.
9dae56ea 138
6fe7ccc8
A
139 // Utilities used by the DFG JIT.
140#if ENABLE(DFG_JIT)
141 using MacroAssemblerBase::invert;
142
143 static DoubleCondition invert(DoubleCondition cond)
144 {
145 switch (cond) {
146 case DoubleEqual:
147 return DoubleNotEqualOrUnordered;
148 case DoubleNotEqual:
149 return DoubleEqualOrUnordered;
150 case DoubleGreaterThan:
151 return DoubleLessThanOrEqualOrUnordered;
152 case DoubleGreaterThanOrEqual:
153 return DoubleLessThanOrUnordered;
154 case DoubleLessThan:
155 return DoubleGreaterThanOrEqualOrUnordered;
156 case DoubleLessThanOrEqual:
157 return DoubleGreaterThanOrUnordered;
158 case DoubleEqualOrUnordered:
159 return DoubleNotEqual;
160 case DoubleNotEqualOrUnordered:
161 return DoubleEqual;
162 case DoubleGreaterThanOrUnordered:
163 return DoubleLessThanOrEqual;
164 case DoubleGreaterThanOrEqualOrUnordered:
165 return DoubleLessThan;
166 case DoubleLessThanOrUnordered:
167 return DoubleGreaterThanOrEqual;
168 case DoubleLessThanOrEqualOrUnordered:
169 return DoubleGreaterThan;
170 default:
93a37866 171 RELEASE_ASSERT_NOT_REACHED();
6fe7ccc8
A
172 return DoubleEqual; // make compiler happy
173 }
174 }
175
176 static bool isInvertible(ResultCondition cond)
177 {
178 switch (cond) {
179 case Zero:
180 case NonZero:
181 return true;
182 default:
183 return false;
184 }
185 }
186
187 static ResultCondition invert(ResultCondition cond)
188 {
189 switch (cond) {
190 case Zero:
191 return NonZero;
192 case NonZero:
193 return Zero;
194 default:
93a37866 195 RELEASE_ASSERT_NOT_REACHED();
6fe7ccc8
A
196 return Zero; // Make compiler happy for release builds.
197 }
198 }
199#endif
ba379fdc
A
200
201 // Platform agnostic onvenience functions,
202 // described in terms of other macro assembly methods.
203 void pop()
9dae56ea 204 {
14957cd0 205 addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
9dae56ea
A
206 }
207
ba379fdc 208 void peek(RegisterID dest, int index = 0)
9dae56ea 209 {
ba379fdc 210 loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
9dae56ea
A
211 }
212
6fe7ccc8
A
213 Address addressForPoke(int index)
214 {
215 return Address(stackPointerRegister, (index * sizeof(void*)));
216 }
217
ba379fdc 218 void poke(RegisterID src, int index = 0)
9dae56ea 219 {
6fe7ccc8 220 storePtr(src, addressForPoke(index));
9dae56ea
A
221 }
222
14957cd0 223 void poke(TrustedImm32 value, int index = 0)
9dae56ea 224 {
6fe7ccc8 225 store32(value, addressForPoke(index));
9dae56ea
A
226 }
227
14957cd0 228 void poke(TrustedImmPtr imm, int index = 0)
9dae56ea 229 {
6fe7ccc8 230 storePtr(imm, addressForPoke(index));
9dae56ea
A
231 }
232
93a37866
A
233#if !CPU(ARM64)
234 void pushToSave(RegisterID src)
235 {
236 push(src);
237 }
81345200
A
238 void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
239 {
240 push(imm);
241 }
93a37866
A
242 void popToRestore(RegisterID dest)
243 {
244 pop(dest);
245 }
246 void pushToSave(FPRegisterID src)
247 {
248 subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
249 storeDouble(src, stackPointerRegister);
250 }
251 void popToRestore(FPRegisterID dest)
252 {
253 loadDouble(stackPointerRegister, dest);
254 addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
255 }
81345200
A
256
257 static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
93a37866
A
258#endif // !CPU(ARM64)
259
260#if CPU(X86_64) || CPU(ARM64)
261 void peek64(RegisterID dest, int index = 0)
262 {
263 load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
264 }
265
266 void poke(TrustedImm64 value, int index = 0)
267 {
268 store64(value, addressForPoke(index));
269 }
270
271 void poke64(RegisterID src, int index = 0)
272 {
273 store64(src, addressForPoke(index));
274 }
275#endif
276
277#if CPU(MIPS)
278 void poke(FPRegisterID src, int index = 0)
279 {
280 ASSERT(!(index & 1));
281 storeDouble(src, addressForPoke(index));
282 }
283#endif
9dae56ea 284
81345200
A
285 // Immediate shifts only have 5 controllable bits
286 // so we'll consider them safe for now.
287 TrustedImm32 trustedImm32ForShift(Imm32 imm)
288 {
289 return TrustedImm32(imm.asTrustedImm32().m_value & 31);
290 }
291
ba379fdc 292 // Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
14957cd0 293 void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
9dae56ea 294 {
ba379fdc 295 branchPtr(cond, op1, imm).linkTo(target, this);
9dae56ea 296 }
6fe7ccc8
A
297 void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
298 {
299 branchPtr(cond, op1, imm).linkTo(target, this);
300 }
9dae56ea 301
14957cd0 302 void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
9dae56ea 303 {
ba379fdc 304 branch32(cond, op1, op2).linkTo(target, this);
9dae56ea
A
305 }
306
14957cd0 307 void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
9dae56ea 308 {
ba379fdc 309 branch32(cond, op1, imm).linkTo(target, this);
9dae56ea 310 }
6fe7ccc8
A
311
312 void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
313 {
314 branch32(cond, op1, imm).linkTo(target, this);
315 }
ba379fdc 316
14957cd0 317 void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
9dae56ea 318 {
ba379fdc 319 branch32(cond, left, right).linkTo(target, this);
9dae56ea 320 }
ba379fdc 321
6fe7ccc8 322 Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
9dae56ea 323 {
6fe7ccc8 324 return branch32(commute(cond), right, left);
9dae56ea 325 }
6fe7ccc8
A
326
327 Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
328 {
329 return branch32(commute(cond), right, left);
330 }
331
14957cd0 332 void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
9dae56ea 333 {
ba379fdc 334 branchTestPtr(cond, reg).linkTo(target, this);
9dae56ea
A
335 }
336
93a37866
A
337#if !CPU(ARM_THUMB2) && !CPU(ARM64)
338 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
339 {
340 return PatchableJump(branchPtr(cond, left, right));
341 }
342
6fe7ccc8
A
343 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
344 {
345 return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
346 }
347
81345200
A
348 PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
349 {
350 return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
351 }
352
353#if !CPU(ARM_TRADITIONAL)
6fe7ccc8
A
354 PatchableJump patchableJump()
355 {
356 return PatchableJump(jump());
357 }
93a37866
A
358
359 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
360 {
361 return PatchableJump(branchTest32(cond, reg, mask));
362 }
363
364 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
365 {
366 return PatchableJump(branch32(cond, reg, imm));
367 }
81345200
A
368
369 PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
370 {
371 return PatchableJump(branch32(cond, address, imm));
372 }
373#endif
6fe7ccc8
A
374#endif
375
ba379fdc 376 void jump(Label target)
9dae56ea 377 {
ba379fdc 378 jump().linkTo(target, this);
9dae56ea
A
379 }
380
6fe7ccc8
A
381 // Commute a relational condition, returns a new condition that will produce
382 // the same results given the same inputs but with their positions exchanged.
383 static RelationalCondition commute(RelationalCondition condition)
384 {
385 switch (condition) {
386 case Above:
387 return Below;
388 case AboveOrEqual:
389 return BelowOrEqual;
390 case Below:
391 return Above;
392 case BelowOrEqual:
393 return AboveOrEqual;
394 case GreaterThan:
395 return LessThan;
396 case GreaterThanOrEqual:
397 return LessThanOrEqual;
398 case LessThan:
399 return GreaterThan;
400 case LessThanOrEqual:
401 return GreaterThanOrEqual;
402 default:
403 break;
404 }
405
406 ASSERT(condition == Equal || condition == NotEqual);
407 return condition;
408 }
93a37866
A
409
410 static const unsigned BlindingModulus = 64;
411 bool shouldConsiderBlinding()
412 {
413 return !(random() & (BlindingModulus - 1));
414 }
9dae56ea 415
ba379fdc
A
416 // Ptr methods
417 // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
f9bf01c6 418 // FIXME: should this use a test for 32-bitness instead of this specific exception?
93a37866
A
419#if !CPU(X86_64) && !CPU(ARM64)
420 void addPtr(Address src, RegisterID dest)
421 {
422 add32(src, dest);
423 }
424
425 void addPtr(AbsoluteAddress src, RegisterID dest)
426 {
427 add32(src, dest);
428 }
429
ba379fdc 430 void addPtr(RegisterID src, RegisterID dest)
9dae56ea 431 {
ba379fdc 432 add32(src, dest);
9dae56ea
A
433 }
434
14957cd0 435 void addPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 436 {
ba379fdc 437 add32(imm, srcDest);
9dae56ea 438 }
9dae56ea 439
14957cd0 440 void addPtr(TrustedImmPtr imm, RegisterID dest)
ba379fdc 441 {
14957cd0 442 add32(TrustedImm32(imm), dest);
9dae56ea
A
443 }
444
14957cd0 445 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
9dae56ea 446 {
ba379fdc 447 add32(imm, src, dest);
9dae56ea 448 }
ba379fdc 449
6fe7ccc8
A
450 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
451 {
452 add32(imm, address);
453 }
454
ba379fdc 455 void andPtr(RegisterID src, RegisterID dest)
9dae56ea 456 {
ba379fdc 457 and32(src, dest);
9dae56ea 458 }
ba379fdc 459
14957cd0 460 void andPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 461 {
ba379fdc 462 and32(imm, srcDest);
9dae56ea 463 }
81345200
A
464
465 void andPtr(TrustedImmPtr imm, RegisterID srcDest)
466 {
467 and32(TrustedImm32(imm), srcDest);
468 }
469
470 void lshiftPtr(Imm32 imm, RegisterID srcDest)
471 {
472 lshift32(trustedImm32ForShift(imm), srcDest);
473 }
ed1e77d3
A
474
475 void rshiftPtr(Imm32 imm, RegisterID srcDest)
476 {
477 rshift32(trustedImm32ForShift(imm), srcDest);
478 }
479
480 void urshiftPtr(Imm32 imm, RegisterID srcDest)
481 {
482 urshift32(trustedImm32ForShift(imm), srcDest);
483 }
81345200 484
93a37866
A
485 void negPtr(RegisterID dest)
486 {
487 neg32(dest);
488 }
ba379fdc 489
9dae56ea
A
490 void orPtr(RegisterID src, RegisterID dest)
491 {
9dae56ea 492 or32(src, dest);
9dae56ea
A
493 }
494
6fe7ccc8
A
495 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
496 {
497 or32(op1, op2, dest);
498 }
499
14957cd0 500 void orPtr(TrustedImmPtr imm, RegisterID dest)
9dae56ea 501 {
14957cd0 502 or32(TrustedImm32(imm), dest);
9dae56ea
A
503 }
504
14957cd0 505 void orPtr(TrustedImm32 imm, RegisterID dest)
9dae56ea 506 {
9dae56ea 507 or32(imm, dest);
9dae56ea
A
508 }
509
9dae56ea
A
510 void subPtr(RegisterID src, RegisterID dest)
511 {
9dae56ea 512 sub32(src, dest);
9dae56ea
A
513 }
514
14957cd0 515 void subPtr(TrustedImm32 imm, RegisterID dest)
9dae56ea 516 {
9dae56ea 517 sub32(imm, dest);
9dae56ea
A
518 }
519
14957cd0 520 void subPtr(TrustedImmPtr imm, RegisterID dest)
9dae56ea 521 {
14957cd0 522 sub32(TrustedImm32(imm), dest);
9dae56ea
A
523 }
524
525 void xorPtr(RegisterID src, RegisterID dest)
526 {
9dae56ea 527 xor32(src, dest);
9dae56ea
A
528 }
529
14957cd0 530 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 531 {
9dae56ea 532 xor32(imm, srcDest);
9dae56ea 533 }
9dae56ea 534
9dae56ea
A
535
536 void loadPtr(ImplicitAddress address, RegisterID dest)
537 {
9dae56ea 538 load32(address, dest);
9dae56ea
A
539 }
540
541 void loadPtr(BaseIndex address, RegisterID dest)
542 {
9dae56ea 543 load32(address, dest);
9dae56ea
A
544 }
545
6fe7ccc8 546 void loadPtr(const void* address, RegisterID dest)
9dae56ea 547 {
9dae56ea 548 load32(address, dest);
9dae56ea
A
549 }
550
ba379fdc 551 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
9dae56ea 552 {
ba379fdc 553 return load32WithAddressOffsetPatch(address, dest);
9dae56ea 554 }
14957cd0
A
555
556 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
557 {
558 return load32WithCompactAddressOffsetPatch(address, dest);
559 }
9dae56ea 560
6fe7ccc8
A
561 void move(ImmPtr imm, RegisterID dest)
562 {
563 move(Imm32(imm.asTrustedImmPtr()), dest);
564 }
565
14957cd0 566 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
9dae56ea 567 {
14957cd0 568 compare32(cond, left, right, dest);
9dae56ea
A
569 }
570
81345200
A
571 void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
572 {
573 compare32(cond, left, right, dest);
574 }
575
9dae56ea
A
576 void storePtr(RegisterID src, ImplicitAddress address)
577 {
9dae56ea 578 store32(src, address);
9dae56ea
A
579 }
580
ba379fdc 581 void storePtr(RegisterID src, BaseIndex address)
9dae56ea 582 {
ba379fdc 583 store32(src, address);
9dae56ea
A
584 }
585
ba379fdc 586 void storePtr(RegisterID src, void* address)
9dae56ea 587 {
9dae56ea 588 store32(src, address);
9dae56ea
A
589 }
590
14957cd0 591 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
9dae56ea 592 {
14957cd0 593 store32(TrustedImm32(imm), address);
9dae56ea 594 }
6fe7ccc8
A
595
596 void storePtr(ImmPtr imm, Address address)
597 {
598 store32(Imm32(imm.asTrustedImmPtr()), address);
599 }
9dae56ea 600
14957cd0 601 void storePtr(TrustedImmPtr imm, void* address)
9dae56ea 602 {
14957cd0 603 store32(TrustedImm32(imm), address);
9dae56ea 604 }
9dae56ea 605
81345200
A
606 void storePtr(TrustedImm32 imm, ImplicitAddress address)
607 {
608 store32(imm, address);
609 }
610
611 void storePtr(TrustedImmPtr imm, BaseIndex address)
612 {
613 store32(TrustedImm32(imm), address);
614 }
615
ba379fdc 616 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
9dae56ea 617 {
ba379fdc 618 return store32WithAddressOffsetPatch(src, address);
9dae56ea
A
619 }
620
14957cd0 621 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
9dae56ea 622 {
ba379fdc 623 return branch32(cond, left, right);
9dae56ea
A
624 }
625
14957cd0 626 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
9dae56ea 627 {
14957cd0 628 return branch32(cond, left, TrustedImm32(right));
9dae56ea 629 }
6fe7ccc8
A
630
631 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
632 {
633 return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
634 }
9dae56ea 635
14957cd0 636 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
9dae56ea 637 {
ba379fdc 638 return branch32(cond, left, right);
9dae56ea
A
639 }
640
14957cd0 641 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
9dae56ea 642 {
ba379fdc 643 return branch32(cond, left, right);
9dae56ea
A
644 }
645
14957cd0 646 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
9dae56ea 647 {
ba379fdc 648 return branch32(cond, left, right);
9dae56ea
A
649 }
650
14957cd0 651 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
9dae56ea 652 {
14957cd0 653 return branch32(cond, left, TrustedImm32(right));
9dae56ea 654 }
6fe7ccc8 655
14957cd0 656 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
9dae56ea 657 {
14957cd0 658 return branch32(cond, left, TrustedImm32(right));
9dae56ea
A
659 }
660
93a37866
A
661 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
662 {
663 return branchSub32(cond, src, dest);
664 }
665
14957cd0 666 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
9dae56ea 667 {
ba379fdc 668 return branchTest32(cond, reg, mask);
9dae56ea
A
669 }
670
14957cd0 671 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 672 {
ba379fdc 673 return branchTest32(cond, reg, mask);
9dae56ea
A
674 }
675
14957cd0 676 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 677 {
ba379fdc 678 return branchTest32(cond, address, mask);
9dae56ea
A
679 }
680
14957cd0 681 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 682 {
ba379fdc 683 return branchTest32(cond, address, mask);
9dae56ea
A
684 }
685
14957cd0 686 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
9dae56ea 687 {
ba379fdc 688 return branchAdd32(cond, src, dest);
9dae56ea
A
689 }
690
14957cd0 691 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
9dae56ea 692 {
ba379fdc 693 return branchSub32(cond, imm, dest);
9dae56ea 694 }
4e4e5a6f 695 using MacroAssemblerBase::branchTest8;
14957cd0 696 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
4e4e5a6f
A
697 {
698 return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
699 }
81345200
A
700
701#else // !CPU(X86_64)
702
93a37866
A
703 void addPtr(RegisterID src, RegisterID dest)
704 {
705 add64(src, dest);
706 }
707
708 void addPtr(Address src, RegisterID dest)
709 {
710 add64(src, dest);
711 }
712
713 void addPtr(TrustedImm32 imm, RegisterID srcDest)
714 {
715 add64(imm, srcDest);
716 }
717
718 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
719 {
720 add64(imm, src, dest);
721 }
722
723 void addPtr(TrustedImm32 imm, Address address)
724 {
725 add64(imm, address);
726 }
727
728 void addPtr(AbsoluteAddress src, RegisterID dest)
729 {
730 add64(src, dest);
731 }
732
733 void addPtr(TrustedImmPtr imm, RegisterID dest)
734 {
735 add64(TrustedImm64(imm), dest);
736 }
737
738 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
739 {
740 add64(imm, address);
741 }
742
743 void andPtr(RegisterID src, RegisterID dest)
744 {
745 and64(src, dest);
746 }
747
748 void andPtr(TrustedImm32 imm, RegisterID srcDest)
749 {
750 and64(imm, srcDest);
751 }
752
81345200
A
753 void andPtr(TrustedImmPtr imm, RegisterID srcDest)
754 {
755 and64(imm, srcDest);
756 }
757
758 void lshiftPtr(Imm32 imm, RegisterID srcDest)
759 {
760 lshift64(trustedImm32ForShift(imm), srcDest);
761 }
762
ed1e77d3
A
763 void rshiftPtr(Imm32 imm, RegisterID srcDest)
764 {
765 rshift64(trustedImm32ForShift(imm), srcDest);
766 }
767
768 void urshiftPtr(Imm32 imm, RegisterID srcDest)
769 {
770 urshift64(trustedImm32ForShift(imm), srcDest);
771 }
772
93a37866
A
773 void negPtr(RegisterID dest)
774 {
775 neg64(dest);
776 }
777
778 void orPtr(RegisterID src, RegisterID dest)
779 {
780 or64(src, dest);
781 }
782
783 void orPtr(TrustedImm32 imm, RegisterID dest)
784 {
785 or64(imm, dest);
786 }
787
788 void orPtr(TrustedImmPtr imm, RegisterID dest)
789 {
790 or64(TrustedImm64(imm), dest);
791 }
792
793 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
794 {
795 or64(op1, op2, dest);
796 }
797
798 void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
799 {
800 or64(imm, src, dest);
801 }
802
803 void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
804 {
805 rotateRight64(imm, srcDst);
806 }
807
808 void subPtr(RegisterID src, RegisterID dest)
809 {
810 sub64(src, dest);
811 }
812
813 void subPtr(TrustedImm32 imm, RegisterID dest)
814 {
815 sub64(imm, dest);
816 }
817
818 void subPtr(TrustedImmPtr imm, RegisterID dest)
819 {
820 sub64(TrustedImm64(imm), dest);
821 }
822
823 void xorPtr(RegisterID src, RegisterID dest)
824 {
825 xor64(src, dest);
826 }
827
828 void xorPtr(RegisterID src, Address dest)
829 {
830 xor64(src, dest);
831 }
832
833 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
834 {
835 xor64(imm, srcDest);
836 }
837
838 void loadPtr(ImplicitAddress address, RegisterID dest)
839 {
840 load64(address, dest);
841 }
842
843 void loadPtr(BaseIndex address, RegisterID dest)
844 {
845 load64(address, dest);
846 }
847
848 void loadPtr(const void* address, RegisterID dest)
849 {
850 load64(address, dest);
851 }
852
853 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
854 {
855 return load64WithAddressOffsetPatch(address, dest);
856 }
857
858 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
859 {
860 return load64WithCompactAddressOffsetPatch(address, dest);
861 }
862
863 void storePtr(RegisterID src, ImplicitAddress address)
864 {
865 store64(src, address);
866 }
867
868 void storePtr(RegisterID src, BaseIndex address)
869 {
870 store64(src, address);
871 }
872
873 void storePtr(RegisterID src, void* address)
874 {
875 store64(src, address);
876 }
877
878 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
879 {
880 store64(TrustedImm64(imm), address);
881 }
882
883 void storePtr(TrustedImmPtr imm, BaseIndex address)
884 {
885 store64(TrustedImm64(imm), address);
886 }
887
888 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
889 {
890 return store64WithAddressOffsetPatch(src, address);
891 }
892
893 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
894 {
895 compare64(cond, left, right, dest);
896 }
897
898 void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
899 {
900 compare64(cond, left, right, dest);
901 }
6fe7ccc8 902
93a37866
A
903 void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
904 {
905 test64(cond, reg, mask, dest);
906 }
907
908 void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
909 {
910 test64(cond, reg, mask, dest);
911 }
912
913 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
914 {
915 return branch64(cond, left, right);
916 }
917
918 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
919 {
920 return branch64(cond, left, TrustedImm64(right));
921 }
922
923 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
924 {
925 return branch64(cond, left, right);
926 }
927
928 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
929 {
930 return branch64(cond, left, right);
931 }
932
933 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
934 {
935 return branch64(cond, left, right);
936 }
937
938 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
939 {
940 return branch64(cond, left, TrustedImm64(right));
941 }
942
943 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
944 {
945 return branchTest64(cond, reg, mask);
946 }
947
948 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
949 {
950 return branchTest64(cond, reg, mask);
951 }
952
953 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
954 {
955 return branchTest64(cond, address, mask);
956 }
957
958 Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
959 {
960 return branchTest64(cond, address, reg);
961 }
962
963 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
964 {
965 return branchTest64(cond, address, mask);
966 }
967
968 Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
969 {
970 return branchTest64(cond, address, mask);
971 }
972
973 Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
974 {
975 return branchAdd64(cond, imm, dest);
976 }
977
978 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
979 {
980 return branchAdd64(cond, src, dest);
981 }
982
983 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
984 {
985 return branchSub64(cond, imm, dest);
986 }
987
988 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
989 {
990 return branchSub64(cond, src, dest);
991 }
992
993 Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
994 {
995 return branchSub64(cond, src1, src2, dest);
996 }
997
93a37866 998 using MacroAssemblerBase::and64;
6fe7ccc8 999 using MacroAssemblerBase::convertInt32ToDouble;
93a37866 1000 using MacroAssemblerBase::store64;
6fe7ccc8
A
1001 bool shouldBlindDouble(double value)
1002 {
1003 // Don't trust NaN or +/-Infinity
93a37866
A
1004 if (!std::isfinite(value))
1005 return shouldConsiderBlinding();
6fe7ccc8
A
1006
1007 // Try to force normalisation, and check that there's no change
1008 // in the bit pattern
93a37866
A
1009 if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
1010 return shouldConsiderBlinding();
6fe7ccc8 1011
81345200 1012 value = fabs(value);
6fe7ccc8
A
1013 // Only allow a limited set of fractional components
1014 double scaledValue = value * 8;
1015 if (scaledValue / 8 != value)
93a37866 1016 return shouldConsiderBlinding();
6fe7ccc8
A
1017 double frac = scaledValue - floor(scaledValue);
1018 if (frac != 0.0)
93a37866 1019 return shouldConsiderBlinding();
6fe7ccc8
A
1020
1021 return value > 0xff;
1022 }
1023
81345200
A
1024 bool shouldBlindPointerForSpecificArch(uintptr_t value)
1025 {
1026 if (sizeof(void*) == 4)
1027 return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
1028 return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
1029 }
1030
6fe7ccc8 1031 bool shouldBlind(ImmPtr imm)
81345200
A
1032 {
1033 if (!canBlind())
1034 return false;
1035
93a37866 1036#if ENABLE(FORCED_JIT_BLINDING)
6fe7ccc8
A
1037 UNUSED_PARAM(imm);
1038 // Debug always blind all constants, if only so we know
1039 // if we've broken blinding during patch development.
81345200 1040 return true;
6fe7ccc8
A
1041#endif
1042
1043 // First off we'll special case common, "safe" values to avoid hurting
1044 // performance too much
1045 uintptr_t value = imm.asTrustedImmPtr().asIntptr();
1046 switch (value) {
1047 case 0xffff:
1048 case 0xffffff:
1049 case 0xffffffffL:
1050 case 0xffffffffffL:
1051 case 0xffffffffffffL:
1052 case 0xffffffffffffffL:
1053 case 0xffffffffffffffffL:
1054 return false;
1055 default: {
1056 if (value <= 0xff)
1057 return false;
93a37866 1058 if (~value <= 0xff)
6fe7ccc8 1059 return false;
6fe7ccc8
A
1060 }
1061 }
93a37866
A
1062
1063 if (!shouldConsiderBlinding())
1064 return false;
1065
81345200 1066 return shouldBlindPointerForSpecificArch(value);
6fe7ccc8
A
1067 }
1068
1069 struct RotatedImmPtr {
1070 RotatedImmPtr(uintptr_t v1, uint8_t v2)
1071 : value(v1)
1072 , rotation(v2)
1073 {
1074 }
1075 TrustedImmPtr value;
1076 TrustedImm32 rotation;
1077 };
1078
1079 RotatedImmPtr rotationBlindConstant(ImmPtr imm)
1080 {
1081 uint8_t rotation = random() % (sizeof(void*) * 8);
1082 uintptr_t value = imm.asTrustedImmPtr().asIntptr();
1083 value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
1084 return RotatedImmPtr(value, rotation);
1085 }
1086
1087 void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
1088 {
1089 move(constant.value, dest);
1090 rotateRightPtr(constant.rotation, dest);
1091 }
1092
93a37866
A
1093 bool shouldBlind(Imm64 imm)
1094 {
1095#if ENABLE(FORCED_JIT_BLINDING)
1096 UNUSED_PARAM(imm);
1097 // Debug always blind all constants, if only so we know
1098 // if we've broken blinding during patch development.
1099 return true;
1100#endif
1101
1102 // First off we'll special case common, "safe" values to avoid hurting
1103 // performance too much
1104 uint64_t value = imm.asTrustedImm64().m_value;
1105 switch (value) {
1106 case 0xffff:
1107 case 0xffffff:
1108 case 0xffffffffL:
1109 case 0xffffffffffL:
1110 case 0xffffffffffffL:
1111 case 0xffffffffffffffL:
1112 case 0xffffffffffffffffL:
1113 return false;
1114 default: {
1115 if (value <= 0xff)
1116 return false;
1117 if (~value <= 0xff)
1118 return false;
1119
1120 JSValue jsValue = JSValue::decode(value);
1121 if (jsValue.isInt32())
1122 return shouldBlind(Imm32(jsValue.asInt32()));
1123 if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
1124 return false;
1125
1126 if (!shouldBlindDouble(bitwise_cast<double>(value)))
1127 return false;
1128 }
1129 }
1130
1131 if (!shouldConsiderBlinding())
1132 return false;
1133
1134 return shouldBlindForSpecificArch(value);
1135 }
1136
1137 struct RotatedImm64 {
1138 RotatedImm64(uint64_t v1, uint8_t v2)
1139 : value(v1)
1140 , rotation(v2)
1141 {
1142 }
1143 TrustedImm64 value;
1144 TrustedImm32 rotation;
1145 };
1146
1147 RotatedImm64 rotationBlindConstant(Imm64 imm)
1148 {
1149 uint8_t rotation = random() % (sizeof(int64_t) * 8);
1150 uint64_t value = imm.asTrustedImm64().m_value;
1151 value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
1152 return RotatedImm64(value, rotation);
1153 }
1154
1155 void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
1156 {
1157 move(constant.value, dest);
1158 rotateRight64(constant.rotation, dest);
1159 }
1160
6fe7ccc8
A
1161 void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
1162 {
81345200 1163 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
6fe7ccc8
A
1164 RegisterID scratchRegister = scratchRegisterForBlinding();
1165 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
1166 convertInt32ToDouble(scratchRegister, dest);
1167 } else
1168 convertInt32ToDouble(imm.asTrustedImm32(), dest);
1169 }
1170
1171 void move(ImmPtr imm, RegisterID dest)
1172 {
1173 if (shouldBlind(imm))
1174 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1175 else
1176 move(imm.asTrustedImmPtr(), dest);
1177 }
1178
93a37866
A
1179 void move(Imm64 imm, RegisterID dest)
1180 {
1181 if (shouldBlind(imm))
1182 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1183 else
1184 move(imm.asTrustedImm64(), dest);
1185 }
1186
1187 void and64(Imm32 imm, RegisterID dest)
1188 {
1189 if (shouldBlind(imm)) {
1190 BlindedImm32 key = andBlindedConstant(imm);
1191 and64(key.value1, dest);
1192 and64(key.value2, dest);
1193 } else
1194 and64(imm.asTrustedImm32(), dest);
1195 }
1196
6fe7ccc8
A
1197 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
1198 {
81345200 1199 if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
6fe7ccc8
A
1200 RegisterID scratchRegister = scratchRegisterForBlinding();
1201 loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
1202 return branchPtr(cond, left, scratchRegister);
1203 }
1204 return branchPtr(cond, left, right.asTrustedImmPtr());
1205 }
1206
1207 void storePtr(ImmPtr imm, Address dest)
1208 {
81345200 1209 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
6fe7ccc8
A
1210 RegisterID scratchRegister = scratchRegisterForBlinding();
1211 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1212 storePtr(scratchRegister, dest);
1213 } else
1214 storePtr(imm.asTrustedImmPtr(), dest);
1215 }
1216
93a37866
A
1217 void store64(Imm64 imm, Address dest)
1218 {
81345200 1219 if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
93a37866
A
1220 RegisterID scratchRegister = scratchRegisterForBlinding();
1221 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1222 store64(scratchRegister, dest);
1223 } else
1224 store64(imm.asTrustedImm64(), dest);
1225 }
1226
6fe7ccc8
A
1227#endif // !CPU(X86_64)
1228
    // Decides whether an untrusted 32-bit immediate must be blinded before it
    // is baked into generated code.
    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            // Small values, and values whose bitwise complement is small,
            // are cheap to guess anyway and not worth blinding.
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        // NOTE(review): shouldConsiderBlinding() is defined outside this
        // chunk; it appears to gate blinding globally — confirm its policy.
        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }
1259
1260 struct BlindedImm32 {
1261 BlindedImm32(int32_t v1, int32_t v2)
1262 : value1(v1)
1263 , value2(v2)
1264 {
1265 }
1266 TrustedImm32 value1;
1267 TrustedImm32 value2;
1268 };
1269
1270 uint32_t keyForConstant(uint32_t value, uint32_t& mask)
1271 {
1272 uint32_t key = random();
1273 if (value <= 0xff)
1274 mask = 0xff;
1275 else if (value <= 0xffff)
1276 mask = 0xffff;
1277 else if (value <= 0xffffff)
1278 mask = 0xffffff;
1279 else
1280 mask = 0xffffffff;
1281 return key & mask;
1282 }
1283
1284 uint32_t keyForConstant(uint32_t value)
1285 {
1286 uint32_t mask = 0;
1287 return keyForConstant(value, mask);
1288 }
1289
1290 BlindedImm32 xorBlindConstant(Imm32 imm)
1291 {
1292 uint32_t baseValue = imm.asTrustedImm32().m_value;
1293 uint32_t key = keyForConstant(baseValue);
1294 return BlindedImm32(baseValue ^ key, key);
1295 }
1296
1297 BlindedImm32 additionBlindedConstant(Imm32 imm)
1298 {
1299 // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
1300 static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
1301
1302 uint32_t baseValue = imm.asTrustedImm32().m_value;
1303 uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
1304 if (key > baseValue)
1305 key = key - baseValue;
1306 return BlindedImm32(baseValue - key, key);
1307 }
1308
1309 BlindedImm32 andBlindedConstant(Imm32 imm)
1310 {
1311 uint32_t baseValue = imm.asTrustedImm32().m_value;
1312 uint32_t mask = 0;
1313 uint32_t key = keyForConstant(baseValue, mask);
1314 ASSERT((baseValue & mask) == baseValue);
1315 return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
1316 }
1317
1318 BlindedImm32 orBlindedConstant(Imm32 imm)
1319 {
1320 uint32_t baseValue = imm.asTrustedImm32().m_value;
1321 uint32_t mask = 0;
1322 uint32_t key = keyForConstant(baseValue, mask);
1323 ASSERT((baseValue & mask) == baseValue);
1324 return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
1325 }
1326
1327 void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
1328 {
1329 move(constant.value1, dest);
1330 xor32(constant.value2, dest);
1331 }
1332
1333 void add32(Imm32 imm, RegisterID dest)
1334 {
1335 if (shouldBlind(imm)) {
1336 BlindedImm32 key = additionBlindedConstant(imm);
1337 add32(key.value1, dest);
1338 add32(key.value2, dest);
1339 } else
1340 add32(imm.asTrustedImm32(), dest);
1341 }
1342
1343 void addPtr(Imm32 imm, RegisterID dest)
1344 {
1345 if (shouldBlind(imm)) {
1346 BlindedImm32 key = additionBlindedConstant(imm);
1347 addPtr(key.value1, dest);
1348 addPtr(key.value2, dest);
1349 } else
1350 addPtr(imm.asTrustedImm32(), dest);
1351 }
1352
1353 void and32(Imm32 imm, RegisterID dest)
1354 {
1355 if (shouldBlind(imm)) {
1356 BlindedImm32 key = andBlindedConstant(imm);
1357 and32(key.value1, dest);
1358 and32(key.value2, dest);
1359 } else
1360 and32(imm.asTrustedImm32(), dest);
1361 }
1362
1363 void andPtr(Imm32 imm, RegisterID dest)
1364 {
1365 if (shouldBlind(imm)) {
1366 BlindedImm32 key = andBlindedConstant(imm);
1367 andPtr(key.value1, dest);
1368 andPtr(key.value2, dest);
1369 } else
1370 andPtr(imm.asTrustedImm32(), dest);
1371 }
1372
1373 void and32(Imm32 imm, RegisterID src, RegisterID dest)
1374 {
1375 if (shouldBlind(imm)) {
1376 if (src == dest)
1377 return and32(imm.asTrustedImm32(), dest);
1378 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1379 and32(src, dest);
1380 } else
1381 and32(imm.asTrustedImm32(), src, dest);
1382 }
1383
1384 void move(Imm32 imm, RegisterID dest)
1385 {
1386 if (shouldBlind(imm))
1387 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1388 else
1389 move(imm.asTrustedImm32(), dest);
1390 }
1391
1392 void or32(Imm32 imm, RegisterID src, RegisterID dest)
1393 {
1394 if (shouldBlind(imm)) {
1395 if (src == dest)
1396 return or32(imm, dest);
1397 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1398 or32(src, dest);
1399 } else
1400 or32(imm.asTrustedImm32(), src, dest);
1401 }
1402
1403 void or32(Imm32 imm, RegisterID dest)
1404 {
1405 if (shouldBlind(imm)) {
1406 BlindedImm32 key = orBlindedConstant(imm);
1407 or32(key.value1, dest);
1408 or32(key.value2, dest);
1409 } else
1410 or32(imm.asTrustedImm32(), dest);
1411 }
1412
1413 void poke(Imm32 value, int index = 0)
1414 {
1415 store32(value, addressForPoke(index));
1416 }
1417
1418 void poke(ImmPtr value, int index = 0)
1419 {
1420 storePtr(value, addressForPoke(index));
1421 }
1422
93a37866
A
#if CPU(X86_64) || CPU(ARM64)
    // Writes a (possibly blinded) 64-bit immediate into stack poke slot
    // "index"; only meaningful on 64-bit targets.
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)
93a37866 1429
6fe7ccc8
A
    // Stores an untrusted 32-bit immediate to memory, blinding it if required.
    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            // x86 can read-modify-write memory directly, so store the
            // xor-blinded half and then xor the key into the destination.
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                // The constant is emitted unblinded here; the random nops at
                // least perturb the code layout.
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }
1453
1454 void sub32(Imm32 imm, RegisterID dest)
1455 {
1456 if (shouldBlind(imm)) {
1457 BlindedImm32 key = additionBlindedConstant(imm);
1458 sub32(key.value1, dest);
1459 sub32(key.value2, dest);
1460 } else
1461 sub32(imm.asTrustedImm32(), dest);
1462 }
1463
1464 void subPtr(Imm32 imm, RegisterID dest)
1465 {
1466 if (shouldBlind(imm)) {
1467 BlindedImm32 key = additionBlindedConstant(imm);
1468 subPtr(key.value1, dest);
1469 subPtr(key.value2, dest);
1470 } else
1471 subPtr(imm.asTrustedImm32(), dest);
1472 }
1473
1474 void xor32(Imm32 imm, RegisterID src, RegisterID dest)
1475 {
1476 if (shouldBlind(imm)) {
1477 BlindedImm32 blind = xorBlindConstant(imm);
1478 xor32(blind.value1, src, dest);
1479 xor32(blind.value2, dest);
1480 } else
1481 xor32(imm.asTrustedImm32(), src, dest);
1482 }
1483
1484 void xor32(Imm32 imm, RegisterID dest)
1485 {
1486 if (shouldBlind(imm)) {
1487 BlindedImm32 blind = xorBlindConstant(imm);
1488 xor32(blind.value1, dest);
1489 xor32(blind.value2, dest);
1490 } else
1491 xor32(imm.asTrustedImm32(), dest);
1492 }
1493
1494 Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
1495 {
1496 if (shouldBlind(right)) {
81345200
A
1497 if (haveScratchRegisterForBlinding()) {
1498 loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
1499 return branch32(cond, left, scratchRegisterForBlinding());
6fe7ccc8
A
1500 }
1501 // If we don't have a scratch register available for use, we'll just
1502 // place a random number of nops.
1503 uint32_t nopCount = random() & 3;
1504 while (nopCount--)
1505 nop();
1506 return branch32(cond, left, right.asTrustedImm32());
1507 }
1508
1509 return branch32(cond, left, right.asTrustedImm32());
1510 }
1511
    // Blinded branchAdd32. When src aliases dest the blinded constant must be
    // staged in dest, so the addend is first preserved in the scratch
    // register.
    // NOTE(review): the assertion fires whenever src == dest, even if the
    // immediate would not actually be blinded — over-strict but harmless.
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                // Preserve the addend: dest is about to be overwritten with
                // the blinded constant.
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }
1527
    // Blinded branchMul32. When src aliases dest the multiplicand is first
    // preserved in the scratch register before dest is loaded with the
    // blinded constant.
    // NOTE(review): as with branchAdd32 above, the assertion fires whenever
    // src == dest, even if the immediate would not actually be blinded.
    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }
1543
1544 // branchSub32 takes a scratch register as 32 bit platforms make use of this,
1545 // with src == dst, and on x86-32 we don't have a platform scratch register.
1546 Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
1547 {
1548 if (shouldBlind(imm)) {
1549 ASSERT(scratch != dest);
1550 ASSERT(scratch != src);
1551 loadXorBlindedConstant(xorBlindConstant(imm), scratch);
1552 return branchSub32(cond, src, scratch, dest);
1553 }
1554 return branchSub32(cond, src, imm.asTrustedImm32(), dest);
1555 }
1556
6fe7ccc8
A
1557 void lshift32(Imm32 imm, RegisterID dest)
1558 {
1559 lshift32(trustedImm32ForShift(imm), dest);
1560 }
1561
1562 void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
1563 {
1564 lshift32(src, trustedImm32ForShift(amount), dest);
1565 }
1566
1567 void rshift32(Imm32 imm, RegisterID dest)
1568 {
1569 rshift32(trustedImm32ForShift(imm), dest);
1570 }
1571
1572 void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
1573 {
1574 rshift32(src, trustedImm32ForShift(amount), dest);
1575 }
1576
1577 void urshift32(Imm32 imm, RegisterID dest)
1578 {
1579 urshift32(trustedImm32ForShift(imm), dest);
1580 }
1581
1582 void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
1583 {
1584 urshift32(src, trustedImm32ForShift(amount), dest);
1585 }
9dae56ea
A
1586};
1587
1588} // namespace JSC
1589
6fe7ccc8
A
1590#else // ENABLE(ASSEMBLER)
1591
1592// If there is no assembler for this platform, at least allow code to make references to
1593// some of the things it would otherwise define, albeit without giving that code any way
1594// of doing anything useful.
class MacroAssembler {
private:
    // Not constructible: this stub exists only so code that names the type
    // (and its register enums) still compiles when no assembler backend is
    // available for the platform.
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};
1604
9dae56ea
A
1605#endif // ENABLE(ASSEMBLER)
1606
1607#endif // MacroAssembler_h