]> git.saurik.com Git - apple/javascriptcore.git/blame - assembler/MacroAssembler.h
JavaScriptCore-1218.34.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssembler.h
CommitLineData
9dae56ea 1/*
93a37866 2 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
9dae56ea
A
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef MacroAssembler_h
27#define MacroAssembler_h
28
93a37866
A
29#include <wtf/Platform.h>
30
9dae56ea
A
31#if ENABLE(ASSEMBLER)
32
f9bf01c6 33#if CPU(ARM_THUMB2)
ba379fdc
A
34#include "MacroAssemblerARMv7.h"
35namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
9dae56ea 36
93a37866
A
37#elif CPU(ARM64)
38#include "MacroAssemblerARM64.h"
39namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
40
f9bf01c6
A
41#elif CPU(ARM_TRADITIONAL)
42#include "MacroAssemblerARM.h"
43namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
44
4e4e5a6f
A
45#elif CPU(MIPS)
46#include "MacroAssemblerMIPS.h"
47namespace JSC {
48typedef MacroAssemblerMIPS MacroAssemblerBase;
49};
50
f9bf01c6 51#elif CPU(X86)
ba379fdc
A
52#include "MacroAssemblerX86.h"
53namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
9dae56ea 54
f9bf01c6 55#elif CPU(X86_64)
ba379fdc
A
56#include "MacroAssemblerX86_64.h"
57namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
9dae56ea 58
14957cd0
A
59#elif CPU(SH4)
60#include "MacroAssemblerSH4.h"
61namespace JSC {
62typedef MacroAssemblerSH4 MacroAssemblerBase;
63};
64
ba379fdc
A
65#else
66#error "The MacroAssembler is not supported on this platform."
9dae56ea
A
67#endif
68
ba379fdc
A
69namespace JSC {
70
71class MacroAssembler : public MacroAssemblerBase {
9dae56ea 72public:
ba379fdc
A
73
74 using MacroAssemblerBase::pop;
75 using MacroAssemblerBase::jump;
76 using MacroAssemblerBase::branch32;
6fe7ccc8
A
77 using MacroAssemblerBase::move;
78
79#if ENABLE(JIT_CONSTANT_BLINDING)
80 using MacroAssemblerBase::add32;
81 using MacroAssemblerBase::and32;
82 using MacroAssemblerBase::branchAdd32;
83 using MacroAssemblerBase::branchMul32;
84 using MacroAssemblerBase::branchSub32;
85 using MacroAssemblerBase::lshift32;
86 using MacroAssemblerBase::or32;
87 using MacroAssemblerBase::rshift32;
88 using MacroAssemblerBase::store32;
89 using MacroAssemblerBase::sub32;
90 using MacroAssemblerBase::urshift32;
91 using MacroAssemblerBase::xor32;
92#endif
93a37866
A
93
94 static bool isPtrAlignedAddressOffset(ptrdiff_t value)
95 {
96 return value == (int32_t)value;
97 }
98
99 static const double twoToThe32; // This is super useful for some double code.
9dae56ea 100
6fe7ccc8
A
101 // Utilities used by the DFG JIT.
102#if ENABLE(DFG_JIT)
103 using MacroAssemblerBase::invert;
104
105 static DoubleCondition invert(DoubleCondition cond)
106 {
107 switch (cond) {
108 case DoubleEqual:
109 return DoubleNotEqualOrUnordered;
110 case DoubleNotEqual:
111 return DoubleEqualOrUnordered;
112 case DoubleGreaterThan:
113 return DoubleLessThanOrEqualOrUnordered;
114 case DoubleGreaterThanOrEqual:
115 return DoubleLessThanOrUnordered;
116 case DoubleLessThan:
117 return DoubleGreaterThanOrEqualOrUnordered;
118 case DoubleLessThanOrEqual:
119 return DoubleGreaterThanOrUnordered;
120 case DoubleEqualOrUnordered:
121 return DoubleNotEqual;
122 case DoubleNotEqualOrUnordered:
123 return DoubleEqual;
124 case DoubleGreaterThanOrUnordered:
125 return DoubleLessThanOrEqual;
126 case DoubleGreaterThanOrEqualOrUnordered:
127 return DoubleLessThan;
128 case DoubleLessThanOrUnordered:
129 return DoubleGreaterThanOrEqual;
130 case DoubleLessThanOrEqualOrUnordered:
131 return DoubleGreaterThan;
132 default:
93a37866 133 RELEASE_ASSERT_NOT_REACHED();
6fe7ccc8
A
134 return DoubleEqual; // make compiler happy
135 }
136 }
137
138 static bool isInvertible(ResultCondition cond)
139 {
140 switch (cond) {
141 case Zero:
142 case NonZero:
143 return true;
144 default:
145 return false;
146 }
147 }
148
149 static ResultCondition invert(ResultCondition cond)
150 {
151 switch (cond) {
152 case Zero:
153 return NonZero;
154 case NonZero:
155 return Zero;
156 default:
93a37866 157 RELEASE_ASSERT_NOT_REACHED();
6fe7ccc8
A
158 return Zero; // Make compiler happy for release builds.
159 }
160 }
161#endif
ba379fdc
A
162
163 // Platform agnostic convenience functions,
164 // described in terms of other macro assembly methods.
165 void pop()
9dae56ea 166 {
14957cd0 167 addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
9dae56ea
A
168 }
169
ba379fdc 170 void peek(RegisterID dest, int index = 0)
9dae56ea 171 {
ba379fdc 172 loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
9dae56ea
A
173 }
174
6fe7ccc8
A
175 Address addressForPoke(int index)
176 {
177 return Address(stackPointerRegister, (index * sizeof(void*)));
178 }
179
ba379fdc 180 void poke(RegisterID src, int index = 0)
9dae56ea 181 {
6fe7ccc8 182 storePtr(src, addressForPoke(index));
9dae56ea
A
183 }
184
14957cd0 185 void poke(TrustedImm32 value, int index = 0)
9dae56ea 186 {
6fe7ccc8 187 store32(value, addressForPoke(index));
9dae56ea
A
188 }
189
14957cd0 190 void poke(TrustedImmPtr imm, int index = 0)
9dae56ea 191 {
6fe7ccc8 192 storePtr(imm, addressForPoke(index));
9dae56ea
A
193 }
194
93a37866
A
195#if !CPU(ARM64)
196 void pushToSave(RegisterID src)
197 {
198 push(src);
199 }
200 void popToRestore(RegisterID dest)
201 {
202 pop(dest);
203 }
204 void pushToSave(FPRegisterID src)
205 {
206 subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
207 storeDouble(src, stackPointerRegister);
208 }
209 void popToRestore(FPRegisterID dest)
210 {
211 loadDouble(stackPointerRegister, dest);
212 addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
213 }
214#endif // !CPU(ARM64)
215
216#if CPU(X86_64) || CPU(ARM64)
217 void peek64(RegisterID dest, int index = 0)
218 {
219 load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
220 }
221
222 void poke(TrustedImm64 value, int index = 0)
223 {
224 store64(value, addressForPoke(index));
225 }
226
227 void poke64(RegisterID src, int index = 0)
228 {
229 store64(src, addressForPoke(index));
230 }
231#endif
232
233#if CPU(MIPS)
234 void poke(FPRegisterID src, int index = 0)
235 {
236 ASSERT(!(index & 1));
237 storeDouble(src, addressForPoke(index));
238 }
239#endif
9dae56ea 240
ba379fdc 241 // Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
14957cd0 242 void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
9dae56ea 243 {
ba379fdc 244 branchPtr(cond, op1, imm).linkTo(target, this);
9dae56ea 245 }
6fe7ccc8
A
246 void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
247 {
248 branchPtr(cond, op1, imm).linkTo(target, this);
249 }
9dae56ea 250
14957cd0 251 void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
9dae56ea 252 {
ba379fdc 253 branch32(cond, op1, op2).linkTo(target, this);
9dae56ea
A
254 }
255
14957cd0 256 void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
9dae56ea 257 {
ba379fdc 258 branch32(cond, op1, imm).linkTo(target, this);
9dae56ea 259 }
6fe7ccc8
A
260
261 void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
262 {
263 branch32(cond, op1, imm).linkTo(target, this);
264 }
ba379fdc 265
14957cd0 266 void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
9dae56ea 267 {
ba379fdc 268 branch32(cond, left, right).linkTo(target, this);
9dae56ea 269 }
ba379fdc 270
6fe7ccc8 271 Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
9dae56ea 272 {
6fe7ccc8 273 return branch32(commute(cond), right, left);
9dae56ea 274 }
6fe7ccc8
A
275
276 Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
277 {
278 return branch32(commute(cond), right, left);
279 }
280
14957cd0 281 void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
9dae56ea 282 {
ba379fdc 283 branchTestPtr(cond, reg).linkTo(target, this);
9dae56ea
A
284 }
285
93a37866
A
286#if !CPU(ARM_THUMB2) && !CPU(ARM64)
287 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
288 {
289 return PatchableJump(branchPtr(cond, left, right));
290 }
291
6fe7ccc8
A
292 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
293 {
294 return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
295 }
296
297 PatchableJump patchableJump()
298 {
299 return PatchableJump(jump());
300 }
93a37866
A
301
302 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
303 {
304 return PatchableJump(branchTest32(cond, reg, mask));
305 }
306
307 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
308 {
309 return PatchableJump(branch32(cond, reg, imm));
310 }
6fe7ccc8
A
311#endif
312
ba379fdc 313 void jump(Label target)
9dae56ea 314 {
ba379fdc 315 jump().linkTo(target, this);
9dae56ea
A
316 }
317
6fe7ccc8
A
318 // Commute a relational condition, returns a new condition that will produce
319 // the same results given the same inputs but with their positions exchanged.
320 static RelationalCondition commute(RelationalCondition condition)
321 {
322 switch (condition) {
323 case Above:
324 return Below;
325 case AboveOrEqual:
326 return BelowOrEqual;
327 case Below:
328 return Above;
329 case BelowOrEqual:
330 return AboveOrEqual;
331 case GreaterThan:
332 return LessThan;
333 case GreaterThanOrEqual:
334 return LessThanOrEqual;
335 case LessThan:
336 return GreaterThan;
337 case LessThanOrEqual:
338 return GreaterThanOrEqual;
339 default:
340 break;
341 }
342
343 ASSERT(condition == Equal || condition == NotEqual);
344 return condition;
345 }
93a37866
A
346
347 static const unsigned BlindingModulus = 64;
348 bool shouldConsiderBlinding()
349 {
350 return !(random() & (BlindingModulus - 1));
351 }
9dae56ea 352
ba379fdc
A
353 // Ptr methods
354 // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
f9bf01c6 355 // FIXME: should this use a test for 32-bitness instead of this specific exception?
93a37866
A
356#if !CPU(X86_64) && !CPU(ARM64)
357 void addPtr(Address src, RegisterID dest)
358 {
359 add32(src, dest);
360 }
361
362 void addPtr(AbsoluteAddress src, RegisterID dest)
363 {
364 add32(src, dest);
365 }
366
ba379fdc 367 void addPtr(RegisterID src, RegisterID dest)
9dae56ea 368 {
ba379fdc 369 add32(src, dest);
9dae56ea
A
370 }
371
14957cd0 372 void addPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 373 {
ba379fdc 374 add32(imm, srcDest);
9dae56ea 375 }
9dae56ea 376
14957cd0 377 void addPtr(TrustedImmPtr imm, RegisterID dest)
ba379fdc 378 {
14957cd0 379 add32(TrustedImm32(imm), dest);
9dae56ea
A
380 }
381
14957cd0 382 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
9dae56ea 383 {
ba379fdc 384 add32(imm, src, dest);
9dae56ea 385 }
ba379fdc 386
6fe7ccc8
A
387 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
388 {
389 add32(imm, address);
390 }
391
ba379fdc 392 void andPtr(RegisterID src, RegisterID dest)
9dae56ea 393 {
ba379fdc 394 and32(src, dest);
9dae56ea 395 }
ba379fdc 396
14957cd0 397 void andPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 398 {
ba379fdc 399 and32(imm, srcDest);
9dae56ea 400 }
93a37866
A
401
402 void negPtr(RegisterID dest)
403 {
404 neg32(dest);
405 }
ba379fdc 406
9dae56ea
A
407 void orPtr(RegisterID src, RegisterID dest)
408 {
9dae56ea 409 or32(src, dest);
9dae56ea
A
410 }
411
6fe7ccc8
A
412 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
413 {
414 or32(op1, op2, dest);
415 }
416
14957cd0 417 void orPtr(TrustedImmPtr imm, RegisterID dest)
9dae56ea 418 {
14957cd0 419 or32(TrustedImm32(imm), dest);
9dae56ea
A
420 }
421
14957cd0 422 void orPtr(TrustedImm32 imm, RegisterID dest)
9dae56ea 423 {
9dae56ea 424 or32(imm, dest);
9dae56ea
A
425 }
426
9dae56ea
A
427 void subPtr(RegisterID src, RegisterID dest)
428 {
9dae56ea 429 sub32(src, dest);
9dae56ea
A
430 }
431
14957cd0 432 void subPtr(TrustedImm32 imm, RegisterID dest)
9dae56ea 433 {
9dae56ea 434 sub32(imm, dest);
9dae56ea
A
435 }
436
14957cd0 437 void subPtr(TrustedImmPtr imm, RegisterID dest)
9dae56ea 438 {
14957cd0 439 sub32(TrustedImm32(imm), dest);
9dae56ea
A
440 }
441
442 void xorPtr(RegisterID src, RegisterID dest)
443 {
9dae56ea 444 xor32(src, dest);
9dae56ea
A
445 }
446
14957cd0 447 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
9dae56ea 448 {
9dae56ea 449 xor32(imm, srcDest);
9dae56ea 450 }
9dae56ea 451
9dae56ea
A
452
453 void loadPtr(ImplicitAddress address, RegisterID dest)
454 {
9dae56ea 455 load32(address, dest);
9dae56ea
A
456 }
457
458 void loadPtr(BaseIndex address, RegisterID dest)
459 {
9dae56ea 460 load32(address, dest);
9dae56ea
A
461 }
462
6fe7ccc8 463 void loadPtr(const void* address, RegisterID dest)
9dae56ea 464 {
9dae56ea 465 load32(address, dest);
9dae56ea
A
466 }
467
ba379fdc 468 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
9dae56ea 469 {
ba379fdc 470 return load32WithAddressOffsetPatch(address, dest);
9dae56ea 471 }
14957cd0
A
472
473 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
474 {
475 return load32WithCompactAddressOffsetPatch(address, dest);
476 }
9dae56ea 477
6fe7ccc8
A
478 void move(ImmPtr imm, RegisterID dest)
479 {
480 move(Imm32(imm.asTrustedImmPtr()), dest);
481 }
482
14957cd0 483 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
9dae56ea 484 {
14957cd0 485 compare32(cond, left, right, dest);
9dae56ea
A
486 }
487
488 void storePtr(RegisterID src, ImplicitAddress address)
489 {
9dae56ea 490 store32(src, address);
9dae56ea
A
491 }
492
ba379fdc 493 void storePtr(RegisterID src, BaseIndex address)
9dae56ea 494 {
ba379fdc 495 store32(src, address);
9dae56ea
A
496 }
497
ba379fdc 498 void storePtr(RegisterID src, void* address)
9dae56ea 499 {
9dae56ea 500 store32(src, address);
9dae56ea
A
501 }
502
14957cd0 503 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
9dae56ea 504 {
14957cd0 505 store32(TrustedImm32(imm), address);
9dae56ea 506 }
6fe7ccc8
A
507
508 void storePtr(ImmPtr imm, Address address)
509 {
510 store32(Imm32(imm.asTrustedImmPtr()), address);
511 }
9dae56ea 512
14957cd0 513 void storePtr(TrustedImmPtr imm, void* address)
9dae56ea 514 {
14957cd0 515 store32(TrustedImm32(imm), address);
9dae56ea 516 }
9dae56ea 517
ba379fdc 518 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
9dae56ea 519 {
ba379fdc 520 return store32WithAddressOffsetPatch(src, address);
9dae56ea
A
521 }
522
14957cd0 523 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
9dae56ea 524 {
ba379fdc 525 return branch32(cond, left, right);
9dae56ea
A
526 }
527
14957cd0 528 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
9dae56ea 529 {
14957cd0 530 return branch32(cond, left, TrustedImm32(right));
9dae56ea 531 }
6fe7ccc8
A
532
533 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
534 {
535 return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
536 }
9dae56ea 537
14957cd0 538 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
9dae56ea 539 {
ba379fdc 540 return branch32(cond, left, right);
9dae56ea
A
541 }
542
14957cd0 543 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
9dae56ea 544 {
ba379fdc 545 return branch32(cond, left, right);
9dae56ea
A
546 }
547
14957cd0 548 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
9dae56ea 549 {
ba379fdc 550 return branch32(cond, left, right);
9dae56ea
A
551 }
552
14957cd0 553 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
9dae56ea 554 {
14957cd0 555 return branch32(cond, left, TrustedImm32(right));
9dae56ea 556 }
6fe7ccc8 557
14957cd0 558 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
9dae56ea 559 {
14957cd0 560 return branch32(cond, left, TrustedImm32(right));
9dae56ea
A
561 }
562
93a37866
A
563 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
564 {
565 return branchSub32(cond, src, dest);
566 }
567
14957cd0 568 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
9dae56ea 569 {
ba379fdc 570 return branchTest32(cond, reg, mask);
9dae56ea
A
571 }
572
14957cd0 573 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 574 {
ba379fdc 575 return branchTest32(cond, reg, mask);
9dae56ea
A
576 }
577
14957cd0 578 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 579 {
ba379fdc 580 return branchTest32(cond, address, mask);
9dae56ea
A
581 }
582
14957cd0 583 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
9dae56ea 584 {
ba379fdc 585 return branchTest32(cond, address, mask);
9dae56ea
A
586 }
587
14957cd0 588 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
9dae56ea 589 {
ba379fdc 590 return branchAdd32(cond, src, dest);
9dae56ea
A
591 }
592
14957cd0 593 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
9dae56ea 594 {
ba379fdc 595 return branchSub32(cond, imm, dest);
9dae56ea 596 }
4e4e5a6f 597 using MacroAssemblerBase::branchTest8;
14957cd0 598 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
4e4e5a6f
A
599 {
600 return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
601 }
6fe7ccc8 602#else
93a37866
A
603 void addPtr(RegisterID src, RegisterID dest)
604 {
605 add64(src, dest);
606 }
607
608 void addPtr(Address src, RegisterID dest)
609 {
610 add64(src, dest);
611 }
612
613 void addPtr(TrustedImm32 imm, RegisterID srcDest)
614 {
615 add64(imm, srcDest);
616 }
617
618 void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
619 {
620 add64(imm, src, dest);
621 }
622
623 void addPtr(TrustedImm32 imm, Address address)
624 {
625 add64(imm, address);
626 }
627
628 void addPtr(AbsoluteAddress src, RegisterID dest)
629 {
630 add64(src, dest);
631 }
632
633 void addPtr(TrustedImmPtr imm, RegisterID dest)
634 {
635 add64(TrustedImm64(imm), dest);
636 }
637
638 void addPtr(TrustedImm32 imm, AbsoluteAddress address)
639 {
640 add64(imm, address);
641 }
642
643 void andPtr(RegisterID src, RegisterID dest)
644 {
645 and64(src, dest);
646 }
647
648 void andPtr(TrustedImm32 imm, RegisterID srcDest)
649 {
650 and64(imm, srcDest);
651 }
652
653 void negPtr(RegisterID dest)
654 {
655 neg64(dest);
656 }
657
658 void orPtr(RegisterID src, RegisterID dest)
659 {
660 or64(src, dest);
661 }
662
663 void orPtr(TrustedImm32 imm, RegisterID dest)
664 {
665 or64(imm, dest);
666 }
667
668 void orPtr(TrustedImmPtr imm, RegisterID dest)
669 {
670 or64(TrustedImm64(imm), dest);
671 }
672
673 void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
674 {
675 or64(op1, op2, dest);
676 }
677
678 void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
679 {
680 or64(imm, src, dest);
681 }
682
683 void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
684 {
685 rotateRight64(imm, srcDst);
686 }
687
688 void subPtr(RegisterID src, RegisterID dest)
689 {
690 sub64(src, dest);
691 }
692
693 void subPtr(TrustedImm32 imm, RegisterID dest)
694 {
695 sub64(imm, dest);
696 }
697
698 void subPtr(TrustedImmPtr imm, RegisterID dest)
699 {
700 sub64(TrustedImm64(imm), dest);
701 }
702
703 void xorPtr(RegisterID src, RegisterID dest)
704 {
705 xor64(src, dest);
706 }
707
708 void xorPtr(RegisterID src, Address dest)
709 {
710 xor64(src, dest);
711 }
712
713 void xorPtr(TrustedImm32 imm, RegisterID srcDest)
714 {
715 xor64(imm, srcDest);
716 }
717
718 void loadPtr(ImplicitAddress address, RegisterID dest)
719 {
720 load64(address, dest);
721 }
722
723 void loadPtr(BaseIndex address, RegisterID dest)
724 {
725 load64(address, dest);
726 }
727
728 void loadPtr(const void* address, RegisterID dest)
729 {
730 load64(address, dest);
731 }
732
733 DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
734 {
735 return load64WithAddressOffsetPatch(address, dest);
736 }
737
738 DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
739 {
740 return load64WithCompactAddressOffsetPatch(address, dest);
741 }
742
743 void storePtr(RegisterID src, ImplicitAddress address)
744 {
745 store64(src, address);
746 }
747
748 void storePtr(RegisterID src, BaseIndex address)
749 {
750 store64(src, address);
751 }
752
753 void storePtr(RegisterID src, void* address)
754 {
755 store64(src, address);
756 }
757
758 void storePtr(TrustedImmPtr imm, ImplicitAddress address)
759 {
760 store64(TrustedImm64(imm), address);
761 }
762
763 void storePtr(TrustedImmPtr imm, BaseIndex address)
764 {
765 store64(TrustedImm64(imm), address);
766 }
767
768 DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
769 {
770 return store64WithAddressOffsetPatch(src, address);
771 }
772
773 void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
774 {
775 compare64(cond, left, right, dest);
776 }
777
778 void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
779 {
780 compare64(cond, left, right, dest);
781 }
6fe7ccc8 782
93a37866
A
783 void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
784 {
785 test64(cond, reg, mask, dest);
786 }
787
788 void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
789 {
790 test64(cond, reg, mask, dest);
791 }
792
793 Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
794 {
795 return branch64(cond, left, right);
796 }
797
798 Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
799 {
800 return branch64(cond, left, TrustedImm64(right));
801 }
802
803 Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
804 {
805 return branch64(cond, left, right);
806 }
807
808 Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
809 {
810 return branch64(cond, left, right);
811 }
812
813 Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
814 {
815 return branch64(cond, left, right);
816 }
817
818 Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
819 {
820 return branch64(cond, left, TrustedImm64(right));
821 }
822
823 Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
824 {
825 return branchTest64(cond, reg, mask);
826 }
827
828 Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
829 {
830 return branchTest64(cond, reg, mask);
831 }
832
833 Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
834 {
835 return branchTest64(cond, address, mask);
836 }
837
838 Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
839 {
840 return branchTest64(cond, address, reg);
841 }
842
843 Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
844 {
845 return branchTest64(cond, address, mask);
846 }
847
848 Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
849 {
850 return branchTest64(cond, address, mask);
851 }
852
853 Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
854 {
855 return branchAdd64(cond, imm, dest);
856 }
857
858 Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
859 {
860 return branchAdd64(cond, src, dest);
861 }
862
863 Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
864 {
865 return branchSub64(cond, imm, dest);
866 }
867
868 Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
869 {
870 return branchSub64(cond, src, dest);
871 }
872
873 Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
874 {
875 return branchSub64(cond, src1, src2, dest);
876 }
877
6fe7ccc8 878#if ENABLE(JIT_CONSTANT_BLINDING)
93a37866 879 using MacroAssemblerBase::and64;
6fe7ccc8 880 using MacroAssemblerBase::convertInt32ToDouble;
93a37866 881 using MacroAssemblerBase::store64;
6fe7ccc8
A
882 bool shouldBlindDouble(double value)
883 {
884 // Don't trust NaN or +/-Infinity
93a37866
A
885 if (!std::isfinite(value))
886 return shouldConsiderBlinding();
6fe7ccc8
A
887
888 // Try to force normalisation, and check that there's no change
889 // in the bit pattern
93a37866
A
890 if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
891 return shouldConsiderBlinding();
6fe7ccc8
A
892
893 value = abs(value);
894 // Only allow a limited set of fractional components
895 double scaledValue = value * 8;
896 if (scaledValue / 8 != value)
93a37866 897 return shouldConsiderBlinding();
6fe7ccc8
A
898 double frac = scaledValue - floor(scaledValue);
899 if (frac != 0.0)
93a37866 900 return shouldConsiderBlinding();
6fe7ccc8
A
901
902 return value > 0xff;
903 }
904
905 bool shouldBlind(ImmPtr imm)
906 {
93a37866 907#if ENABLE(FORCED_JIT_BLINDING)
6fe7ccc8
A
908 UNUSED_PARAM(imm);
909 // Debug always blind all constants, if only so we know
910 // if we've broken blinding during patch development.
911 return true;
912#endif
913
914 // First off we'll special case common, "safe" values to avoid hurting
915 // performance too much
916 uintptr_t value = imm.asTrustedImmPtr().asIntptr();
917 switch (value) {
918 case 0xffff:
919 case 0xffffff:
920 case 0xffffffffL:
921 case 0xffffffffffL:
922 case 0xffffffffffffL:
923 case 0xffffffffffffffL:
924 case 0xffffffffffffffffL:
925 return false;
926 default: {
927 if (value <= 0xff)
928 return false;
93a37866 929 if (~value <= 0xff)
6fe7ccc8 930 return false;
6fe7ccc8
A
931 }
932 }
93a37866
A
933
934 if (!shouldConsiderBlinding())
935 return false;
936
6fe7ccc8
A
937 return shouldBlindForSpecificArch(value);
938 }
939
940 struct RotatedImmPtr {
941 RotatedImmPtr(uintptr_t v1, uint8_t v2)
942 : value(v1)
943 , rotation(v2)
944 {
945 }
946 TrustedImmPtr value;
947 TrustedImm32 rotation;
948 };
949
950 RotatedImmPtr rotationBlindConstant(ImmPtr imm)
951 {
952 uint8_t rotation = random() % (sizeof(void*) * 8);
953 uintptr_t value = imm.asTrustedImmPtr().asIntptr();
954 value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
955 return RotatedImmPtr(value, rotation);
956 }
957
958 void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
959 {
960 move(constant.value, dest);
961 rotateRightPtr(constant.rotation, dest);
962 }
963
93a37866
A
964 bool shouldBlind(Imm64 imm)
965 {
966#if ENABLE(FORCED_JIT_BLINDING)
967 UNUSED_PARAM(imm);
968 // Debug always blind all constants, if only so we know
969 // if we've broken blinding during patch development.
970 return true;
971#endif
972
973 // First off we'll special case common, "safe" values to avoid hurting
974 // performance too much
975 uint64_t value = imm.asTrustedImm64().m_value;
976 switch (value) {
977 case 0xffff:
978 case 0xffffff:
979 case 0xffffffffL:
980 case 0xffffffffffL:
981 case 0xffffffffffffL:
982 case 0xffffffffffffffL:
983 case 0xffffffffffffffffL:
984 return false;
985 default: {
986 if (value <= 0xff)
987 return false;
988 if (~value <= 0xff)
989 return false;
990
991 JSValue jsValue = JSValue::decode(value);
992 if (jsValue.isInt32())
993 return shouldBlind(Imm32(jsValue.asInt32()));
994 if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
995 return false;
996
997 if (!shouldBlindDouble(bitwise_cast<double>(value)))
998 return false;
999 }
1000 }
1001
1002 if (!shouldConsiderBlinding())
1003 return false;
1004
1005 return shouldBlindForSpecificArch(value);
1006 }
1007
1008 struct RotatedImm64 {
1009 RotatedImm64(uint64_t v1, uint8_t v2)
1010 : value(v1)
1011 , rotation(v2)
1012 {
1013 }
1014 TrustedImm64 value;
1015 TrustedImm32 rotation;
1016 };
1017
1018 RotatedImm64 rotationBlindConstant(Imm64 imm)
1019 {
1020 uint8_t rotation = random() % (sizeof(int64_t) * 8);
1021 uint64_t value = imm.asTrustedImm64().m_value;
1022 value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
1023 return RotatedImm64(value, rotation);
1024 }
1025
1026 void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
1027 {
1028 move(constant.value, dest);
1029 rotateRight64(constant.rotation, dest);
1030 }
1031
6fe7ccc8
A
1032 void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
1033 {
1034 if (shouldBlind(imm)) {
1035 RegisterID scratchRegister = scratchRegisterForBlinding();
1036 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
1037 convertInt32ToDouble(scratchRegister, dest);
1038 } else
1039 convertInt32ToDouble(imm.asTrustedImm32(), dest);
1040 }
1041
1042 void move(ImmPtr imm, RegisterID dest)
1043 {
1044 if (shouldBlind(imm))
1045 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1046 else
1047 move(imm.asTrustedImmPtr(), dest);
1048 }
1049
93a37866
A
1050 void move(Imm64 imm, RegisterID dest)
1051 {
1052 if (shouldBlind(imm))
1053 loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
1054 else
1055 move(imm.asTrustedImm64(), dest);
1056 }
1057
1058 void and64(Imm32 imm, RegisterID dest)
1059 {
1060 if (shouldBlind(imm)) {
1061 BlindedImm32 key = andBlindedConstant(imm);
1062 and64(key.value1, dest);
1063 and64(key.value2, dest);
1064 } else
1065 and64(imm.asTrustedImm32(), dest);
1066 }
1067
6fe7ccc8
A
1068 Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
1069 {
1070 if (shouldBlind(right)) {
1071 RegisterID scratchRegister = scratchRegisterForBlinding();
1072 loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
1073 return branchPtr(cond, left, scratchRegister);
1074 }
1075 return branchPtr(cond, left, right.asTrustedImmPtr());
1076 }
1077
1078 void storePtr(ImmPtr imm, Address dest)
1079 {
1080 if (shouldBlind(imm)) {
1081 RegisterID scratchRegister = scratchRegisterForBlinding();
1082 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1083 storePtr(scratchRegister, dest);
1084 } else
1085 storePtr(imm.asTrustedImmPtr(), dest);
1086 }
1087
93a37866
A
1088 void store64(Imm64 imm, Address dest)
1089 {
1090 if (shouldBlind(imm)) {
1091 RegisterID scratchRegister = scratchRegisterForBlinding();
1092 loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
1093 store64(scratchRegister, dest);
1094 } else
1095 store64(imm.asTrustedImm64(), dest);
1096 }
1097
9dae56ea 1098#endif
9dae56ea 1099
6fe7ccc8
A
1100#endif // !CPU(X86_64)
1101
1102#if ENABLE(JIT_CONSTANT_BLINDING)
1103 bool shouldBlind(Imm32 imm)
93a37866
A
1104 {
1105#if ENABLE(FORCED_JIT_BLINDING)
6fe7ccc8
A
1106 UNUSED_PARAM(imm);
1107 // Debug always blind all constants, if only so we know
1108 // if we've broken blinding during patch development.
1109 return true;
1110#else
1111
1112 // First off we'll special case common, "safe" values to avoid hurting
1113 // performance too much
1114 uint32_t value = imm.asTrustedImm32().m_value;
1115 switch (value) {
1116 case 0xffff:
1117 case 0xffffff:
1118 case 0xffffffff:
1119 return false;
1120 default:
1121 if (value <= 0xff)
1122 return false;
93a37866
A
1123 if (~value <= 0xff)
1124 return false;
6fe7ccc8 1125 }
93a37866
A
1126
1127 if (!shouldConsiderBlinding())
1128 return false;
1129
6fe7ccc8
A
1130 return shouldBlindForSpecificArch(value);
1131#endif
1132 }
1133
    // A constant split into two TrustedImm32 halves; combining them with the
    // matching operation (xor, add, and, or) reproduces the original value
    // without it ever appearing verbatim in the generated code.
    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };
1143
1144 uint32_t keyForConstant(uint32_t value, uint32_t& mask)
1145 {
1146 uint32_t key = random();
1147 if (value <= 0xff)
1148 mask = 0xff;
1149 else if (value <= 0xffff)
1150 mask = 0xffff;
1151 else if (value <= 0xffffff)
1152 mask = 0xffffff;
1153 else
1154 mask = 0xffffffff;
1155 return key & mask;
1156 }
1157
1158 uint32_t keyForConstant(uint32_t value)
1159 {
1160 uint32_t mask = 0;
1161 return keyForConstant(value, mask);
1162 }
1163
1164 BlindedImm32 xorBlindConstant(Imm32 imm)
1165 {
1166 uint32_t baseValue = imm.asTrustedImm32().m_value;
1167 uint32_t key = keyForConstant(baseValue);
1168 return BlindedImm32(baseValue ^ key, key);
1169 }
1170
1171 BlindedImm32 additionBlindedConstant(Imm32 imm)
1172 {
1173 // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
1174 static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
1175
1176 uint32_t baseValue = imm.asTrustedImm32().m_value;
1177 uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
1178 if (key > baseValue)
1179 key = key - baseValue;
1180 return BlindedImm32(baseValue - key, key);
1181 }
1182
1183 BlindedImm32 andBlindedConstant(Imm32 imm)
1184 {
1185 uint32_t baseValue = imm.asTrustedImm32().m_value;
1186 uint32_t mask = 0;
1187 uint32_t key = keyForConstant(baseValue, mask);
1188 ASSERT((baseValue & mask) == baseValue);
1189 return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
1190 }
1191
1192 BlindedImm32 orBlindedConstant(Imm32 imm)
1193 {
1194 uint32_t baseValue = imm.asTrustedImm32().m_value;
1195 uint32_t mask = 0;
1196 uint32_t key = keyForConstant(baseValue, mask);
1197 ASSERT((baseValue & mask) == baseValue);
1198 return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
1199 }
1200
    // Materialize a xor-blinded constant: dest = value1 ^ value2, which
    // equals the original (pre-blinding) value.
    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
1206
1207 void add32(Imm32 imm, RegisterID dest)
1208 {
1209 if (shouldBlind(imm)) {
1210 BlindedImm32 key = additionBlindedConstant(imm);
1211 add32(key.value1, dest);
1212 add32(key.value2, dest);
1213 } else
1214 add32(imm.asTrustedImm32(), dest);
1215 }
1216
1217 void addPtr(Imm32 imm, RegisterID dest)
1218 {
1219 if (shouldBlind(imm)) {
1220 BlindedImm32 key = additionBlindedConstant(imm);
1221 addPtr(key.value1, dest);
1222 addPtr(key.value2, dest);
1223 } else
1224 addPtr(imm.asTrustedImm32(), dest);
1225 }
1226
1227 void and32(Imm32 imm, RegisterID dest)
1228 {
1229 if (shouldBlind(imm)) {
1230 BlindedImm32 key = andBlindedConstant(imm);
1231 and32(key.value1, dest);
1232 and32(key.value2, dest);
1233 } else
1234 and32(imm.asTrustedImm32(), dest);
1235 }
1236
1237 void andPtr(Imm32 imm, RegisterID dest)
1238 {
1239 if (shouldBlind(imm)) {
1240 BlindedImm32 key = andBlindedConstant(imm);
1241 andPtr(key.value1, dest);
1242 andPtr(key.value2, dest);
1243 } else
1244 andPtr(imm.asTrustedImm32(), dest);
1245 }
1246
1247 void and32(Imm32 imm, RegisterID src, RegisterID dest)
1248 {
1249 if (shouldBlind(imm)) {
1250 if (src == dest)
1251 return and32(imm.asTrustedImm32(), dest);
1252 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1253 and32(src, dest);
1254 } else
1255 and32(imm.asTrustedImm32(), src, dest);
1256 }
1257
1258 void move(Imm32 imm, RegisterID dest)
1259 {
1260 if (shouldBlind(imm))
1261 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1262 else
1263 move(imm.asTrustedImm32(), dest);
1264 }
1265
1266 void or32(Imm32 imm, RegisterID src, RegisterID dest)
1267 {
1268 if (shouldBlind(imm)) {
1269 if (src == dest)
1270 return or32(imm, dest);
1271 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1272 or32(src, dest);
1273 } else
1274 or32(imm.asTrustedImm32(), src, dest);
1275 }
1276
1277 void or32(Imm32 imm, RegisterID dest)
1278 {
1279 if (shouldBlind(imm)) {
1280 BlindedImm32 key = orBlindedConstant(imm);
1281 or32(key.value1, dest);
1282 or32(key.value2, dest);
1283 } else
1284 or32(imm.asTrustedImm32(), dest);
1285 }
1286
    // Store a (possibly blinded) 32-bit constant into stack poke slot 'index'.
    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }
1291
    // Store a (possibly blinded) pointer constant into stack poke slot 'index'.
    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }
1296
93a37866
A
#if CPU(X86_64) || CPU(ARM64)
    // Store a (possibly blinded) 64-bit constant into stack poke slot 'index';
    // only 64-bit targets have store64.
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif
1303
6fe7ccc8
A
    // Store a 32-bit constant to memory, blinding it when required.
    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            // x86 can xor an immediate directly against memory, so the
            // blinded halves can be combined in place without a scratch
            // register: store (value ^ key), then xor key into the slot.
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            // Non-x86: rebuild the constant in the scratch register, if one
            // is available (a zero return means "no scratch register").
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }
1327
1328 void sub32(Imm32 imm, RegisterID dest)
1329 {
1330 if (shouldBlind(imm)) {
1331 BlindedImm32 key = additionBlindedConstant(imm);
1332 sub32(key.value1, dest);
1333 sub32(key.value2, dest);
1334 } else
1335 sub32(imm.asTrustedImm32(), dest);
1336 }
1337
1338 void subPtr(Imm32 imm, RegisterID dest)
1339 {
1340 if (shouldBlind(imm)) {
1341 BlindedImm32 key = additionBlindedConstant(imm);
1342 subPtr(key.value1, dest);
1343 subPtr(key.value2, dest);
1344 } else
1345 subPtr(imm.asTrustedImm32(), dest);
1346 }
1347
1348 void xor32(Imm32 imm, RegisterID src, RegisterID dest)
1349 {
1350 if (shouldBlind(imm)) {
1351 BlindedImm32 blind = xorBlindConstant(imm);
1352 xor32(blind.value1, src, dest);
1353 xor32(blind.value2, dest);
1354 } else
1355 xor32(imm.asTrustedImm32(), src, dest);
1356 }
1357
1358 void xor32(Imm32 imm, RegisterID dest)
1359 {
1360 if (shouldBlind(imm)) {
1361 BlindedImm32 blind = xorBlindConstant(imm);
1362 xor32(blind.value1, dest);
1363 xor32(blind.value2, dest);
1364 } else
1365 xor32(imm.asTrustedImm32(), dest);
1366 }
1367
    // Compare against a 32-bit constant, blinding it when required.
    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            // Rebuild the constant in the scratch register if one is
            // available (a zero return means "no scratch register").
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }
1385
    // dest = src + imm with a conditional branch on the result, blinding the
    // immediate when required.
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        // NOTE(review): this assert fires for src == dest even when the
        // immediate ends up not being blinded — confirm that is intended.
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            // dest will be clobbered by the reconstructed constant, so if it
            // aliases src, preserve src in the scratch register first.
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }
1403
    // dest = src * imm with a conditional branch on the result, blinding the
    // immediate when required.
    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        // NOTE(review): this assert fires for src == dest even when the
        // immediate ends up not being blinded — confirm that is intended.
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            // dest will be clobbered by the reconstructed constant, so if it
            // aliases src, preserve src in the scratch register first.
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }
1421
1422 // branchSub32 takes a scratch register as 32 bit platforms make use of this,
1423 // with src == dst, and on x86-32 we don't have a platform scratch register.
1424 Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
1425 {
1426 if (shouldBlind(imm)) {
1427 ASSERT(scratch != dest);
1428 ASSERT(scratch != src);
1429 loadXorBlindedConstant(xorBlindConstant(imm), scratch);
1430 return branchSub32(cond, src, scratch, dest);
1431 }
1432 return branchSub32(cond, src, imm.asTrustedImm32(), dest);
1433 }
1434
    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    // Reduce a shift amount to its low 5 bits and promote it to trusted,
    // bypassing blinding entirely.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
1441
    // Left shift by an immediate; shift amounts are masked, not blinded.
    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }
1446
    // dest = src &lt;&lt; amount; shift amounts are masked, not blinded.
    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }
1451
    // Arithmetic right shift by an immediate; shift amounts are masked, not blinded.
    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }
1456
    // dest = src &gt;&gt; amount (arithmetic); shift amounts are masked, not blinded.
    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }
1461
    // Logical right shift by an immediate; shift amounts are masked, not blinded.
    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }
1466
    // dest = src &gt;&gt; amount (logical); shift amounts are masked, not blinded.
    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
1471#endif
9dae56ea
A
1472};
1473
1474} // namespace JSC
1475
6fe7ccc8
A
1476#else // ENABLE(ASSEMBLER)
1477
// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    // Not constructible: this stub only exists so that dependent code compiles.
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};
1490
9dae56ea
A
1491#endif // ENABLE(ASSEMBLER)
1492
1493#endif // MacroAssembler_h