/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

// Select the CPU-specific assembler backend and alias it as MacroAssemblerBase.
// The portable MacroAssembler class below derives from this alias and layers
// platform-agnostic convenience operations on top of it.
#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif

namespace JSC {

// Portable macro assembler. Extends the per-CPU MacroAssemblerBase with:
//  - convenience operations expressed in terms of other assembler methods
//    (pop/peek/poke, backwards branches, condition commuting/inverting);
//  - "Ptr"-suffixed operations that map onto 32-bit operations on 32-bit
//    targets (the 64-bit mappings live in the per-CPU backends);
//  - JIT constant blinding: when ENABLE(JIT_CONSTANT_BLINDING) is set,
//    untrusted immediates (Imm32 / ImmPtr) are split or rotated so that
//    attacker-chosen constants never appear verbatim in executable memory.
class MacroAssembler : public MacroAssemblerBase {
public:

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
#if CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
    using MacroAssemblerBase::branchTestPtr;
#endif
    using MacroAssemblerBase::move;

#if ENABLE(JIT_CONSTANT_BLINDING)
    // Pull in the TrustedImm32 overloads so they remain visible alongside
    // the blinded Imm32 overloads defined below.
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
#endif

    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

    // Returns the logical negation of a floating-point comparison condition.
    // Note that negating an ordered condition yields the corresponding
    // unordered one (and vice versa), so NaN operands are handled correctly.
    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        default:
            ASSERT_NOT_REACHED();
            return DoubleEqual; // make compiler happy
        }
    }

    // Only Zero/NonZero result conditions can be inverted; conditions such
    // as Overflow or Signed have no single-condition negation here.
    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    // Precondition: isInvertible(cond) — see above.
    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
#endif

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.

    // Discard the top stack slot by bumping the stack pointer.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    // Load the pointer-sized stack slot at 'index' (0 == top) into 'dest'
    // without adjusting the stack pointer.
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    // Address of the pointer-sized stack slot at 'index' (0 == top).
    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    // Store into the stack slot at 'index' without adjusting the stack pointer.
    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }


    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    // Immediate-on-the-left comparisons: implemented by commuting the
    // condition and swapping the operands.
    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2)
    // On most targets a patchable branch is just an ordinary branch wrapped
    // in a PatchableJump; ARMv7 provides its own specialized versions.
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }
#endif

    // Backwards (unconditional) jump to an already-bound label.
    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        // Equality conditions are symmetric, so they commute to themselves.
        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }


    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }


    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    // On 32-bit targets an untrusted pointer immediate is just an untrusted
    // 32-bit immediate; the Imm32 path applies blinding when enabled.
    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#else

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::addPtr;
    using MacroAssemblerBase::andPtr;
    using MacroAssemblerBase::branchSubPtr;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::storePtr;
    using MacroAssemblerBase::subPtr;
    using MacroAssemblerBase::xorPtr;

    // Returns true if this double's bit pattern could be abused as attacker-
    // controlled data and therefore must be blinded. Only "simple" values —
    // finite, normalized, non-negative multiples of 1/8 no greater than 0xff —
    // are considered safe to emit verbatim.
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!isfinite(value))
            return true;

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
            return true;

        value = abs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return true;
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return true;

        return value > 0xff;
    }

    // Decides whether an untrusted pointer immediate must be blinded before
    // being embedded in generated code.
    bool shouldBlind(ImmPtr imm)
    {
#if !defined(NDEBUG)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif
        // NOTE: in debug builds the early return above makes the remainder of
        // this function unreachable; the logic below applies to release builds.

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
#if CPU(X86_64)
            // On X86_64 pointer immediates may encode JSValues; treat the bits
            // as a JSValue and apply the int32/double safety checks.
            JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
#endif
        }
        }
        return shouldBlindForSpecificArch(value);
    }

    // A pointer constant blinded by bit rotation: 'value' is the original
    // constant rotated left by 'rotation' bits; rotating it right again
    // recovers the original (see loadRotationBlindedConstant).
    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    // Blind a pointer immediate by rotating it a random number of bits.
    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        // Rotate left by 'rotation' bits (shift-or-shift idiom).
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    // Materialize a rotation-blinded constant: load the rotated bits, then
    // rotate right to recover the original value in 'dest'.
    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

#endif

#endif // !CPU(X86_64)

#if ENABLE(JIT_CONSTANT_BLINDING)
    // Decides whether an untrusted 32-bit immediate must be blinded before
    // being embedded in generated code.
    bool shouldBlind(Imm32 imm)
    {
#if !defined(NDEBUG)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
        }
        return shouldBlindForSpecificArch(value);
#endif
    }

    // A constant split into two halves; how the halves recombine (xor, add,
    // and, or) depends on which *BlindedConstant function produced them.
    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    // Generates a random key for blinding 'value', masked so the key is no
    // wider than the value itself (the mask is also returned via 'mask').
    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

    // Split a constant into (value ^ key, key): xor-ing the halves together
    // reconstructs the original.
    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }

    // Split a constant into (value - key, key): adding the halves together
    // reconstructs the original.
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }

    // Split a constant into two halves whose bitwise AND reconstructs the
    // original (bits outside the key are forced on in one half and supplied
    // by the other, all clipped to 'mask').
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

    // Split a constant into two halves whose bitwise OR reconstructs the
    // original (the key partitions the bits between the two halves).
    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }

    // Materialize an xor-blinded constant into 'dest': load one half, then
    // xor in the other to recover the original value.
    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            // When src aliases dest we can reuse the two-operand blinded form;
            // otherwise materialize the blinded constant in dest first.
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            // x86 can xor an immediate directly into memory, so blind via a
            // store of one half followed by an in-memory xor of the other.
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                // Copy src aside so loadXorBlindedConstant can clobber dest.
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                // Copy src aside so loadXorBlindedConstant can clobber dest.
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
    // with src == dst, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
#endif
};

} // namespace JSC

#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    // Private constructor: this stub must never be instantiated.
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)


#endif // MacroAssembler_h