/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif

namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
#if CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
    using MacroAssemblerBase::branchTestPtr;
#endif
    using MacroAssemblerBase::move;

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
#endif

    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        default:
            ASSERT_NOT_REACHED();
            return DoubleEqual; // make compiler happy
        }
    }
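    // Note that inverting a floating-point comparison is not simply negating
    // the relation: each condition maps to the complement that also flips its
    // treatment of NaN, so the "OrUnordered" variants ensure the inverted
    // condition takes the opposite branch when either operand is NaN.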

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
#endif

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

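    // peek(), poke() and addressForPoke() address machine-word-sized stack
    // slots, indexed in units of sizeof(void*) from the current stack pointer.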
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }


    // Backward branches; these are currently all implemented using existing forward branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2)
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }
#endif

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition: returns a new condition that will produce
    // the same results given the same inputs, but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
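    // For example, the branch32(cond, imm, reg) overloads above rewrite
    // branch32(LessThan, imm, reg) as branch32(GreaterThan, reg, imm):
    // "imm < reg" and "reg > imm" test the same relation, while Equal and
    // NotEqual are symmetric and pass through unchanged.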

    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }


    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }

    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#else

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::addPtr;
    using MacroAssemblerBase::andPtr;
    using MacroAssemblerBase::branchSubPtr;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::storePtr;
    using MacroAssemblerBase::subPtr;
    using MacroAssemblerBase::xorPtr;

    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!isfinite(value))
            return true;

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
            return true;

        value = abs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return true;
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return true;

        return value > 0xff;
    }
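    // That is, a double is only considered safe to embed unblinded if it is
    // finite, already in normalised form, and representable as n / 8 for an
    // integer n with the result no larger than 0xff; anything else could carry
    // an attacker-controlled bit pattern into the instruction stream.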

    bool shouldBlind(ImmPtr imm)
    {
#if !defined(NDEBUG)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
#if CPU(X86_64)
            JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
#endif
        }
        }
        return shouldBlindForSpecificArch(value);
    }
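    // The X86_64 case above appears to assume that a pointer-sized immediate
    // is frequently a boxed JSValue: it decodes the bits and skips blinding
    // when the boxed payload is itself a safe int32 or double.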

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
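    // rotationBlindConstant() rotates the pointer value left by a random bit
    // count before it is embedded in the code, and loadRotationBlindedConstant()
    // rotates right by the same count, so the original pointer only ever
    // materialises in the destination register, never in the instruction stream.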

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

#endif

#endif // !CPU(X86_64)

#if ENABLE(JIT_CONSTANT_BLINDING)
    bool shouldBlind(Imm32 imm)
    {
#if !defined(NDEBUG)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
        }
        return shouldBlindForSpecificArch(value);
#endif
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }
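    // The key is masked down to the byte-width of the constant being blinded,
    // presumably so that both halves of a blinded pair stay in the same
    // immediate size class as the original value.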

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }

    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
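    // additionBlindedConstant() splits imm so that value1 + value2 == imm;
    // adding the halves in sequence reproduces the original addition. The mask
    // table keeps the key's low bits compatible with the alignment of imm (a
    // multiple of four splits into multiples of four, an even value into even
    // halves), so a blinded pointer offset keeps its alignment through the
    // intermediate step.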

    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
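    // Both splits are built so that repeating the operation restores the
    // original constant: for AND, value1 & value2 == imm (each half keeps the
    // bits of imm selected by key or ~key and forces the remaining bits on);
    // for OR, value1 | value2 == imm (the key partitions imm's set bits
    // between the two halves).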

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
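    // Since value1 == imm ^ key and value2 == key, the xor32 cancels the key.
    // For illustration, with imm = 0x1234 and a hypothetical key of 0x0f0f
    // this emits move(0x1d3b, dest) then xor32(0x0f0f, dest), and
    // 0x1d3b ^ 0x0f0f == 0x1234; the real key comes from random().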

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

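    // The branchAdd32/branchMul32 helpers below clobber dest with the blinded
    // constant before combining it with src, so when src aliases dest the
    // original operand must first be saved in a scratch register; if no
    // scratch register exists correct code cannot be generated, hence the
    // unconditional CRASH() below.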
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes a scratch register because 32-bit platforms make use of
    // this with src == dest, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
#endif
};

} // namespace JSC

#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h