// Source: dfg/DFGSpeculativeJIT.cpp (apple/javascriptcore.git mirror at git.saurik.com,
// blob 71004fe9140a0b80ac80d997f35f6268001de59b)
1 /*
2 * Copyright (C) 2011 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
#include "config.h"
#include "DFGSpeculativeJIT.h"

#include <utility>
28
29 #if ENABLE(DFG_JIT)
30
31 namespace JSC { namespace DFG {
32
// Fill the value for the given node into a GPR, speculating that it is an
// integer. When 'strict' is true the result is always an untagged int32
// (DataFormatInteger); when false the value may instead be left as a tagged
// JSValue integer (DataFormatJSInteger). 'returnFormat' reports which format
// was actually produced. If the value is statically known not to be an
// integer, speculative execution is terminated.
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
{
    Node& node = m_jit.graph()[nodeIndex];
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Value is not currently in a register; materialize it from the
        // constant pool or its spill slot.
        GPRReg gpr = allocate();

        if (node.isConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            if (isInt32Constant(nodeIndex)) {
                // An int32 constant can be loaded directly as a raw integer.
                m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
                info.fillInteger(gpr);
                returnFormat = DataFormatInteger;
                return gpr;
            }
            // Not an int32 constant: load the boxed JSValue; the DataFormatJS
            // fall-through below will perform the integer speculation check.
            m_jit.move(constantAsJSValueAsImmPtr(nodeIndex), gpr);
        } else {
            DataFormat spillFormat = info.spillFormat();
            ASSERT(spillFormat & DataFormatJS);

            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);

            if (spillFormat == DataFormatJSInteger) {
                // If we know this was spilled as an integer we can fill without checking.
                if (strict) {
                    m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                    info.fillInteger(gpr);
                    returnFormat = DataFormatInteger;
                    return gpr;
                }
                m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillJSValue(gpr, DataFormatJSInteger);
                returnFormat = DataFormatJSInteger;
                return gpr;
            }
            // Spilled as a generic JSValue; reload it and check below.
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
        }

        // Fill as JSValue, and fall through.
        info.fillJSValue(gpr, DataFormatJSInteger);
        m_gprs.unlock(gpr);
    }
    // FALLTHROUGH (deliberate): the value is now a JSValue in a register but
    // has not yet been checked to be an integer.

    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        // Values encoded below tagTypeNumberRegister fail the integer
        // speculation and divert to the non-speculative path.
        speculationCheck(m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
        info.fillJSValue(gpr, DataFormatJSInteger);
        // If !strict we're done, return.
        if (!strict) {
            returnFormat = DataFormatJSInteger;
            return gpr;
        }
        // else fall through & handle as DataFormatJSInteger.
        m_gprs.unlock(gpr);
    }

    case DataFormatJSInteger: {
        // In a strict fill we need to strip off the value tag.
        if (strict) {
            GPRReg gpr = info.gpr();
            GPRReg result;
            // If the register has already been locked we need to take a copy.
            // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
            if (m_gprs.isLocked(gpr))
                result = allocate();
            else {
                m_gprs.lock(gpr);
                info.fillInteger(gpr);
                result = gpr;
            }
            m_jit.zeroExtend32ToPtr(gpr, result);
            returnFormat = DataFormatInteger;
            return result;
        }

        // Non-strict: the tagged form is acceptable as-is.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatJSInteger;
        return gpr;
    }

    case DataFormatInteger: {
        // Already held as a raw int32; valid for both strict and non-strict.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }

    case DataFormatDouble:
    case DataFormatCell:
    case DataFormatJSDouble:
    case DataFormatJSCell: {
        // Statically known not to be an integer: the speculation fails.
        // Return a fresh register so callers still get a usable (dead) GPR.
        terminateSpeculativeExecution();
        returnFormat = DataFormatInteger;
        return allocate();
    }
    }

    ASSERT_NOT_REACHED();
    return InvalidGPRReg;
}
140
141 SpeculationCheck::SpeculationCheck(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
142 : m_check(check)
143 , m_nodeIndex(jit->m_compileIndex)
144 , m_recoveryIndex(recoveryIndex)
145 {
146 for (gpr_iterator iter = jit->m_gprs.begin(); iter != jit->m_gprs.end(); ++iter) {
147 if (iter.name() != InvalidVirtualRegister) {
148 GenerationInfo& info = jit->m_generationInfo[iter.name()];
149 m_gprInfo[iter.index()].nodeIndex = info.nodeIndex();
150 m_gprInfo[iter.index()].format = info.registerFormat();
151 } else
152 m_gprInfo[iter.index()].nodeIndex = NoNode;
153 }
154 for (fpr_iterator iter = jit->m_fprs.begin(); iter != jit->m_fprs.end(); ++iter) {
155 if (iter.name() != InvalidVirtualRegister) {
156 GenerationInfo& info = jit->m_generationInfo[iter.name()];
157 ASSERT(info.registerFormat() == DataFormatDouble);
158 m_fprInfo[iter.index()] = info.nodeIndex();
159 } else
160 m_fprInfo[iter.index()] = NoNode;
161 }
162 }
163
164 GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
165 {
166 return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
167 }
168
169 GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
170 {
171 DataFormat mustBeDataFormatInteger;
172 GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
173 ASSERT(mustBeDataFormatInteger == DataFormatInteger);
174 return result;
175 }
176
// Fill the value for the given node into a GPR, speculating that it is a
// cell (heap object pointer). Values that may not be cells get a dynamic
// speculation check; values statically known not to be cells terminate
// speculative execution.
GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
{
    Node& node = m_jit.graph()[nodeIndex];
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Value not in a register; materialize from constant pool or spill slot.
        GPRReg gpr = allocate();

        if (node.isConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            JSValue jsValue = constantAsJSValue(nodeIndex);
            if (jsValue.isCell()) {
                m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
                info.fillJSValue(gpr, DataFormatJSCell);
                return gpr;
            }
            // A constant statically known not to be a cell can never satisfy
            // this speculation.
            terminateSpeculativeExecution();
            return gpr;
        }
        ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);

        // Only emit the dynamic check if the spill format did not already
        // guarantee the value is a cell.
        if (info.spillFormat() != DataFormatJSCell)
            speculationCheck(m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatCell:
    case DataFormatJSCell: {
        // Already known to be a cell; just lock the register and return it.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        // Generic JSValue: speculate it is a cell by checking that no tag
        // bits are set.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        speculationCheck(m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatJSInteger:
    case DataFormatInteger:
    case DataFormatJSDouble:
    case DataFormatDouble: {
        // Statically known to be a number, so the cell speculation fails.
        // Return a fresh register so callers still get a usable (dead) GPR.
        terminateSpeculativeExecution();
        return allocate();
    }
    }

    ASSERT_NOT_REACHED();
    return InvalidGPRReg;
}
235
236 void SpeculativeJIT::compilePeepHoleBranch(Node& node, JITCompiler::RelationalCondition condition)
237 {
238 Node& branchNode = m_jit.graph()[m_compileIndex + 1];
239 BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
240 BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
241
242 // The branch instruction will branch to the taken block.
243 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
244 if (taken == (m_block + 1)) {
245 condition = JITCompiler::invert(condition);
246 BlockIndex tmp = taken;
247 taken = notTaken;
248 notTaken = tmp;
249 }
250
251 int32_t imm;
252 if (isJSConstantWithInt32Value(node.child1, imm)) {
253 SpeculateIntegerOperand op2(this, node.child2);
254 addBranch(m_jit.branch32(condition, JITCompiler::Imm32(imm), op2.gpr()), taken);
255 } else if (isJSConstantWithInt32Value(node.child2, imm)) {
256 SpeculateIntegerOperand op1(this, node.child1);
257 addBranch(m_jit.branch32(condition, op1.gpr(), JITCompiler::Imm32(imm)), taken);
258 } else {
259 SpeculateIntegerOperand op1(this, node.child1);
260 SpeculateIntegerOperand op2(this, node.child2);
261 addBranch(m_jit.branch32(condition, op1.gpr(), op2.gpr()), taken);
262 }
263
264 // Check for fall through, otherwise we need to jump.
265 if (notTaken != (m_block + 1))
266 addBranch(m_jit.jump(), notTaken);
267 }
268
// Generate speculative code for a single node. Speculative code makes
// optimistic assumptions (largely driven by value predictions) and emits
// speculationCheck() branches that divert to the non-speculative path when
// an assumption fails at run time.
void SpeculativeJIT::compile(Node& node)
{
    NodeType op = node.op;

    switch (op) {
    case Int32Constant:
    case DoubleConstant:
    case JSConstant:
        // Constants emit no code; they are materialized on demand by fills.
        initConstantInfo(m_compileIndex);
        break;

    case GetLocal: {
        GPRTemporary result(this);
        PredictedType prediction = m_jit.graph().getPrediction(node.local());
        if (prediction == PredictInt32) {
            // Predicted int32: load only the 32-bit payload.
            m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());

            // Like integerResult, but don't useChildren - our children are phi nodes,
            // and don't represent values within this dataflow with virtual registers.
            VirtualRegister virtualRegister = node.virtualRegister();
            m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
            m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr());
        } else {
            m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr());

            // Like jsValueResult, but don't useChildren - our children are phi nodes,
            // and don't represent values within this dataflow with virtual registers.
            VirtualRegister virtualRegister = node.virtualRegister();
            m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
            m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), result.gpr(), (prediction == PredictArray) ? DataFormatJSCell : DataFormatJS);
        }
        break;
    }

    case SetLocal: {
        // Store to the local in the format implied by its prediction, so a
        // later GetLocal can reload it in the same format.
        switch (m_jit.graph().getPrediction(node.local())) {
        case PredictInt32: {
            SpeculateIntegerOperand value(this, node.child1);
            m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
            noResult(m_compileIndex);
            break;
        }
        case PredictArray: {
            SpeculateCellOperand cell(this, node.child1);
            m_jit.storePtr(cell.gpr(), JITCompiler::addressFor(node.local()));
            noResult(m_compileIndex);
            break;
        }

        default: {
            // No useful prediction: store the full boxed JSValue.
            JSValueOperand value(this, node.child1);
            m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
            noResult(m_compileIndex);
            break;
        }
        }
        break;
    }

    case BitAnd:
    case BitOr:
    case BitXor:
        // Bitwise ops speculate int32 operands; use the immediate form when
        // either operand is an int32 constant.
        if (isInt32Constant(node.child1)) {
            SpeculateIntegerOperand op2(this, node.child2);
            GPRTemporary result(this, op2);

            bitOp(op, valueOfInt32Constant(node.child1), op2.gpr(), result.gpr());

            integerResult(result.gpr(), m_compileIndex);
        } else if (isInt32Constant(node.child2)) {
            SpeculateIntegerOperand op1(this, node.child1);
            GPRTemporary result(this, op1);

            bitOp(op, valueOfInt32Constant(node.child2), op1.gpr(), result.gpr());

            integerResult(result.gpr(), m_compileIndex);
        } else {
            SpeculateIntegerOperand op1(this, node.child1);
            SpeculateIntegerOperand op2(this, node.child2);
            GPRTemporary result(this, op1, op2);

            GPRReg reg1 = op1.gpr();
            GPRReg reg2 = op2.gpr();
            bitOp(op, reg1, reg2, result.gpr());

            integerResult(result.gpr(), m_compileIndex);
        }
        break;

    case BitRShift:
    case BitLShift:
    case BitURShift:
        if (isInt32Constant(node.child2)) {
            SpeculateIntegerOperand op1(this, node.child1);
            GPRTemporary result(this, op1);

            // Shift amounts are masked to the low five bits.
            shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2) & 0x1f, result.gpr());

            integerResult(result.gpr(), m_compileIndex);
        } else {
            // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
            SpeculateIntegerOperand op1(this, node.child1);
            SpeculateIntegerOperand op2(this, node.child2);
            GPRTemporary result(this, op1);

            GPRReg reg1 = op1.gpr();
            GPRReg reg2 = op2.gpr();
            shiftOp(op, reg1, reg2, result.gpr());

            integerResult(result.gpr(), m_compileIndex);
        }
        break;

    case UInt32ToNumber: {
        IntegerOperand op1(this, node.child1);
        GPRTemporary result(this, op1);

        // Test the operand is positive.
        // (A uint32 with the sign bit set cannot be represented as an int32,
        // so bail to the non-speculative path in that case.)
        speculationCheck(m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)));

        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        break;
    }

    case NumberToInt32: {
        // Under integer speculation the conversion is a plain move; the
        // fill's own check enforces the operand really is an int32.
        SpeculateIntegerOperand op1(this, node.child1);
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        break;
    }

    case Int32ToNumber: {
        // Under integer speculation the conversion is a plain move.
        SpeculateIntegerOperand op1(this, node.child1);
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        break;
    }

    case ValueToInt32: {
        // Speculate the value is already an integer; then no conversion work
        // is needed beyond the fill's own check.
        SpeculateIntegerOperand op1(this, node.child1);
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        break;
    }

    case ValueToNumber: {
        // Speculate the value is an integer (a subset of number); plain move.
        SpeculateIntegerOperand op1(this, node.child1);
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        break;
    }

    case ValueAdd:
    case ArithAdd: {
        // Speculate an int32 add; overflow diverts to the non-speculative path.
        int32_t imm1;
        if (isDoubleConstantWithInt32Value(node.child1, imm1)) {
            SpeculateIntegerOperand op2(this, node.child2);
            GPRTemporary result(this);

            speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            integerResult(result.gpr(), m_compileIndex);
            break;
        }

        int32_t imm2;
        if (isDoubleConstantWithInt32Value(node.child2, imm2)) {
            SpeculateIntegerOperand op1(this, node.child1);
            GPRTemporary result(this);

            speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            integerResult(result.gpr(), m_compileIndex);
            break;
        }

        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();
        MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

        // If the add clobbered one of its operands, record a recovery so the
        // non-speculative path can reconstruct the operand's original value.
        if (gpr1 == gprResult)
            speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
        else if (gpr2 == gprResult)
            speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
        else
            speculationCheck(check);

        integerResult(gprResult, m_compileIndex);
        break;
    }

    case ArithSub: {
        int32_t imm2;
        if (isDoubleConstantWithInt32Value(node.child2, imm2)) {
            SpeculateIntegerOperand op1(this, node.child1);
            GPRTemporary result(this);

            speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            integerResult(result.gpr(), m_compileIndex);
            break;
        }

        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this);

        speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        integerResult(result.gpr(), m_compileIndex);
        break;
    }

    case ArithMul: {
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();
        speculationCheck(m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));

        // A zero result may really be negative zero (e.g. -1 * 0), which an
        // int32 cannot represent; bail if either operand was negative.
        MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
        speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
        speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
        resultNonZero.link(&m_jit);

        integerResult(result.gpr(), m_compileIndex);
        break;
    }

    case ArithDiv: {
        // Division is not implemented speculatively here; always defer to
        // the non-speculative path. Operands are still filled so register
        // state stays consistent.
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        op1.gpr();
        op2.gpr();
        terminateSpeculativeExecution();

        integerResult(result.gpr(), m_compileIndex);
        break;
    }

    case ArithMod: {
        // Modulo is not implemented speculatively here; always defer to the
        // non-speculative path.
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        op1.gpr();
        op2.gpr();
        terminateSpeculativeExecution();

        integerResult(result.gpr(), m_compileIndex);
        break;
    }

    case LogicalNot: {
        JSValueOperand value(this, node.child1);
        GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).

        // Speculate the operand is a boolean: xor with ValueFalse maps
        // false->0 and true->1; anything else leaves bits outside the low bit
        // set and fails the check. Xor with ValueTrue re-tags the inverted bit.
        m_jit.move(value.gpr(), result.gpr());
        m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
        speculationCheck(m_jit.branchTestPtr(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case CompareLess: {
        // Fused compare & branch.
        if (detectPeepHoleBranch()) {
            // detectPeepHoleBranch currently only permits the branch to be the very next node,
            // so can be no intervening nodes to also reference the compare.
            ASSERT(node.adjustedRefCount() == 1);

            compilePeepHoleBranch(node, JITCompiler::LessThan);

            // Both the compare and the branch have been consumed.
            use(node.child1);
            use(node.child2);
            ++m_compileIndex;
            return;
        }

        // Normal case, not fused to branch.
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        m_jit.compare32(JITCompiler::LessThan, op1.gpr(), op2.gpr(), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        // Tag the 0/1 comparison result as a boolean JSValue.
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case CompareLessEq: {
        // Fused compare & branch.
        if (detectPeepHoleBranch()) {
            // detectPeepHoleBranch currently only permits the branch to be the very next node,
            // so can be no intervening nodes to also reference the compare.
            ASSERT(node.adjustedRefCount() == 1);

            compilePeepHoleBranch(node, JITCompiler::LessThanOrEqual);

            use(node.child1);
            use(node.child2);
            ++m_compileIndex;
            return;
        }

        // Normal case, not fused to branch.
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        m_jit.compare32(JITCompiler::LessThanOrEqual, op1.gpr(), op2.gpr(), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case CompareEq: {
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        m_jit.compare32(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case CompareStrictEq: {
        // Under int32 speculation, strict equality coincides with equality,
        // so the code is identical to CompareEq.
        SpeculateIntegerOperand op1(this, node.child1);
        SpeculateIntegerOperand op2(this, node.child2);
        GPRTemporary result(this, op1, op2);

        m_jit.compare32(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case GetByVal: {
        NodeIndex alias = node.child3;
        if (alias != NoNode) {
            // This access aliases a previously loaded value; reuse it instead
            // of re-reading the array.
            // FIXME: result should be able to reuse child1, child2. Should have an 'UnusedOperand' type.
            JSValueOperand aliasedValue(this, node.child3);
            GPRTemporary result(this, aliasedValue);
            m_jit.move(aliasedValue.gpr(), result.gpr());
            jsValueResult(result.gpr(), m_compileIndex);
            break;
        }

        SpeculateCellOperand base(this, node.child1);
        SpeculateStrictInt32Operand property(this, node.child2);
        GPRTemporary storage(this);

        GPRReg baseReg = base.gpr();
        GPRReg propertyReg = property.gpr();
        GPRReg storageReg = storage.gpr();

        // Get the array storage. We haven't yet checked this is a JSArray, so this is only safe if
        // an access with offset JSArray::storageOffset() is valid for all JSCells!
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);

        // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
        // If we have predicted the base to be type array, we can skip the check.
        Node& baseNode = m_jit.graph()[node.child1];
        if (baseNode.op != GetLocal || m_jit.graph().getPrediction(baseNode.local()) != PredictArray)
            speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
        speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));

        // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
        // the storage pointer - especially if there happens to be another register free right now. If we do so,
        // then we'll need to allocate a new temporary for result.
        GPRTemporary& result = storage;
        m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
        // A hole in the vector reads back as zero; bail in that case.
        speculationCheck(m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));

        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case PutByVal: {
        SpeculateCellOperand base(this, node.child1);
        SpeculateStrictInt32Operand property(this, node.child2);
        JSValueOperand value(this, node.child3);
        GPRTemporary storage(this);

        // Map base, property & value into registers, allocate a register for storage.
        GPRReg baseReg = base.gpr();
        GPRReg propertyReg = property.gpr();
        GPRReg valueReg = value.gpr();
        GPRReg storageReg = storage.gpr();

        // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
        // If we have predicted the base to be type array, we can skip the check.
        Node& baseNode = m_jit.graph()[node.child1];
        if (baseNode.op != GetLocal || m_jit.graph().getPrediction(baseNode.local()) != PredictArray)
            speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
        speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));

        // Get the array storage.
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);

        // Check if we're writing to a hole; if so increment m_numValuesInVector.
        MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
        m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));

        // If we're writing to a hole we might be growing the array;
        // update m_length to property+1 when it was not already larger.
        // (propertyReg is temporarily clobbered and then restored.)
        MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
        m_jit.add32(TrustedImm32(1), propertyReg);
        m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
        m_jit.sub32(TrustedImm32(1), propertyReg);

        lengthDoesNotNeedUpdate.link(&m_jit);
        notHoleValue.link(&m_jit);

        // Store the value to the array.
        m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

        noResult(m_compileIndex);
        break;
    }

    case PutByValAlias: {
        // Aliased put: a prior access already proved the base/bounds, so no
        // checks and no hole bookkeeping are needed here.
        SpeculateCellOperand base(this, node.child1);
        SpeculateStrictInt32Operand property(this, node.child2);
        JSValueOperand value(this, node.child3);
        GPRTemporary storage(this, base); // storage may overwrite base.

        // Get the array storage.
        GPRReg storageReg = storage.gpr();
        m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSArray::storageOffset()), storageReg);

        // Map property & value into registers.
        GPRReg propertyReg = property.gpr();
        GPRReg valueReg = value.gpr();

        // Store the value to the array.
        m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

        noResult(m_compileIndex);
        break;
    }

    case DFG::Jump: {
        // Unconditional branch; omitted if the target is the fall-through block.
        BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
        if (taken != (m_block + 1))
            addBranch(m_jit.jump(), taken);
        noResult(m_compileIndex);
        break;
    }

    case Branch: {
        JSValueOperand value(this, node.child1);
        GPRReg valueReg = value.gpr();

        BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
        BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(node.notTakenBytecodeOffset());

        // Integers
        // Zero goes to notTaken; any other integer goes to taken.
        addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0)))), notTaken);
        MacroAssembler::Jump isNonZeroInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, valueReg, GPRInfo::tagTypeNumberRegister);

        // Booleans
        // false goes to notTaken; anything that is neither false nor true
        // fails the speculation. Falling through here means the value is true.
        addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken);
        speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))));

        if (taken == (m_block + 1))
            isNonZeroInteger.link(&m_jit);
        else {
            addBranch(isNonZeroInteger, taken);
            addBranch(m_jit.jump(), taken);
        }

        noResult(m_compileIndex);
        break;
    }

    case Return: {
        ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
        ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
        ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);

#if DFG_SUCCESS_STATS
        static SamplingCounter counter("SpeculativeJIT");
        m_jit.emitCount(counter);
#endif

        // Return the result in returnValueGPR.
        JSValueOperand op1(this, node.child1);
        m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);

        // Grab the return address.
        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1);
        // Restore our caller's "r".
        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
        // Return.
        m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
        m_jit.ret();

        noResult(m_compileIndex);
        break;
    }

    case ConvertThis: {
        // Speculate 'this' is a cell that needs no conversion; otherwise bail.
        SpeculateCellOperand thisValue(this, node.child1);
        GPRTemporary temp(this);

        m_jit.loadPtr(JITCompiler::Address(thisValue.gpr(), JSCell::structureOffset()), temp.gpr());
        speculationCheck(m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(temp.gpr(), Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(NeedsThisConversion)));

        cellResult(thisValue.gpr(), m_compileIndex);
        break;
    }

    case GetById: {
        // Property access compiles to a C-function call; all registers must
        // be flushed across the call.
        JSValueOperand base(this, node.child1);
        GPRReg baseGPR = base.gpr();
        flushRegisters();

        GPRResult result(this);
        callOperation(operationGetById, result.gpr(), baseGPR, identifier(node.identifierNumber()));
        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case PutById: {
        JSValueOperand base(this, node.child1);
        JSValueOperand value(this, node.child2);
        GPRReg valueGPR = value.gpr();
        GPRReg baseGPR = base.gpr();
        flushRegisters();

        // Strict and non-strict mode use different runtime entry points.
        callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByIdStrict : operationPutByIdNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber()));
        noResult(m_compileIndex);
        break;
    }

    case PutByIdDirect: {
        JSValueOperand base(this, node.child1);
        JSValueOperand value(this, node.child2);
        GPRReg valueGPR = value.gpr();
        GPRReg baseGPR = base.gpr();
        flushRegisters();

        callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByIdDirectStrict : operationPutByIdDirectNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber()));
        noResult(m_compileIndex);
        break;
    }

    case GetGlobalVar: {
        GPRTemporary result(this);

        // Load the global object's register array, then the variable's slot.
        JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
        m_jit.loadPtr(globalObject->addressOfRegisters(), result.gpr());
        m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.gpr(), node.varNumber()), result.gpr());

        jsValueResult(result.gpr(), m_compileIndex);
        break;
    }

    case PutGlobalVar: {
        JSValueOperand value(this, node.child1);
        GPRTemporary temp(this);

        JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
        m_jit.loadPtr(globalObject->addressOfRegisters(), temp.gpr());
        m_jit.storePtr(value.gpr(), JITCompiler::addressForGlobalVar(temp.gpr(), node.varNumber()));

        noResult(m_compileIndex);
        break;
    }

    case Phi:
        // Phi nodes generate no code; they only exist for dataflow analysis.
        ASSERT_NOT_REACHED();
    }

    // NOTE(review): this appears to balance the extra self-reference carried
    // by must-generate nodes so their result registers can be released —
    // confirm against the use()/refCount bookkeeping in the header.
    if (node.hasResult() && node.mustGenerate())
        use(m_compileIndex);
}
872
873 void SpeculativeJIT::compile(BasicBlock& block)
874 {
875 ASSERT(m_compileIndex == block.begin);
876 m_blockHeads[m_block] = m_jit.label();
877 #if DFG_JIT_BREAK_ON_EVERY_BLOCK
878 m_jit.breakpoint();
879 #endif
880
881 for (; m_compileIndex < block.end; ++m_compileIndex) {
882 Node& node = m_jit.graph()[m_compileIndex];
883 if (!node.shouldGenerate())
884 continue;
885
886 #if DFG_DEBUG_VERBOSE
887 fprintf(stderr, "SpeculativeJIT generating Node @%d at JIT offset 0x%x\n", (int)m_compileIndex, m_jit.debugOffset());
888 #endif
889 #if DFG_JIT_BREAK_ON_EVERY_NODE
890 m_jit.breakpoint();
891 #endif
892 checkConsistency();
893 compile(node);
894 if (!m_compileOkay)
895 return;
896 checkConsistency();
897 }
898 }
899
900 // If we are making type predictions about our arguments then
901 // we need to check that they are correct on function entry.
902 void SpeculativeJIT::checkArgumentTypes()
903 {
904 ASSERT(!m_compileIndex);
905 for (int i = 0; i < m_jit.codeBlock()->m_numParameters; ++i) {
906 VirtualRegister virtualRegister = (VirtualRegister)(m_jit.codeBlock()->thisRegister() + i);
907 switch (m_jit.graph().getPrediction(virtualRegister)) {
908 case PredictInt32:
909 speculationCheck(m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
910 break;
911
912 case PredictArray: {
913 GPRTemporary temp(this);
914 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
915 speculationCheck(m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
916 speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
917 break;
918 }
919
920 default:
921 break;
922 }
923 }
924 }
925
926 // For any vars that we will be treating as numeric, write 0 to
927 // the var on entry. Throughout the block we will only read/write
928 // to the payload, by writing the tag now we prevent the GC from
929 // misinterpreting values as pointers.
930 void SpeculativeJIT::initializeVariableTypes()
931 {
932 ASSERT(!m_compileIndex);
933 for (int var = 0; var < m_jit.codeBlock()->m_numVars; ++var) {
934 if (m_jit.graph().getPrediction(var) == PredictInt32)
935 m_jit.storePtr(GPRInfo::tagTypeNumberRegister, JITCompiler::addressFor((VirtualRegister)var));
936 }
937 }
938
939 bool SpeculativeJIT::compile()
940 {
941 checkArgumentTypes();
942 initializeVariableTypes();
943
944 ASSERT(!m_compileIndex);
945 for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
946 compile(*m_jit.graph().m_blocks[m_block]);
947 if (!m_compileOkay)
948 return false;
949 }
950 linkBranches();
951 return true;
952 }
953
954 } } // namespace JSC::DFG
955
956 #endif