/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGSpeculativeJIT_h
#define DFGSpeculativeJIT_h

#if ENABLE(DFG_JIT)

#include "DFGAbstractState.h"
#include "DFGGenerationInfo.h"
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "MarkedAllocator.h"
#include "ValueRecovery.h"

namespace JSC { namespace DFG {

class JSValueOperand;
class SpeculativeJIT;
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;

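// A ValueSource records where a live value can be found if we need to
// reconstruct it (e.g. when building OSR exit recoveries): flushed to
// the RegisterFile in some known format, dead, or still tracked by a
// DFG node (HaveNode).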
enum ValueSourceKind {
    SourceNotSet,
    ValueInRegisterFile,
    Int32InRegisterFile,
    CellInRegisterFile,
    BooleanInRegisterFile,
    DoubleInRegisterFile,
    SourceIsDead,
    HaveNode
};

class ValueSource {
public:
    ValueSource()
        : m_nodeIndex(nodeIndexFromKind(SourceNotSet))
    {
    }

    explicit ValueSource(ValueSourceKind valueSourceKind)
        : m_nodeIndex(nodeIndexFromKind(valueSourceKind))
    {
        ASSERT(kind() != SourceNotSet);
        ASSERT(kind() != HaveNode);
    }

    explicit ValueSource(NodeIndex nodeIndex)
        : m_nodeIndex(nodeIndex)
    {
        ASSERT(kind() == HaveNode);
    }

    static ValueSource forPrediction(PredictedType prediction)
    {
        if (isInt32Prediction(prediction))
            return ValueSource(Int32InRegisterFile);
        if (isArrayPrediction(prediction))
            return ValueSource(CellInRegisterFile);
        if (isBooleanPrediction(prediction))
            return ValueSource(BooleanInRegisterFile);
        return ValueSource(ValueInRegisterFile);
    }

    bool isSet() const
    {
        return kindFromNodeIndex(m_nodeIndex) != SourceNotSet;
    }

    ValueSourceKind kind() const
    {
        return kindFromNodeIndex(m_nodeIndex);
    }

    NodeIndex nodeIndex() const
    {
        ASSERT(kind() == HaveNode);
        return m_nodeIndex;
    }

    void dump(FILE* out) const;

private:
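    // The kind is packed into m_nodeIndex itself: the non-HaveNode kinds
    // occupy the topmost values of the NodeIndex space, counting down from
    // NoNode. Any index below that small reserved range is a real node
    // index, so kindFromNodeIndex() maps it to HaveNode.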
    static NodeIndex nodeIndexFromKind(ValueSourceKind kind)
    {
        ASSERT(kind >= SourceNotSet && kind < HaveNode);
        return NoNode - kind;
    }

    static ValueSourceKind kindFromNodeIndex(NodeIndex nodeIndex)
    {
        unsigned kind = static_cast<unsigned>(NoNode - nodeIndex);
        if (kind >= static_cast<unsigned>(HaveNode))
            return HaveNode;
        return static_cast<ValueSourceKind>(kind);
    }

    NodeIndex m_nodeIndex;
};

enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue };

// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
// incomplete code path for the dataflow. When generating code we may
// make assumptions about operand types, dynamically check them, and
// bail out to an alternate code path if a check fails. Importantly,
// the speculative code path cannot be reentered once a speculative
// check has failed. This allows the SpeculativeJIT to propagate type
// information (including information that has only speculatively been
// asserted) through the dataflow.
class SpeculativeJIT {
    friend struct OSRExit;
private:
    typedef JITCompiler::TrustedImm32 TrustedImm32;
    typedef JITCompiler::Imm32 Imm32;
    typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
    typedef JITCompiler::ImmPtr ImmPtr;

    // These constants are used to set priorities for spill order for
    // the register allocator.
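    // Lower values are cheaper to evict: constants and already-spilled
    // values need no store at all, while doubles must be both stored and
    // converted when refilled.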
#if USE(JSVALUE64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderCell = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
        SpillOrderDouble = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderDouble = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderCell = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
    };
#endif

    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };

public:
    SpeculativeJIT(JITCompiler&);

    bool compile();
    void createOSREntries();
    void linkOSREntries(LinkBuffer&);

    Node& at(NodeIndex nodeIndex)
    {
        return m_jit.graph()[nodeIndex];
    }
    Node& at(Edge nodeUse)
    {
        return at(nodeUse.index());
    }

    GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
    FPRReg fillDouble(NodeIndex);
#if USE(JSVALUE64)
    GPRReg fillJSValue(NodeIndex);
#elif USE(JSVALUE32_64)
    bool fillJSValue(NodeIndex, GPRReg&, GPRReg&, FPRReg&);
#endif
    GPRReg fillStorage(NodeIndex);

    // lock and unlock GPR & FPR registers.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }

    // Used to check whether a child node is at its last use,
    // so that its machine registers may be reused.
    bool canReuse(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.canReuse();
    }
    bool canReuse(Edge nodeUse)
    {
        return canReuse(nodeUse.index());
    }
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }

    // Allocate a gpr/fpr.
    GPRReg allocate()
    {
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
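            // On 32-bit, a JSValue may occupy two GPRs (tag and payload).
            // spill() writes both halves, so explicitly release the sibling
            // register that is not being handed out to the caller.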
            GenerationInfo& info = m_generationInfo[spillMe];
            ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return gpr;
    }
    GPRReg allocate(GPRReg specific)
    {
        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
            GenerationInfo& info = m_generationInfo[spillMe];
            ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return specific;
    }
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    FPRReg fprAllocate()
    {
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister)
            spill(spillMe);
        return fpr;
    }

    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands, to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not, we attempt to
    // avoid spilling values we will need immediately).
    bool isFilled(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() != DataFormatNone;
    }
    bool isFilledDouble(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() == DataFormatDouble;
    }

    // Called on an operand once it has been consumed by a parent node.
    void use(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use())
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
    void use(Edge nodeUse)
    {
        use(nodeUse.index());
    }

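    // Write-barrier helpers. markCellCard presumably dirties the card
    // table entry for the owner cell; the Edge-taking writeBarrier
    // overloads can use type information about the stored value to skip
    // the barrier when it cannot be a cell.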
    static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
    static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);

    void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);

    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
    }

    // Called by the speculative operand types, below, to fill operands into
    // machine registers, implicitly generating speculation checks as needed.
    GPRReg fillSpeculateInt(NodeIndex, DataFormat& returnFormat);
    GPRReg fillSpeculateIntStrict(NodeIndex);
    FPRReg fillSpeculateDouble(NodeIndex);
    GPRReg fillSpeculateCell(NodeIndex);
    GPRReg fillSpeculateBoolean(NodeIndex);
    GeneratedOperandType checkGeneratedTypeForToInt32(NodeIndex);

private:
    void compile(Node&);
    void compileMovHint(Node&);
    void compile(BasicBlock&);

    void checkArgumentTypes();

    void clearGenerationInfo();

    // These methods are used when generating 'unexpected'
    // calls out from JIT code to C++ helper routines -
    // they spill all live values to the appropriate
    // slots in the RegisterFile without changing any state
    // in the GenerationInfo.
    void silentSpillGPR(VirtualRegister spillMe, GPRReg source)
    {
        GenerationInfo& info = m_generationInfo[spillMe];
        ASSERT(info.registerFormat() != DataFormatNone);
        ASSERT(info.registerFormat() != DataFormatDouble);

        if (!info.needsSpill())
            return;

        DataFormat registerFormat = info.registerFormat();

#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInteger)
            m_jit.store32(source, JITCompiler::addressFor(spillMe));
        else {
            ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
            m_jit.storePtr(source, JITCompiler::addressFor(spillMe));
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            m_jit.store32(source, source == info.tagGPR() ? JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe));
        } else {
            ASSERT(info.gpr() == source);
            m_jit.store32(source, JITCompiler::payloadFor(spillMe));
        }
#endif
    }
    void silentSpillFPR(VirtualRegister spillMe, FPRReg source)
    {
        GenerationInfo& info = m_generationInfo[spillMe];
        ASSERT(info.registerFormat() == DataFormatDouble);

        if (!info.needsSpill()) {
            // It's either a constant or it's already been spilled.
            ASSERT(at(info.nodeIndex()).hasConstant() || info.spillFormat() != DataFormatNone);
            return;
        }

        // It's neither a constant nor has it been spilled.
        ASSERT(!at(info.nodeIndex()).hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);

        m_jit.storeDouble(source, JITCompiler::addressFor(spillMe));
    }

    void silentFillGPR(VirtualRegister spillMe, GPRReg target)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

        NodeIndex nodeIndex = info.nodeIndex();
        Node& node = at(nodeIndex);
        ASSERT(info.registerFormat() != DataFormatNone);
        ASSERT(info.registerFormat() != DataFormatDouble);
        DataFormat registerFormat = info.registerFormat();

        if (registerFormat == DataFormatInteger) {
            ASSERT(info.gpr() == target);
            ASSERT(isJSInteger(info.registerFormat()));
            if (node.hasConstant()) {
                ASSERT(isInt32Constant(nodeIndex));
                m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), target);
            } else
                m_jit.load32(JITCompiler::payloadFor(spillMe), target);
            return;
        }

        if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
            ASSERT_NOT_REACHED();
#elif USE(JSVALUE32_64)
            ASSERT(info.gpr() == target);
            if (node.hasConstant()) {
                ASSERT(isBooleanConstant(nodeIndex));
                m_jit.move(TrustedImm32(valueOfBooleanConstant(nodeIndex)), target);
            } else
                m_jit.load32(JITCompiler::payloadFor(spillMe), target);
#endif
            return;
        }

        if (registerFormat == DataFormatCell) {
            ASSERT(info.gpr() == target);
            if (node.hasConstant()) {
                JSValue value = valueOfJSConstant(nodeIndex);
                ASSERT(value.isCell());
                m_jit.move(TrustedImmPtr(value.asCell()), target);
            } else
                m_jit.loadPtr(JITCompiler::payloadFor(spillMe), target);
            return;
        }

        if (registerFormat == DataFormatStorage) {
            ASSERT(info.gpr() == target);
            m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
            return;
        }

        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == target);
        if (node.hasConstant()) {
            if (valueOfJSConstant(nodeIndex).isCell())
                m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex).asTrustedImmPtr(), target);
            else
                m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), target);
        } else if (info.spillFormat() == DataFormatInteger) {
            ASSERT(registerFormat == DataFormatJSInteger);
            m_jit.load32(JITCompiler::payloadFor(spillMe), target);
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, target);
        } else if (info.spillFormat() == DataFormatDouble) {
            ASSERT(registerFormat == DataFormatJSDouble);
            m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
            m_jit.subPtr(GPRInfo::tagTypeNumberRegister, target);
        } else
            m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
#else
        ASSERT(info.tagGPR() == target || info.payloadGPR() == target);
        if (node.hasConstant()) {
            JSValue v = valueOfJSConstant(nodeIndex);
            m_jit.move(info.tagGPR() == target ? Imm32(v.tag()) : Imm32(v.payload()), target);
        } else if (info.payloadGPR() == target)
            m_jit.load32(JITCompiler::payloadFor(spillMe), target);
        else { // Fill the tag.
            switch (info.spillFormat()) {
            case DataFormatInteger:
                ASSERT(registerFormat == DataFormatJSInteger);
                m_jit.move(TrustedImm32(JSValue::Int32Tag), target);
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                m_jit.move(TrustedImm32(JSValue::CellTag), target);
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                m_jit.move(TrustedImm32(JSValue::BooleanTag), target);
                break;
            default:
                m_jit.load32(JITCompiler::tagFor(spillMe), target);
                break;
            }
        }
#endif
    }

    void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg target)
    {
        GenerationInfo& info = m_generationInfo[spillMe];
        ASSERT(info.fpr() == target);

        NodeIndex nodeIndex = info.nodeIndex();
        Node& node = at(nodeIndex);
#if USE(JSVALUE64)
        ASSERT(info.registerFormat() == DataFormatDouble);

        if (node.hasConstant()) {
            ASSERT(isNumberConstant(nodeIndex));
            m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample);
            m_jit.movePtrToDouble(canTrample, target);
            return;
        }

        if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
            // It was already spilled previously and not as a double, which means we need unboxing.
            ASSERT(info.spillFormat() & DataFormatJS);
            m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample);
            unboxDouble(canTrample, target);
            return;
        }

        m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
#elif USE(JSVALUE32_64)
        UNUSED_PARAM(canTrample);
        ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
        if (node.hasConstant()) {
            ASSERT(isNumberConstant(nodeIndex));
            m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), target);
        } else
            m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
#endif
    }

    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
                silentSpillGPR(iter.name(), gpr);
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                silentSpillFPR(iter.name(), iter.regID());
        }
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                silentSpillGPR(iter.name(), iter.regID());
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            FPRReg fpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && fpr != exclude)
                silentSpillFPR(iter.name(), fpr);
        }
    }

    void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
    {
        GPRReg canTrample = GPRInfo::regT0;
        if (exclude == GPRInfo::regT0)
            canTrample = GPRInfo::regT1;

        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                silentFillFPR(iter.name(), canTrample, iter.regID());
        }
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
                silentFillGPR(iter.name(), gpr);
        }
    }
    void silentFillAllRegisters(FPRReg exclude)
    {
        GPRReg canTrample = GPRInfo::regT0;

        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            FPRReg fpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && fpr != exclude)
                silentFillFPR(iter.name(), canTrample, fpr);
        }
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                silentFillGPR(iter.name(), iter.regID());
        }
    }

    // These methods convert between raw doubles and doubles boxed as JSValues.
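    // On JSVALUE64 this is the tag-type-number scheme visible in
    // silentFillGPR() above: integers are boxed by or'ing in
    // tagTypeNumberRegister and doubles by subtracting it. On
    // JSVALUE32_64, tag and payload simply live in two registers.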
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, fpr);
    }
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }
#elif USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif

    // Spill a VirtualRegister to the RegisterFile.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // It has already been spilled. JS values which have two GPRs can reach here.
            return;
#endif
        // Check the GenerationInfo to see if this value needs writing
        // to the RegisterFile - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled();
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(DataFormatStorage);
            return;
        }

        case DataFormatInteger: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(DataFormatInteger);
            return;
        }

#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(DataFormatDouble);
            return;
        }

        default:
            // The following code handles JSValues, int32s, and cells.
            ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);

            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            if (spillFormat == DataFormatInteger)
                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);

            // Spill the value, and record it as spilled in its boxed form.
            m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
            info.spill((DataFormat)(spillFormat | DataFormatJS));
            return;
#elif USE(JSVALUE32_64)
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(spillFormat);
            return;
        }

        case DataFormatDouble:
        case DataFormatJSDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(DataFormatJSDouble);
            return;
        }

        default:
            // The following code handles JSValues.
            ASSERT(spillFormat & DataFormatJS);
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
            info.spill(spillFormat);
            return;
#endif
        }
    }

    bool isStrictInt32(NodeIndex);

    bool isKnownInteger(NodeIndex);
    bool isKnownNumeric(NodeIndex);
    bool isKnownCell(NodeIndex);

    bool isKnownNotInteger(NodeIndex);
    bool isKnownNotNumber(NodeIndex);

    bool isKnownNotCell(NodeIndex);

    // Checks/accessors for constant values.
    bool isConstant(NodeIndex nodeIndex) { return m_jit.graph().isConstant(nodeIndex); }
    bool isJSConstant(NodeIndex nodeIndex) { return m_jit.graph().isJSConstant(nodeIndex); }
    bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().isInt32Constant(nodeIndex); }
    bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.graph().isDoubleConstant(nodeIndex); }
    bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().isNumberConstant(nodeIndex); }
    bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().isBooleanConstant(nodeIndex); }
    bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().isFunctionConstant(nodeIndex); }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().valueOfInt32Constant(nodeIndex); }
    double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfNumberConstant(nodeIndex); }
    int32_t valueOfNumberConstantAsInt32(NodeIndex nodeIndex)
    {
        if (isInt32Constant(nodeIndex))
            return valueOfInt32Constant(nodeIndex);
        return JSC::toInt32(valueOfNumberConstant(nodeIndex));
    }
#if USE(JSVALUE32_64)
    void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
#endif
    JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfJSConstant(nodeIndex); }
    bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfBooleanConstant(nodeIndex); }
    JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfFunctionConstant(nodeIndex); }
    bool isNullConstant(NodeIndex nodeIndex)
    {
        if (!isConstant(nodeIndex))
            return false;
        return valueOfJSConstant(nodeIndex).isNull();
    }

    Identifier* identifier(unsigned index)
    {
        return &m_jit.codeBlock()->identifier(index);
    }

    // Spill all VirtualRegisters back to the RegisterFile.
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
    }

#ifndef NDEBUG
    // Used to ASSERT flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        return true;
    }
#endif

#if USE(JSVALUE64)
    MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
    {
        return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
    }
#endif

    // Helper functions to enable code sharing in implementations of bit/shift ops.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case BitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case BitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case BitOr:
            m_jit.or32(op1, op2, result);
            break;
        case BitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }

    // Returns the index of the branch node if a peephole branch fusion is
    // possible, UINT_MAX otherwise.
    unsigned detectPeepHoleBranch()
    {
        BasicBlock* block = m_jit.graph().m_blocks[m_block].get();

        // Check that no intervening nodes will be generated.
        for (unsigned index = m_indexInBlock + 1; index < block->size() - 1; ++index) {
            NodeIndex nodeIndex = block->at(index);
            if (at(nodeIndex).shouldGenerate())
                return UINT_MAX;
        }

        // Check if the lastNode is a branch on this node.
        Node& lastNode = at(block->last());
        return lastNode.op() == Branch && lastNode.child1().index() == m_compileIndex ? block->size() - 1 : UINT_MAX;
    }

    void nonSpeculativeValueToNumber(Node&);
    void nonSpeculativeValueToInt32(Node&);
    void nonSpeculativeUInt32ToNumber(Node&);

    enum SpillRegistersMode { NeedToSpill, DontSpill };
#if USE(JSVALUE64)
    JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
    JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif

    void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
    void nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert = false);
    bool nonSpeculativeCompareNull(Node&, Edge operand, bool invert = false);

    void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    bool nonSpeculativeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);

    void nonSpeculativePeepholeStrictEq(Node&, NodeIndex branchNodeIndex, bool invert = false);
    void nonSpeculativeNonPeepholeStrictEq(Node&, bool invert = false);
    bool nonSpeculativeStrictEq(Node&, bool invert = false);

    void compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
    void compileInstanceOf(Node&);

    // Access to our fixed callee CallFrame.
    MacroAssembler::Address callFrameSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
    }

    // Access to our fixed callee CallFrame.
    MacroAssembler::Address argumentSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
    }

    MacroAssembler::Address callFrameTagSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address callFramePayloadSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    MacroAssembler::Address argumentTagSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address argumentPayloadSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    void emitCall(Node&);

    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such would use the same GenerationInfo.)
    void useChildren(Node&);

    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        if (format == DataFormatInteger) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInteger(nodeIndex, node.refCount(), reg);
        } else {
#if USE(JSVALUE64)
            ASSERT(format == DataFormatJSInteger);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(nodeIndex, node.refCount(), reg, format);
#elif USE(JSVALUE32_64)
            ASSERT_NOT_REACHED();
#endif
        }
    }
    void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
    {
        integerResult(reg, nodeIndex, DataFormatInteger, mode);
    }
    void noResult(NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == UseChildrenCalledExplicitly)
            return;
        Node& node = at(nodeIndex);
        useChildren(node);
    }
    void cellResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initCell(nodeIndex, node.refCount(), reg);
    }
    void booleanResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initBoolean(nodeIndex, node.refCount(), reg);
    }
#if USE(JSVALUE64)
    void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (format == DataFormatJSInteger)
            m_jit.jitAssertIsJSInt32(reg);

        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(nodeIndex, node.refCount(), reg, format);
    }
    void jsValueResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
    {
        jsValueResult(reg, nodeIndex, DataFormatJS, mode);
    }
#elif USE(JSVALUE32_64)
    void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(nodeIndex, node.refCount(), tag, payload, format);
    }
    void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, nodeIndex, DataFormatJS, mode);
    }
#endif
    void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initStorage(nodeIndex, node.refCount(), reg);
    }
    void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
    {
        Node& node = at(nodeIndex);
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initDouble(nodeIndex, node.refCount(), reg);
    }
    void initConstantInfo(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex) || isJSConstant(nodeIndex));
        Node& node = at(nodeIndex);
        m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
    }

    // These methods add calls to C++ helper functions. They are broadly
    // value-representation specific (i.e. they deal with the fact that a
    // JSValue may be passed in one or two machine registers), and delegate
    // the calling-convention-specific decision as to how to fill the
    // registers to the setupArguments* methods.
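    // By apparent convention (see DFGOperations.h), the operation type
    // names encode the signature: the letter before "_DFGOperation" is the
    // return type and the letters after the underscore are the argument
    // types, e.g. J = EncodedJSValue, E = ExecState*, C = JSCell*,
    // I = Identifier*, P = void*, Z = int32_t, S = size_t, D = double,
    // A = JSArray*, G = JSGlobalObject*, V = void.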
#if USE(JSVALUE64)
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArguments(arg1);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
    {
        m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
#else
    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        prepareForExternalCall();
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArguments(arg1Payload, arg1Tag);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
#endif

1460 #if !defined(NDEBUG) && !CPU(ARM_THUMB2)
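// Debug builds scribble a poison value (0xbadbeef) over each 32-bit chunk of
// globalData->topCallFrame before calling out, so that any external code that
// relies on a stale topCallFrame fails conspicuously rather than silently.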
1461 void prepareForExternalCall()
1462 {
1463 for (unsigned i = 0; i < sizeof(void*) / 4; i++)
1464 m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.globalData()->topCallFrame) + i * 4);
1465 }
1466 #else
1467 void prepareForExternalCall() { }
1468 #endif
1469
1470 // These methods append call instructions, optionally adding an exception check and/or moving the call's result into the requested register(s).
1471 JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
1472 {
1473 prepareForExternalCall();
1474 CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
1475 CallBeginToken token = m_jit.beginCall();
1476 JITCompiler::Call call = m_jit.appendCall(function);
1477 m_jit.addExceptionCheck(call, codeOrigin, token);
1478 return call;
1479 }
1480 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
1481 {
1482 JITCompiler::Call call = appendCallWithExceptionCheck(function);
1483 m_jit.move(GPRInfo::returnValueGPR, result);
1484 return call;
1485 }
1486 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
1487 {
1488 prepareForExternalCall();
1489 JITCompiler::Call call = m_jit.appendCall(function);
1490 m_jit.move(GPRInfo::returnValueGPR, result);
1491 return call;
1492 }
1493 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
1494 {
1495 JITCompiler::Call call = appendCallWithExceptionCheck(function);
1496 m_jit.setupResults(result1, result2);
1497 return call;
1498 }
1499 #if CPU(X86)
1500 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1501 {
1502 JITCompiler::Call call = appendCallWithExceptionCheck(function);
1503 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
1504 m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
1505 return call;
1506 }
1507 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1508 {
1509 JITCompiler::Call call = m_jit.appendCall(function);
1510 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
1511 m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
1512 return call;
1513 }
1514 #elif CPU(ARM)
1515 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1516 {
1517 JITCompiler::Call call = appendCallWithExceptionCheck(function);
1518 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
1519 return call;
1520 }
1521 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1522 {
1523 JITCompiler::Call call = m_jit.appendCall(function);
1524 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
1525 return call;
1526 }
1527 #else
1528 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1529 {
1530 JITCompiler::Call call = appendCallWithExceptionCheck(function);
1531 m_jit.moveDouble(FPRInfo::returnValueFPR, result);
1532 return call;
1533 }
1534 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1535 {
1536 JITCompiler::Call call = m_jit.appendCall(function);
1537 m_jit.moveDouble(FPRInfo::returnValueFPR, result);
1538 return call;
1539 }
1540 #endif
1541
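// A minimal usage sketch of the callOperation() family (illustrative only;
// the operation name and registers are hypothetical). A node's code generator
// would typically write:
//
//     GPRResult result(this);
//     callOperation(operationSomeHelper, result.gpr(), baseGPR);
//
// which marshals ExecState plus the arguments, appends the call, adds an
// exception check for the overloads that require one, and moves the C
// function's return value into result.gpr().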
1542 void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BlockIndex destination)
1543 {
1544 if (!haveEdgeCodeToEmit(destination))
1545 return addBranch(m_jit.branchDouble(cond, left, right), destination);
1546
1547 JITCompiler::Jump notTaken = m_jit.branchDouble(JITCompiler::invert(cond), left, right);
1548 emitEdgeCode(destination);
1549 addBranch(m_jit.jump(), destination);
1550 notTaken.link(&m_jit);
1551 }
1552
1553 void branchDoubleNonZero(FPRReg value, FPRReg scratch, BlockIndex destination)
1554 {
1555 if (!haveEdgeCodeToEmit(destination))
1556 return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
1557
1558 JITCompiler::Jump notTaken = m_jit.branchDoubleZeroOrNaN(value, scratch);
1559 emitEdgeCode(destination);
1560 addBranch(m_jit.jump(), destination);
1561 notTaken.link(&m_jit);
1562 }
1563
1564 template<typename T, typename U>
1565 void branch32(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
1566 {
1567 if (!haveEdgeCodeToEmit(destination))
1568 return addBranch(m_jit.branch32(cond, left, right), destination);
1569
1570 JITCompiler::Jump notTaken = m_jit.branch32(JITCompiler::invert(cond), left, right);
1571 emitEdgeCode(destination);
1572 addBranch(m_jit.jump(), destination);
1573 notTaken.link(&m_jit);
1574 }
1575
1576 template<typename T, typename U>
1577 void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
1578 {
1579 ASSERT(JITCompiler::isInvertible(cond));
1580
1581 if (!haveEdgeCodeToEmit(destination))
1582 return addBranch(m_jit.branchTest32(cond, value, mask), destination);
1583
1584 JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value, mask);
1585 emitEdgeCode(destination);
1586 addBranch(m_jit.jump(), destination);
1587 notTaken.link(&m_jit);
1588 }
1589
1590 template<typename T>
1591 void branchTest32(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
1592 {
1593 ASSERT(JITCompiler::isInvertible(cond));
1594
1595 if (!haveEdgeCodeToEmit(destination))
1596 return addBranch(m_jit.branchTest32(cond, value), destination);
1597
1598 JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value);
1599 emitEdgeCode(destination);
1600 addBranch(m_jit.jump(), destination);
1601 notTaken.link(&m_jit);
1602 }
1603
1604 template<typename T, typename U>
1605 void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
1606 {
1607 if (!haveEdgeCodeToEmit(destination))
1608 return addBranch(m_jit.branchPtr(cond, left, right), destination);
1609
1610 JITCompiler::Jump notTaken = m_jit.branchPtr(JITCompiler::invert(cond), left, right);
1611 emitEdgeCode(destination);
1612 addBranch(m_jit.jump(), destination);
1613 notTaken.link(&m_jit);
1614 }
1615
1616 template<typename T, typename U>
1617 void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
1618 {
1619 ASSERT(JITCompiler::isInvertible(cond));
1620
1621 if (!haveEdgeCodeToEmit(destination))
1622 return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
1623
1624 JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value, mask);
1625 emitEdgeCode(destination);
1626 addBranch(m_jit.jump(), destination);
1627 notTaken.link(&m_jit);
1628 }
1629
1630 template<typename T>
1631 void branchTestPtr(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
1632 {
1633 ASSERT(JITCompiler::isInvertible(cond));
1634
1635 if (!haveEdgeCodeToEmit(destination))
1636 return addBranch(m_jit.branchTestPtr(cond, value), destination);
1637
1638 JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value);
1639 emitEdgeCode(destination);
1640 addBranch(m_jit.jump(), destination);
1641 notTaken.link(&m_jit);
1642 }
1643
1644 template<typename T, typename U>
1645 void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
1646 {
1647 ASSERT(JITCompiler::isInvertible(cond));
1648
1649 if (!haveEdgeCodeToEmit(destination))
1650 return addBranch(m_jit.branchTest8(cond, value, mask), destination);
1651
1652 JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value, mask);
1653 emitEdgeCode(destination);
1654 addBranch(m_jit.jump(), destination);
1655 notTaken.link(&m_jit);
1656 }
1657
1658 template<typename T>
1659 void branchTest8(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
1660 {
1661 ASSERT(JITCompiler::isInvertible(cond));
1662
1663 if (!haveEdgeCodeToEmit(destination))
1664 return addBranch(m_jit.branchTest8(cond, value), destination);
1665
1666 JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value);
1667 emitEdgeCode(destination);
1668 addBranch(m_jit.jump(), destination);
1669 notTaken.link(&m_jit);
1670 }
1671
1672 enum FallThroughMode {
1673 AtFallThroughPoint,
1674 ForceJump
1675 };
1676 void jump(BlockIndex destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
1677 {
1678 if (haveEdgeCodeToEmit(destination))
1679 emitEdgeCode(destination);
1680 if (destination == m_block + 1
1681 && fallThroughMode == AtFallThroughPoint)
1682 return;
1683 addBranch(m_jit.jump(), destination);
1684 }
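// Sketch of the intended branching pattern (the condition, register, and
// block indices are hypothetical):
//
//     branch32(JITCompiler::Equal, valueGPR, TrustedImm32(0), taken);
//     jump(notTaken);
//
// The branch helpers emit edge-verification code when
// DFG_ENABLE_EDGE_CODE_VERIFICATION is set, and jump() elides the jump
// entirely when the destination is the fall-through block.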
1685
1686 inline bool haveEdgeCodeToEmit(BlockIndex)
1687 {
1688 return DFG_ENABLE_EDGE_CODE_VERIFICATION;
1689 }
1690 void emitEdgeCode(BlockIndex destination)
1691 {
1692 if (!DFG_ENABLE_EDGE_CODE_VERIFICATION)
1693 return;
1694 m_jit.move(TrustedImm32(destination), GPRInfo::regT0);
1695 }
1696
1697 void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
1698 {
1699 m_branches.append(BranchRecord(jump, destination));
1700 }
1701
1702 void linkBranches()
1703 {
1704 for (size_t i = 0; i < m_branches.size(); ++i) {
1705 BranchRecord& branch = m_branches[i];
1706 branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
1707 }
1708 }
1709
1710 BasicBlock* block()
1711 {
1712 return m_jit.graph().m_blocks[m_block].get();
1713 }
1714
1715 #ifndef NDEBUG
1716 void dump(const char* label = 0);
1717 #endif
1718
1719 #if DFG_ENABLE(CONSISTENCY_CHECK)
1720 void checkConsistency();
1721 #else
1722 void checkConsistency() { }
1723 #endif
1724
1725 bool isInteger(NodeIndex nodeIndex)
1726 {
1727 Node& node = at(nodeIndex);
1728 if (node.hasInt32Result())
1729 return true;
1730
1731 if (isInt32Constant(nodeIndex))
1732 return true;
1733
1734 VirtualRegister virtualRegister = node.virtualRegister();
1735 GenerationInfo& info = m_generationInfo[virtualRegister];
1736
1737 return info.isJSInteger();
1738 }
1739
1740 bool compare(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
1741 bool compilePeepHoleBranch(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
1742 void compilePeepHoleIntegerBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition);
1743 void compilePeepHoleDoubleBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition);
1744 void compilePeepHoleObjectEquality(Node&, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
1745 void compilePeepHoleObjectToObjectOrOtherEquality(
1746 Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
1747 void compileObjectEquality(Node&, const ClassInfo*, PredictionChecker);
1748 void compileObjectToObjectOrOtherEquality(
1749 Edge leftChild, Edge rightChild, const ClassInfo*, PredictionChecker);
1750 void compileValueAdd(Node&);
1751 void compileObjectOrOtherLogicalNot(Edge value, const ClassInfo*, bool needSpeculationCheck);
1752 void compileLogicalNot(Node&);
1753 void emitObjectOrOtherBranch(Edge value, BlockIndex taken, BlockIndex notTaken, const ClassInfo*, bool needSpeculationCheck);
1754 void emitBranch(Node&);
1755
1756 void compileIntegerCompare(Node&, MacroAssembler::RelationalCondition);
1757 void compileDoubleCompare(Node&, MacroAssembler::DoubleCondition);
1758
1759 bool compileStrictEqForConstant(Node&, Edge value, JSValue constant);
1760
1761 bool compileStrictEq(Node&);
1762
1763 void compileGetCharCodeAt(Node&);
1764 void compileGetByValOnString(Node&);
1765 void compileValueToInt32(Node&);
1766 void compileUInt32ToNumber(Node&);
1767 void compileDoubleAsInt32(Node&);
1768 void compileInt32ToDouble(Node&);
1769 void compileAdd(Node&);
1770 void compileArithSub(Node&);
1771 void compileArithNegate(Node&);
1772 void compileArithMul(Node&);
1773 #if CPU(X86) || CPU(X86_64)
1774 void compileIntegerArithDivForX86(Node&);
1775 #endif
1776 void compileArithMod(Node&);
1777 void compileSoftModulo(Node&);
1778 void compileGetTypedArrayLength(const TypedArrayDescriptor&, Node&, bool needsSpeculationCheck);
1779 enum TypedArraySpeculationRequirements {
1780 NoTypedArraySpecCheck,
1781 NoTypedArrayTypeSpecCheck,
1782 AllTypedArraySpecChecks
1783 };
1784 enum TypedArraySignedness {
1785 SignedTypedArray,
1786 UnsignedTypedArray
1787 };
1788 enum TypedArrayRounding {
1789 TruncateRounding,
1790 ClampRounding
1791 };
1792 void compileGetIndexedPropertyStorage(Node&);
1793 void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
1794 void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
1795 void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements);
1796 void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements);
1797 void compileNewFunctionNoCheck(Node&);
1798 void compileNewFunctionExpression(Node&);
1799 bool compileRegExpExec(Node&);
1800
1801 template <typename ClassType, bool destructor, typename StructureType>
1802 void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
1803 {
1804 MarkedAllocator* allocator = 0;
1805 if (destructor)
1806 allocator = &m_jit.globalData()->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
1807 else
1808 allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
1809
1810 m_jit.loadPtr(&allocator->m_freeList.head, resultGPR);
1811 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
1812
1813 // The object is half-allocated: we have what we know is a fresh object, but
1814 // it's still on the GC's free list.
1815
1816 // Ditch the structure by placing it into the structure slot, so that we can reuse
1817 // scratchGPR.
1818 m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSObject::structureOffset()));
1819
1820 // Now that we have scratchGPR back, remove the object from the free list
1821 m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
1822 m_jit.storePtr(scratchGPR, &allocator->m_freeList.head);
1823
1824 // Initialize the object's classInfo pointer
1825 m_jit.storePtr(MacroAssembler::TrustedImmPtr(&ClassType::s_info), MacroAssembler::Address(resultGPR, JSCell::classInfoOffset()));
1826
1827 // Initialize the object's inheritorID.
1828 m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::offsetOfInheritorID()));
1829
1830 // Initialize the object's property storage pointer.
1831 m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject)), resultGPR, scratchGPR);
1832 m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, ClassType::offsetOfPropertyStorage()));
1833 }
1834
1835 // It is acceptable to have structure be equal to scratch, so long as you're fine
1836 // with the structure GPR being clobbered.
1837 template<typename T>
1838 void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
1839 {
1840 return emitAllocateBasicJSObject<JSFinalObject, false>(structure, resultGPR, scratchGPR, slowPath);
1841 }
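// Call-site sketch (registers are hypothetical; the slow path body is
// elided):
//
//     MacroAssembler::JumpList slowPath;
//     emitAllocateJSFinalObject(TrustedImmPtr(structure), resultGPR, scratchGPR, slowPath);
//
// The fast path pops the head of the MarkedAllocator's free list and
// initializes the structure, classInfo, inheritorID, and property storage
// fields; slowPath must be bound to code that allocates through the C++ heap
// instead, and is taken when the free list is empty.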
1842
1843 #if USE(JSVALUE64)
1844 JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
1845 #elif USE(JSVALUE32_64)
1846 JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
1847 #endif
1848
1849 // Add a speculation check without additional recovery.
1850 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail)
1851 {
1852 if (!m_compileOkay)
1853 return;
1854 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this));
1855 }
1856 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
1857 {
1858 speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail);
1859 }
1860 // Add a set of speculation checks without additional recovery.
1861 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
1862 {
1863 Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
1864 for (unsigned i = 0; i < jumpVector.size(); ++i)
1865 speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
1866 }
1867 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail)
1868 {
1869 speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail);
1870 }
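// Usage sketch (hypothetical): pair a branch that jumps on the failure case
// with the node being compiled, e.g. to speculate that a value is a cell:
//
//     speculationCheck(BadType, JSValueRegs(valueGPR), nodeIndex,
//         m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister));
//
// The jump is recorded as an OSR exit; if it is taken at run time, execution
// bails out of the speculatively compiled code.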
1871 // Add a speculation check with additional recovery.
1872 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
1873 {
1874 if (!m_compileOkay)
1875 return;
1876 m_jit.codeBlock()->appendSpeculationRecovery(recovery);
1877 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
1878 }
1879 void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
1880 {
1881 speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail, recovery);
1882 }
1883 void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
1884 {
1885 speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
1886
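// The caller guarantees this check is emitted just before a SetLocal for the
// same code origin. Walk forward to that SetLocal (skipping an interposed
// Int32ToDouble, if any), then retarget the recorded OSR exit at the next
// code origin and attach the supplied value recovery for the SetLocal's
// operand.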
1887 unsigned setLocalIndexInBlock = m_indexInBlock + 1;
1888
1889 Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
1890
1891 if (setLocal->op() == Int32ToDouble) {
1892 setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
1893 ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
1894 } else
1895 ASSERT(setLocal->child1() == m_compileIndex);
1896
1897 ASSERT(setLocal->op() == SetLocal);
1898 ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin);
1899
1900 Node* nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
1901 if (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
1902 ASSERT(nextNode->op() == Flush);
1903 nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 2));
1904 ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin); // duplicate the same assertion as below so that if we fail, we'll know we came down this path.
1905 }
1906 ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
1907
1908 OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
1909 exit.m_codeOrigin = nextNode->codeOrigin;
1910 exit.m_lastSetOperand = setLocal->local();
1911
1912 exit.valueRecoveryForOperand(setLocal->local()) = valueRecovery;
1913 }
1914 void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
1915 {
1916 Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
1917 for (unsigned i = 0; i < jumpVector.size(); ++i)
1918 forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i], valueRecovery);
1919 }
1920
1921 // Called when we statically determine that a speculation will fail.
1922 void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
1923 {
1924 #if DFG_ENABLE(DEBUG_VERBOSE)
1925 dataLog("SpeculativeJIT was terminated.\n");
1926 #endif
1927 if (!m_compileOkay)
1928 return;
1929 speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump());
1930 m_compileOkay = false;
1931 }
1932 void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
1933 {
1934 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.index());
1935 }
1936
1937 template<bool strict>
1938 GPRReg fillSpeculateIntInternal(NodeIndex, DataFormat& returnFormat);
1939
1940 // It is possible, during speculative generation, to reach a situation in which we
1941 // can statically determine a speculation will fail (for example, when two nodes
1942 // will make conflicting speculations about the same operand). In such cases this
1943 // flag is cleared, indicating no further code generation should take place.
1944 bool m_compileOkay;
1945
1946 // Tracks which nodes are currently holding the values of arguments and bytecode
1947 // operand-indexed variables.
1948
1949 ValueSource valueSourceForOperand(int operand)
1950 {
1951 return valueSourceReferenceForOperand(operand);
1952 }
1953
1954 void setNodeIndexForOperand(NodeIndex nodeIndex, int operand)
1955 {
1956 valueSourceReferenceForOperand(operand) = ValueSource(nodeIndex);
1957 }
1958
1959 // Call this with care, since it both returns a reference into an array
1960 // and potentially resizes the array. So it would not be right to call this
1961 // twice and then use both references, since the one from
1962 // the first call may no longer be valid.
1963 ValueSource& valueSourceReferenceForOperand(int operand)
1964 {
1965 if (operandIsArgument(operand)) {
1966 int argument = operandToArgument(operand);
1967 return m_arguments[argument];
1968 }
1969
1970 if ((unsigned)operand >= m_variables.size())
1971 m_variables.resize(operand + 1);
1972
1973 return m_variables[operand];
1974 }
1975
1976 // The JIT compiler, which also provides MacroAssembler functionality.
1977 JITCompiler& m_jit;
1978 // The current block and node being generated.
1979 BlockIndex m_block;
1980 NodeIndex m_compileIndex;
1981 unsigned m_indexInBlock;
1982 // Virtual and physical register maps.
1983 Vector<GenerationInfo, 32> m_generationInfo;
1984 RegisterBank<GPRInfo> m_gprs;
1985 RegisterBank<FPRInfo> m_fprs;
1986
1987 Vector<MacroAssembler::Label> m_blockHeads;
1988 Vector<MacroAssembler::Label> m_osrEntryHeads;
1989
1990 struct BranchRecord {
1991 BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
1992 : jump(jump)
1993 , destination(destination)
1994 {
1995 }
1996
1997 MacroAssembler::Jump jump;
1998 BlockIndex destination;
1999 };
2000 Vector<BranchRecord, 8> m_branches;
2001
2002 Vector<ValueSource, 0> m_arguments;
2003 Vector<ValueSource, 0> m_variables;
2004 int m_lastSetOperand;
2005 CodeOrigin m_codeOriginForOSR;
2006
2007 AbstractState m_state;
2008
2009 ValueRecovery computeValueRecoveryFor(const ValueSource&);
2010
2011 ValueRecovery computeValueRecoveryFor(int operand)
2012 {
2013 return computeValueRecoveryFor(valueSourceForOperand(operand));
2014 }
2015 };
2016
2017
2018 // === Operand types ===
2019 //
2020 // IntegerOperand, DoubleOperand and JSValueOperand.
2021 //
2022 // These classes are used to lock the operands to a node into machine
2023 // registers. They implement a pattern of locking a value
2024 // into a register at the point of construction only if it is already in
2025 // registers, and otherwise loading it lazily at the point it is first
2026 // used. We do this in an attempt to avoid spilling one operand
2027 // in order to make space for another.
2028
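// Illustrative sketch (the node and its children are hypothetical): a binary
// operation locks both children, then claims a temporary that may reuse one
// of their registers if the child's value dies at this node:
//
//     IntegerOperand op1(this, node.child1());
//     IntegerOperand op2(this, node.child2());
//     GPRTemporary result(this, op1);     // may alias op1.gpr()
//     m_jit.move(op1.gpr(), result.gpr());
//     m_jit.add32(op2.gpr(), result.gpr());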
2029 class IntegerOperand {
2030 public:
2031 explicit IntegerOperand(SpeculativeJIT* jit, Edge use)
2032 : m_jit(jit)
2033 , m_index(use.index())
2034 , m_gprOrInvalid(InvalidGPRReg)
2035 #ifndef NDEBUG
2036 , m_format(DataFormatNone)
2037 #endif
2038 {
2039 ASSERT(m_jit);
2040 ASSERT(use.useKind() != DoubleUse);
2041 if (jit->isFilled(m_index))
2042 gpr();
2043 }
2044
2045 ~IntegerOperand()
2046 {
2047 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2048 m_jit->unlock(m_gprOrInvalid);
2049 }
2050
2051 NodeIndex index() const
2052 {
2053 return m_index;
2054 }
2055
2056 DataFormat format()
2057 {
2058 gpr(); // m_format is set when m_gpr is locked.
2059 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
2060 return m_format;
2061 }
2062
2063 GPRReg gpr()
2064 {
2065 if (m_gprOrInvalid == InvalidGPRReg)
2066 m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
2067 return m_gprOrInvalid;
2068 }
2069
2070 void use()
2071 {
2072 m_jit->use(m_index);
2073 }
2074
2075 private:
2076 SpeculativeJIT* m_jit;
2077 NodeIndex m_index;
2078 GPRReg m_gprOrInvalid;
2079 DataFormat m_format;
2080 };
2081
2082 class DoubleOperand {
2083 public:
2084 explicit DoubleOperand(SpeculativeJIT* jit, Edge use)
2085 : m_jit(jit)
2086 , m_index(use.index())
2087 , m_fprOrInvalid(InvalidFPRReg)
2088 {
2089 ASSERT(m_jit);
2090
2091 // This is counter-intuitive but correct. DoubleOperand is intended to
2092 // be used only when you're a node that is happy to accept an untyped
2093 // value, but will special-case for doubles (using DoubleOperand) if the
2094 // value happened to already be represented as a double. The implication
2095 // is that you will not try to force the value to become a double if it
2096 // is not one already.
2097 ASSERT(use.useKind() != DoubleUse);
2098
2099 if (jit->isFilledDouble(m_index))
2100 fpr();
2101 }
2102
2103 ~DoubleOperand()
2104 {
2105 ASSERT(m_fprOrInvalid != InvalidFPRReg);
2106 m_jit->unlock(m_fprOrInvalid);
2107 }
2108
2109 NodeIndex index() const
2110 {
2111 return m_index;
2112 }
2113
2114 FPRReg fpr()
2115 {
2116 if (m_fprOrInvalid == InvalidFPRReg)
2117 m_fprOrInvalid = m_jit->fillDouble(index());
2118 return m_fprOrInvalid;
2119 }
2120
2121 void use()
2122 {
2123 m_jit->use(m_index);
2124 }
2125
2126 private:
2127 SpeculativeJIT* m_jit;
2128 NodeIndex m_index;
2129 FPRReg m_fprOrInvalid;
2130 };
2131
2132 class JSValueOperand {
2133 public:
2134 explicit JSValueOperand(SpeculativeJIT* jit, Edge use)
2135 : m_jit(jit)
2136 , m_index(use.index())
2137 #if USE(JSVALUE64)
2138 , m_gprOrInvalid(InvalidGPRReg)
2139 #elif USE(JSVALUE32_64)
2140 , m_isDouble(false)
2141 #endif
2142 {
2143 ASSERT(m_jit);
2144 ASSERT(use.useKind() != DoubleUse);
2145 #if USE(JSVALUE64)
2146 if (jit->isFilled(m_index))
2147 gpr();
2148 #elif USE(JSVALUE32_64)
2149 m_register.pair.tagGPR = InvalidGPRReg;
2150 m_register.pair.payloadGPR = InvalidGPRReg;
2151 if (jit->isFilled(m_index))
2152 fill();
2153 #endif
2154 }
2155
2156 ~JSValueOperand()
2157 {
2158 #if USE(JSVALUE64)
2159 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2160 m_jit->unlock(m_gprOrInvalid);
2161 #elif USE(JSVALUE32_64)
2162 if (m_isDouble) {
2163 ASSERT(m_register.fpr != InvalidFPRReg);
2164 m_jit->unlock(m_register.fpr);
2165 } else {
2166 ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
2167 m_jit->unlock(m_register.pair.tagGPR);
2168 m_jit->unlock(m_register.pair.payloadGPR);
2169 }
2170 #endif
2171 }
2172
2173 NodeIndex index() const
2174 {
2175 return m_index;
2176 }
2177
2178 #if USE(JSVALUE64)
2179 GPRReg gpr()
2180 {
2181 if (m_gprOrInvalid == InvalidGPRReg)
2182 m_gprOrInvalid = m_jit->fillJSValue(index());
2183 return m_gprOrInvalid;
2184 }
2185 JSValueRegs jsValueRegs()
2186 {
2187 return JSValueRegs(gpr());
2188 }
2189 #elif USE(JSVALUE32_64)
2190 bool isDouble() { return m_isDouble; }
2191
2192 void fill()
2193 {
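// fillJSValue() returns true when it filled the tag/payload GPR pair, and
// false when the value was already represented as a double, in which case
// it has been filled into m_register.fpr instead.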
2194 if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
2195 m_isDouble = !m_jit->fillJSValue(index(), m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
2196 }
2197
2198 GPRReg tagGPR()
2199 {
2200 fill();
2201 ASSERT(!m_isDouble);
2202 return m_register.pair.tagGPR;
2203 }
2204
2205 GPRReg payloadGPR()
2206 {
2207 fill();
2208 ASSERT(!m_isDouble);
2209 return m_register.pair.payloadGPR;
2210 }
2211
2212 JSValueRegs jsValueRegs()
2213 {
2214 return JSValueRegs(tagGPR(), payloadGPR());
2215 }
2216
2217 FPRReg fpr()
2218 {
2219 fill();
2220 ASSERT(m_isDouble);
2221 return m_register.fpr;
2222 }
2223 #endif
2224
2225 void use()
2226 {
2227 m_jit->use(m_index);
2228 }
2229
2230 private:
2231 SpeculativeJIT* m_jit;
2232 NodeIndex m_index;
2233 #if USE(JSVALUE64)
2234 GPRReg m_gprOrInvalid;
2235 #elif USE(JSVALUE32_64)
2236 union {
2237 struct {
2238 GPRReg tagGPR;
2239 GPRReg payloadGPR;
2240 } pair;
2241 FPRReg fpr;
2242 } m_register;
2243 bool m_isDouble;
2244 #endif
2245 };
2246
2247 class StorageOperand {
2248 public:
2249 explicit StorageOperand(SpeculativeJIT* jit, Edge use)
2250 : m_jit(jit)
2251 , m_index(use.index())
2252 , m_gprOrInvalid(InvalidGPRReg)
2253 {
2254 ASSERT(m_jit);
2255 ASSERT(use.useKind() != DoubleUse);
2256 if (jit->isFilled(m_index))
2257 gpr();
2258 }
2259
2260 ~StorageOperand()
2261 {
2262 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2263 m_jit->unlock(m_gprOrInvalid);
2264 }
2265
2266 NodeIndex index() const
2267 {
2268 return m_index;
2269 }
2270
2271 GPRReg gpr()
2272 {
2273 if (m_gprOrInvalid == InvalidGPRReg)
2274 m_gprOrInvalid = m_jit->fillStorage(index());
2275 return m_gprOrInvalid;
2276 }
2277
2278 void use()
2279 {
2280 m_jit->use(m_index);
2281 }
2282
2283 private:
2284 SpeculativeJIT* m_jit;
2285 NodeIndex m_index;
2286 GPRReg m_gprOrInvalid;
2287 };
2288
2289
2290 // === Temporaries ===
2291 //
2292 // These classes are used to allocate temporary registers.
2293 // A mechanism is provided to attempt to reuse the registers
2294 // currently allocated to child nodes whose value is consumed
2295 // by, and not live after, this operation.
2296
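// Sketch (hypothetical operand): constructing a GPRTemporary from an operand
// asks for that operand's register to be reused when the operand is not live
// past this node, so
//
//     GPRTemporary scratch(this, op1);
//
// may yield either op1's register or a freshly allocated one; callers must
// not assume which.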
2297 class GPRTemporary {
2298 public:
2299 GPRTemporary();
2300 GPRTemporary(SpeculativeJIT*);
2301 GPRTemporary(SpeculativeJIT*, GPRReg specific);
2302 GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&);
2303 GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
2304 GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&);
2305 GPRTemporary(SpeculativeJIT*, IntegerOperand&);
2306 GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&);
2307 GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&);
2308 GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&);
2309 #if USE(JSVALUE64)
2310 GPRTemporary(SpeculativeJIT*, JSValueOperand&);
2311 #elif USE(JSVALUE32_64)
2312 GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true);
2313 #endif
2314 GPRTemporary(SpeculativeJIT*, StorageOperand&);
2315
2316 void adopt(GPRTemporary&);
2317
2318 ~GPRTemporary()
2319 {
2320 if (m_jit && m_gpr != InvalidGPRReg)
2321 m_jit->unlock(gpr());
2322 }
2323
2324 GPRReg gpr()
2325 {
2326 return m_gpr;
2327 }
2328
2329 private:
2330 SpeculativeJIT* m_jit;
2331 GPRReg m_gpr;
2332 };
2333
2334 class FPRTemporary {
2335 public:
2336 FPRTemporary(SpeculativeJIT*);
2337 FPRTemporary(SpeculativeJIT*, DoubleOperand&);
2338 FPRTemporary(SpeculativeJIT*, DoubleOperand&, DoubleOperand&);
2339 FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
2340 FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
2341 #if USE(JSVALUE32_64)
2342 FPRTemporary(SpeculativeJIT*, JSValueOperand&);
2343 #endif
2344
2345 ~FPRTemporary()
2346 {
2347 m_jit->unlock(fpr());
2348 }
2349
2350 FPRReg fpr() const
2351 {
2352 ASSERT(m_fpr != InvalidFPRReg);
2353 return m_fpr;
2354 }
2355
2356 protected:
2357 FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
2358 : m_jit(jit)
2359 , m_fpr(lockedFPR)
2360 {
2361 }
2362
2363 private:
2364 SpeculativeJIT* m_jit;
2365 FPRReg m_fpr;
2366 };
2367
2368
2369 // === Results ===
2370 //
2371 // These classes lock the result of a call to a C++ helper function.
2372
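// Sketch: integer and pointer results arrive in GPRInfo::returnValueGPR, and
// double results in FPRInfo::returnValueFPR (via the x87 stack on X86), so a
// caller of a hypothetical double-returning helper would write:
//
//     FPRResult result(this);
//     callOperation(operationSomeDoubleHelper, result.fpr(), arg1FPR, arg2FPR);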
2373 class GPRResult : public GPRTemporary {
2374 public:
2375 GPRResult(SpeculativeJIT* jit)
2376 : GPRTemporary(jit, GPRInfo::returnValueGPR)
2377 {
2378 }
2379 };
2380
2381 #if USE(JSVALUE32_64)
2382 class GPRResult2 : public GPRTemporary {
2383 public:
2384 GPRResult2(SpeculativeJIT* jit)
2385 : GPRTemporary(jit, GPRInfo::returnValueGPR2)
2386 {
2387 }
2388 };
2389 #endif
2390
2391 class FPRResult : public FPRTemporary {
2392 public:
2393 FPRResult(SpeculativeJIT* jit)
2394 : FPRTemporary(jit, lockedResult(jit))
2395 {
2396 }
2397
2398 private:
2399 static FPRReg lockedResult(SpeculativeJIT* jit)
2400 {
2401 jit->lock(FPRInfo::returnValueFPR);
2402 return FPRInfo::returnValueFPR;
2403 }
2404 };
2405
2406
2407 // === Speculative Operand types ===
2408 //
2409 // SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
2410 //
2411 // These are used to lock the operands to a node into machine registers within the
2412 // SpeculativeJIT. The classes operate like those above; however, these will
2413 // perform a speculative check for a more restrictive type than we can statically
2414 // determine the operand to have. If the operand does not have the requested type,
2415 // a bail-out to the non-speculative path will be taken.
2416
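// Sketch (hypothetical edge): filling a SpeculateCellOperand emits the cell
// check, recording an OSR exit for the failure case, as a side effect of the
// first gpr() call:
//
//     SpeculateCellOperand base(this, node.child1());
//     GPRReg baseGPR = base.gpr();    // speculation check emitted here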
2417 class SpeculateIntegerOperand {
2418 public:
2419 explicit SpeculateIntegerOperand(SpeculativeJIT* jit, Edge use)
2420 : m_jit(jit)
2421 , m_index(use.index())
2422 , m_gprOrInvalid(InvalidGPRReg)
2423 #ifndef NDEBUG
2424 , m_format(DataFormatNone)
2425 #endif
2426 {
2427 ASSERT(m_jit);
2428 ASSERT(use.useKind() != DoubleUse);
2429 if (jit->isFilled(m_index))
2430 gpr();
2431 }
2432
2433 ~SpeculateIntegerOperand()
2434 {
2435 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2436 m_jit->unlock(m_gprOrInvalid);
2437 }
2438
2439 NodeIndex index() const
2440 {
2441 return m_index;
2442 }
2443
2444 DataFormat format()
2445 {
2446 gpr(); // m_format is set when m_gpr is locked.
2447 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
2448 return m_format;
2449 }
2450
2451 GPRReg gpr()
2452 {
2453 if (m_gprOrInvalid == InvalidGPRReg)
2454 m_gprOrInvalid = m_jit->fillSpeculateInt(index(), m_format);
2455 return m_gprOrInvalid;
2456 }
2457
2458 private:
2459 SpeculativeJIT* m_jit;
2460 NodeIndex m_index;
2461 GPRReg m_gprOrInvalid;
2462 DataFormat m_format;
2463 };
2464
2465 class SpeculateStrictInt32Operand {
2466 public:
2467 explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge use)
2468 : m_jit(jit)
2469 , m_index(use.index())
2470 , m_gprOrInvalid(InvalidGPRReg)
2471 {
2472 ASSERT(m_jit);
2473 ASSERT(use.useKind() != DoubleUse);
2474 if (jit->isFilled(m_index))
2475 gpr();
2476 }
2477
2478 ~SpeculateStrictInt32Operand()
2479 {
2480 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2481 m_jit->unlock(m_gprOrInvalid);
2482 }
2483
2484 NodeIndex index() const
2485 {
2486 return m_index;
2487 }
2488
2489 GPRReg gpr()
2490 {
2491 if (m_gprOrInvalid == InvalidGPRReg)
2492 m_gprOrInvalid = m_jit->fillSpeculateIntStrict(index());
2493 return m_gprOrInvalid;
2494 }
2495
2496 void use()
2497 {
2498 m_jit->use(m_index);
2499 }
2500
2501 private:
2502 SpeculativeJIT* m_jit;
2503 NodeIndex m_index;
2504 GPRReg m_gprOrInvalid;
2505 };
2506
2507 class SpeculateDoubleOperand {
2508 public:
2509 explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge use)
2510 : m_jit(jit)
2511 , m_index(use.index())
2512 , m_fprOrInvalid(InvalidFPRReg)
2513 {
2514 ASSERT(m_jit);
2515 ASSERT(use.useKind() == DoubleUse);
2516 if (jit->isFilled(m_index))
2517 fpr();
2518 }
2519
2520 ~SpeculateDoubleOperand()
2521 {
2522 ASSERT(m_fprOrInvalid != InvalidFPRReg);
2523 m_jit->unlock(m_fprOrInvalid);
2524 }
2525
2526 NodeIndex index() const
2527 {
2528 return m_index;
2529 }
2530
2531 FPRReg fpr()
2532 {
2533 if (m_fprOrInvalid == InvalidFPRReg)
2534 m_fprOrInvalid = m_jit->fillSpeculateDouble(index());
2535 return m_fprOrInvalid;
2536 }
2537
2538 private:
2539 SpeculativeJIT* m_jit;
2540 NodeIndex m_index;
2541 FPRReg m_fprOrInvalid;
2542 };
2543
2544 class SpeculateCellOperand {
2545 public:
2546 explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge use)
2547 : m_jit(jit)
2548 , m_index(use.index())
2549 , m_gprOrInvalid(InvalidGPRReg)
2550 {
2551 ASSERT(m_jit);
2552 ASSERT(use.useKind() != DoubleUse);
2553 if (jit->isFilled(m_index))
2554 gpr();
2555 }
2556
2557 ~SpeculateCellOperand()
2558 {
2559 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2560 m_jit->unlock(m_gprOrInvalid);
2561 }
2562
2563 NodeIndex index() const
2564 {
2565 return m_index;
2566 }
2567
2568 GPRReg gpr()
2569 {
2570 if (m_gprOrInvalid == InvalidGPRReg)
2571 m_gprOrInvalid = m_jit->fillSpeculateCell(index());
2572 return m_gprOrInvalid;
2573 }
2574
2575 void use()
2576 {
2577 m_jit->use(m_index);
2578 }
2579
2580 private:
2581 SpeculativeJIT* m_jit;
2582 NodeIndex m_index;
2583 GPRReg m_gprOrInvalid;
2584 };
2585
2586 class SpeculateBooleanOperand {
2587 public:
2588 explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge use)
2589 : m_jit(jit)
2590 , m_index(use.index())
2591 , m_gprOrInvalid(InvalidGPRReg)
2592 {
2593 ASSERT(m_jit);
2594 ASSERT(use.useKind() != DoubleUse);
2595 if (jit->isFilled(m_index))
2596 gpr();
2597 }
2598
2599 ~SpeculateBooleanOperand()
2600 {
2601 ASSERT(m_gprOrInvalid != InvalidGPRReg);
2602 m_jit->unlock(m_gprOrInvalid);
2603 }
2604
2605 NodeIndex index() const
2606 {
2607 return m_index;
2608 }
2609
2610 GPRReg gpr()
2611 {
2612 if (m_gprOrInvalid == InvalidGPRReg)
2613 m_gprOrInvalid = m_jit->fillSpeculateBoolean(index());
2614 return m_gprOrInvalid;
2615 }
2616
2617 void use()
2618 {
2619 m_jit->use(m_index);
2620 }
2621
2622 private:
2623 SpeculativeJIT* m_jit;
2624 NodeIndex m_index;
2625 GPRReg m_gprOrInvalid;
2626 };
2627
2628 inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
2629 : m_compileOkay(true)
2630 , m_jit(jit)
2631 , m_compileIndex(0)
2632 , m_indexInBlock(0)
2633 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
2634 , m_blockHeads(jit.graph().m_blocks.size())
2635 , m_arguments(jit.codeBlock()->numParameters())
2636 , m_variables(jit.graph().m_localVars)
2637 , m_lastSetOperand(std::numeric_limits<int>::max())
2638 , m_state(m_jit.graph())
2639 {
2640 }
2641
2642 } } // namespace JSC::DFG
2643
2644 #endif
2645 #endif
2646