2 * Copyright (C) 2011 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef DFGSpeculativeJIT_h
27 #define DFGSpeculativeJIT_h
31 #include "DFGAbstractState.h"
32 #include "DFGGenerationInfo.h"
33 #include "DFGJITCompiler.h"
34 #include "DFGOSRExit.h"
35 #include "DFGOperations.h"
36 #include "MarkedAllocator.h"
37 #include "ValueRecovery.h"
39 namespace JSC
{ namespace DFG
{
// Forward declarations of the speculative operand wrapper classes,
// defined later in this file.
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;
50 enum ValueSourceKind
{
55 BooleanInRegisterFile
,
64 : m_nodeIndex(nodeIndexFromKind(SourceNotSet
))
68 explicit ValueSource(ValueSourceKind valueSourceKind
)
69 : m_nodeIndex(nodeIndexFromKind(valueSourceKind
))
71 ASSERT(kind() != SourceNotSet
);
72 ASSERT(kind() != HaveNode
);
75 explicit ValueSource(NodeIndex nodeIndex
)
76 : m_nodeIndex(nodeIndex
)
78 ASSERT(kind() == HaveNode
);
81 static ValueSource
forPrediction(PredictedType prediction
)
83 if (isInt32Prediction(prediction
))
84 return ValueSource(Int32InRegisterFile
);
85 if (isArrayPrediction(prediction
))
86 return ValueSource(CellInRegisterFile
);
87 if (isBooleanPrediction(prediction
))
88 return ValueSource(BooleanInRegisterFile
);
89 return ValueSource(ValueInRegisterFile
);
94 return kindFromNodeIndex(m_nodeIndex
) != SourceNotSet
;
97 ValueSourceKind
kind() const
99 return kindFromNodeIndex(m_nodeIndex
);
102 NodeIndex
nodeIndex() const
104 ASSERT(kind() == HaveNode
);
108 void dump(FILE* out
) const;
111 static NodeIndex
nodeIndexFromKind(ValueSourceKind kind
)
113 ASSERT(kind
>= SourceNotSet
&& kind
< HaveNode
);
114 return NoNode
- kind
;
117 static ValueSourceKind
kindFromNodeIndex(NodeIndex nodeIndex
)
119 unsigned kind
= static_cast<unsigned>(NoNode
- nodeIndex
);
120 if (kind
>= static_cast<unsigned>(HaveNode
))
122 return static_cast<ValueSourceKind
>(kind
);
125 NodeIndex m_nodeIndex
;
// Classifies the format in which a speculative operand was generated; used
// by checkGeneratedTypeForToInt32 to pick a conversion strategy.
enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue };
131 // === SpeculativeJIT ===
133 // The SpeculativeJIT is used to generate a fast, but potentially
134 // incomplete code path for the dataflow. When code generating
135 // we may make assumptions about operand types, dynamically check,
136 // and bail-out to an alternate code path if these checks fail.
137 // Importantly, the speculative code path cannot be reentered once
138 // a speculative check has failed. This allows the SpeculativeJIT
139 // to propagate type information (including information that has
140 // only speculatively been asserted) through the dataflow.
141 class SpeculativeJIT
{
142 friend struct OSRExit
;
144 typedef JITCompiler::TrustedImm32 TrustedImm32
;
145 typedef JITCompiler::Imm32 Imm32
;
146 typedef JITCompiler::TrustedImmPtr TrustedImmPtr
;
147 typedef JITCompiler::ImmPtr ImmPtr
;
149 // These constants are used to set priorities for spill order for
150 // the register allocator.
153 SpillOrderConstant
= 1, // no spill, and cheap fill
154 SpillOrderSpilled
= 2, // no spill
155 SpillOrderJS
= 4, // needs spill
156 SpillOrderCell
= 4, // needs spill
157 SpillOrderStorage
= 4, // needs spill
158 SpillOrderInteger
= 5, // needs spill and box
159 SpillOrderBoolean
= 5, // needs spill and box
160 SpillOrderDouble
= 6, // needs spill and convert
162 #elif USE(JSVALUE32_64)
164 SpillOrderConstant
= 1, // no spill, and cheap fill
165 SpillOrderSpilled
= 2, // no spill
166 SpillOrderJS
= 4, // needs spill
167 SpillOrderStorage
= 4, // needs spill
168 SpillOrderDouble
= 4, // needs spill
169 SpillOrderInteger
= 5, // needs spill and box
170 SpillOrderCell
= 5, // needs spill and box
171 SpillOrderBoolean
= 5, // needs spill and box
175 enum UseChildrenMode
{ CallUseChildren
, UseChildrenCalledExplicitly
};
178 SpeculativeJIT(JITCompiler
&);
181 void createOSREntries();
182 void linkOSREntries(LinkBuffer
&);
184 Node
& at(NodeIndex nodeIndex
)
186 return m_jit
.graph()[nodeIndex
];
188 Node
& at(Edge nodeUse
)
190 return at(nodeUse
.index());
193 GPRReg
fillInteger(NodeIndex
, DataFormat
& returnFormat
);
194 FPRReg
fillDouble(NodeIndex
);
196 GPRReg
fillJSValue(NodeIndex
);
197 #elif USE(JSVALUE32_64)
198 bool fillJSValue(NodeIndex
, GPRReg
&, GPRReg
&, FPRReg
&);
200 GPRReg
fillStorage(NodeIndex
);
202 // lock and unlock GPR & FPR registers.
203 void lock(GPRReg reg
)
207 void lock(FPRReg reg
)
211 void unlock(GPRReg reg
)
215 void unlock(FPRReg reg
)
220 // Used to check whether a child node is on its last use,
221 // and its machine registers may be reused.
222 bool canReuse(NodeIndex nodeIndex
)
224 VirtualRegister virtualRegister
= at(nodeIndex
).virtualRegister();
225 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
226 return info
.canReuse();
228 bool canReuse(Edge nodeUse
)
230 return canReuse(nodeUse
.index());
232 GPRReg
reuse(GPRReg reg
)
237 FPRReg
reuse(FPRReg reg
)
243 // Allocate a gpr/fpr.
246 VirtualRegister spillMe
;
247 GPRReg gpr
= m_gprs
.allocate(spillMe
);
248 if (spillMe
!= InvalidVirtualRegister
) {
249 #if USE(JSVALUE32_64)
250 GenerationInfo
& info
= m_generationInfo
[spillMe
];
251 ASSERT(info
.registerFormat() != DataFormatJSDouble
);
252 if ((info
.registerFormat() & DataFormatJS
))
253 m_gprs
.release(info
.tagGPR() == gpr
? info
.payloadGPR() : info
.tagGPR());
259 GPRReg
allocate(GPRReg specific
)
261 VirtualRegister spillMe
= m_gprs
.allocateSpecific(specific
);
262 if (spillMe
!= InvalidVirtualRegister
) {
263 #if USE(JSVALUE32_64)
264 GenerationInfo
& info
= m_generationInfo
[spillMe
];
265 ASSERT(info
.registerFormat() != DataFormatJSDouble
);
266 if ((info
.registerFormat() & DataFormatJS
))
267 m_gprs
.release(info
.tagGPR() == specific
? info
.payloadGPR() : info
.tagGPR());
275 return m_gprs
.tryAllocate();
279 VirtualRegister spillMe
;
280 FPRReg fpr
= m_fprs
.allocate(spillMe
);
281 if (spillMe
!= InvalidVirtualRegister
)
286 // Check whether a VirtualRegsiter is currently in a machine register.
287 // We use this when filling operands to fill those that are already in
288 // machine registers first (by locking VirtualRegsiters that are already
289 // in machine register before filling those that are not we attempt to
290 // avoid spilling values we will need immediately).
291 bool isFilled(NodeIndex nodeIndex
)
293 VirtualRegister virtualRegister
= at(nodeIndex
).virtualRegister();
294 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
295 return info
.registerFormat() != DataFormatNone
;
297 bool isFilledDouble(NodeIndex nodeIndex
)
299 VirtualRegister virtualRegister
= at(nodeIndex
).virtualRegister();
300 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
301 return info
.registerFormat() == DataFormatDouble
;
304 // Called on an operand once it has been consumed by a parent node.
305 void use(NodeIndex nodeIndex
)
307 VirtualRegister virtualRegister
= at(nodeIndex
).virtualRegister();
308 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
310 // use() returns true when the value becomes dead, and any
311 // associated resources may be freed.
315 // Release the associated machine registers.
316 DataFormat registerFormat
= info
.registerFormat();
318 if (registerFormat
== DataFormatDouble
)
319 m_fprs
.release(info
.fpr());
320 else if (registerFormat
!= DataFormatNone
)
321 m_gprs
.release(info
.gpr());
322 #elif USE(JSVALUE32_64)
323 if (registerFormat
== DataFormatDouble
|| registerFormat
== DataFormatJSDouble
)
324 m_fprs
.release(info
.fpr());
325 else if (registerFormat
& DataFormatJS
) {
326 m_gprs
.release(info
.tagGPR());
327 m_gprs
.release(info
.payloadGPR());
328 } else if (registerFormat
!= DataFormatNone
)
329 m_gprs
.release(info
.gpr());
332 void use(Edge nodeUse
)
334 use(nodeUse
.index());
337 static void markCellCard(MacroAssembler
&, GPRReg ownerGPR
, GPRReg scratchGPR1
, GPRReg scratchGPR2
);
338 static void writeBarrier(MacroAssembler
&, GPRReg ownerGPR
, GPRReg scratchGPR1
, GPRReg scratchGPR2
, WriteBarrierUseKind
);
340 void writeBarrier(GPRReg ownerGPR
, GPRReg valueGPR
, Edge valueUse
, WriteBarrierUseKind
, GPRReg scratchGPR1
= InvalidGPRReg
, GPRReg scratchGPR2
= InvalidGPRReg
);
341 void writeBarrier(GPRReg ownerGPR
, JSCell
* value
, WriteBarrierUseKind
, GPRReg scratchGPR1
= InvalidGPRReg
, GPRReg scratchGPR2
= InvalidGPRReg
);
342 void writeBarrier(JSCell
* owner
, GPRReg valueGPR
, Edge valueUse
, WriteBarrierUseKind
, GPRReg scratchGPR1
= InvalidGPRReg
);
344 static GPRReg
selectScratchGPR(GPRReg preserve1
= InvalidGPRReg
, GPRReg preserve2
= InvalidGPRReg
, GPRReg preserve3
= InvalidGPRReg
, GPRReg preserve4
= InvalidGPRReg
)
346 return AssemblyHelpers::selectScratchGPR(preserve1
, preserve2
, preserve3
, preserve4
);
349 // Called by the speculative operand types, below, to fill operand to
350 // machine registers, implicitly generating speculation checks as needed.
351 GPRReg
fillSpeculateInt(NodeIndex
, DataFormat
& returnFormat
);
352 GPRReg
fillSpeculateIntStrict(NodeIndex
);
353 FPRReg
fillSpeculateDouble(NodeIndex
);
354 GPRReg
fillSpeculateCell(NodeIndex
);
355 GPRReg
fillSpeculateBoolean(NodeIndex
);
356 GeneratedOperandType
checkGeneratedTypeForToInt32(NodeIndex
);
360 void compileMovHint(Node
&);
361 void compile(BasicBlock
&);
363 void checkArgumentTypes();
365 void clearGenerationInfo();
367 // These methods are used when generating 'unexpected'
368 // calls out from JIT code to C++ helper routines -
369 // they spill all live values to the appropriate
370 // slots in the RegisterFile without changing any state
371 // in the GenerationInfo.
372 void silentSpillGPR(VirtualRegister spillMe
, GPRReg source
)
374 GenerationInfo
& info
= m_generationInfo
[spillMe
];
375 ASSERT(info
.registerFormat() != DataFormatNone
);
376 ASSERT(info
.registerFormat() != DataFormatDouble
);
378 if (!info
.needsSpill())
381 DataFormat registerFormat
= info
.registerFormat();
384 ASSERT(info
.gpr() == source
);
385 if (registerFormat
== DataFormatInteger
)
386 m_jit
.store32(source
, JITCompiler::addressFor(spillMe
));
388 ASSERT(registerFormat
& DataFormatJS
|| registerFormat
== DataFormatCell
|| registerFormat
== DataFormatStorage
);
389 m_jit
.storePtr(source
, JITCompiler::addressFor(spillMe
));
391 #elif USE(JSVALUE32_64)
392 if (registerFormat
& DataFormatJS
) {
393 ASSERT(info
.tagGPR() == source
|| info
.payloadGPR() == source
);
394 m_jit
.store32(source
, source
== info
.tagGPR() ? JITCompiler::tagFor(spillMe
) : JITCompiler::payloadFor(spillMe
));
396 ASSERT(info
.gpr() == source
);
397 m_jit
.store32(source
, JITCompiler::payloadFor(spillMe
));
401 void silentSpillFPR(VirtualRegister spillMe
, FPRReg source
)
403 GenerationInfo
& info
= m_generationInfo
[spillMe
];
404 ASSERT(info
.registerFormat() == DataFormatDouble
);
406 if (!info
.needsSpill()) {
407 // it's either a constant or it's already been spilled
408 ASSERT(at(info
.nodeIndex()).hasConstant() || info
.spillFormat() != DataFormatNone
);
412 // it's neither a constant nor has it been spilled.
413 ASSERT(!at(info
.nodeIndex()).hasConstant());
414 ASSERT(info
.spillFormat() == DataFormatNone
);
415 ASSERT(info
.fpr() == source
);
417 m_jit
.storeDouble(source
, JITCompiler::addressFor(spillMe
));
420 void silentFillGPR(VirtualRegister spillMe
, GPRReg target
)
422 GenerationInfo
& info
= m_generationInfo
[spillMe
];
424 NodeIndex nodeIndex
= info
.nodeIndex();
425 Node
& node
= at(nodeIndex
);
426 ASSERT(info
.registerFormat() != DataFormatNone
);
427 ASSERT(info
.registerFormat() != DataFormatDouble
);
428 DataFormat registerFormat
= info
.registerFormat();
430 if (registerFormat
== DataFormatInteger
) {
431 ASSERT(info
.gpr() == target
);
432 ASSERT(isJSInteger(info
.registerFormat()));
433 if (node
.hasConstant()) {
434 ASSERT(isInt32Constant(nodeIndex
));
435 m_jit
.move(Imm32(valueOfInt32Constant(nodeIndex
)), target
);
437 m_jit
.load32(JITCompiler::payloadFor(spillMe
), target
);
441 if (registerFormat
== DataFormatBoolean
) {
443 ASSERT_NOT_REACHED();
444 #elif USE(JSVALUE32_64)
445 ASSERT(info
.gpr() == target
);
446 if (node
.hasConstant()) {
447 ASSERT(isBooleanConstant(nodeIndex
));
448 m_jit
.move(TrustedImm32(valueOfBooleanConstant(nodeIndex
)), target
);
450 m_jit
.load32(JITCompiler::payloadFor(spillMe
), target
);
455 if (registerFormat
== DataFormatCell
) {
456 ASSERT(info
.gpr() == target
);
457 if (node
.hasConstant()) {
458 JSValue value
= valueOfJSConstant(nodeIndex
);
459 ASSERT(value
.isCell());
460 m_jit
.move(TrustedImmPtr(value
.asCell()), target
);
462 m_jit
.loadPtr(JITCompiler::payloadFor(spillMe
), target
);
466 if (registerFormat
== DataFormatStorage
) {
467 ASSERT(info
.gpr() == target
);
468 m_jit
.loadPtr(JITCompiler::addressFor(spillMe
), target
);
472 ASSERT(registerFormat
& DataFormatJS
);
474 ASSERT(info
.gpr() == target
);
475 if (node
.hasConstant()) {
476 if (valueOfJSConstant(nodeIndex
).isCell())
477 m_jit
.move(valueOfJSConstantAsImmPtr(nodeIndex
).asTrustedImmPtr(), target
);
479 m_jit
.move(valueOfJSConstantAsImmPtr(nodeIndex
), target
);
480 } else if (info
.spillFormat() == DataFormatInteger
) {
481 ASSERT(registerFormat
== DataFormatJSInteger
);
482 m_jit
.load32(JITCompiler::payloadFor(spillMe
), target
);
483 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, target
);
484 } else if (info
.spillFormat() == DataFormatDouble
) {
485 ASSERT(registerFormat
== DataFormatJSDouble
);
486 m_jit
.loadPtr(JITCompiler::addressFor(spillMe
), target
);
487 m_jit
.subPtr(GPRInfo::tagTypeNumberRegister
, target
);
489 m_jit
.loadPtr(JITCompiler::addressFor(spillMe
), target
);
491 ASSERT(info
.tagGPR() == target
|| info
.payloadGPR() == target
);
492 if (node
.hasConstant()) {
493 JSValue v
= valueOfJSConstant(nodeIndex
);
494 m_jit
.move(info
.tagGPR() == target
? Imm32(v
.tag()) : Imm32(v
.payload()), target
);
495 } else if (info
.payloadGPR() == target
)
496 m_jit
.load32(JITCompiler::payloadFor(spillMe
), target
);
497 else { // Fill the Tag
498 switch (info
.spillFormat()) {
499 case DataFormatInteger
:
500 ASSERT(registerFormat
== DataFormatJSInteger
);
501 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), target
);
504 ASSERT(registerFormat
== DataFormatJSCell
);
505 m_jit
.move(TrustedImm32(JSValue::CellTag
), target
);
507 case DataFormatBoolean
:
508 ASSERT(registerFormat
== DataFormatJSBoolean
);
509 m_jit
.move(TrustedImm32(JSValue::BooleanTag
), target
);
512 m_jit
.load32(JITCompiler::tagFor(spillMe
), target
);
519 void silentFillFPR(VirtualRegister spillMe
, GPRReg canTrample
, FPRReg target
)
521 GenerationInfo
& info
= m_generationInfo
[spillMe
];
522 ASSERT(info
.fpr() == target
);
524 NodeIndex nodeIndex
= info
.nodeIndex();
525 Node
& node
= at(nodeIndex
);
527 ASSERT(info
.registerFormat() == DataFormatDouble
);
529 if (node
.hasConstant()) {
530 ASSERT(isNumberConstant(nodeIndex
));
531 m_jit
.move(ImmPtr(bitwise_cast
<void*>(valueOfNumberConstant(nodeIndex
))), canTrample
);
532 m_jit
.movePtrToDouble(canTrample
, target
);
536 if (info
.spillFormat() != DataFormatNone
&& info
.spillFormat() != DataFormatDouble
) {
537 // it was already spilled previously and not as a double, which means we need unboxing.
538 ASSERT(info
.spillFormat() & DataFormatJS
);
539 m_jit
.loadPtr(JITCompiler::addressFor(spillMe
), canTrample
);
540 unboxDouble(canTrample
, target
);
544 m_jit
.loadDouble(JITCompiler::addressFor(spillMe
), target
);
545 #elif USE(JSVALUE32_64)
546 UNUSED_PARAM(canTrample
);
547 ASSERT(info
.registerFormat() == DataFormatDouble
|| info
.registerFormat() == DataFormatJSDouble
);
548 if (node
.hasConstant()) {
549 ASSERT(isNumberConstant(nodeIndex
));
550 m_jit
.loadDouble(addressOfDoubleConstant(nodeIndex
), target
);
552 m_jit
.loadDouble(JITCompiler::addressFor(spillMe
), target
);
556 void silentSpillAllRegisters(GPRReg exclude
, GPRReg exclude2
= InvalidGPRReg
)
558 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
559 GPRReg gpr
= iter
.regID();
560 if (iter
.name() != InvalidVirtualRegister
&& gpr
!= exclude
&& gpr
!= exclude2
)
561 silentSpillGPR(iter
.name(), gpr
);
563 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
564 if (iter
.name() != InvalidVirtualRegister
)
565 silentSpillFPR(iter
.name(), iter
.regID());
568 void silentSpillAllRegisters(FPRReg exclude
)
570 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
571 if (iter
.name() != InvalidVirtualRegister
)
572 silentSpillGPR(iter
.name(), iter
.regID());
574 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
575 FPRReg fpr
= iter
.regID();
576 if (iter
.name() != InvalidVirtualRegister
&& fpr
!= exclude
)
577 silentSpillFPR(iter
.name(), fpr
);
581 void silentFillAllRegisters(GPRReg exclude
, GPRReg exclude2
= InvalidGPRReg
)
583 GPRReg canTrample
= GPRInfo::regT0
;
584 if (exclude
== GPRInfo::regT0
)
585 canTrample
= GPRInfo::regT1
;
587 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
588 if (iter
.name() != InvalidVirtualRegister
)
589 silentFillFPR(iter
.name(), canTrample
, iter
.regID());
591 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
592 GPRReg gpr
= iter
.regID();
593 if (iter
.name() != InvalidVirtualRegister
&& gpr
!= exclude
&& gpr
!= exclude2
)
594 silentFillGPR(iter
.name(), gpr
);
597 void silentFillAllRegisters(FPRReg exclude
)
599 GPRReg canTrample
= GPRInfo::regT0
;
601 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
602 FPRReg fpr
= iter
.regID();
603 if (iter
.name() != InvalidVirtualRegister
&& fpr
!= exclude
)
604 silentFillFPR(iter
.name(), canTrample
, fpr
);
606 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
607 if (iter
.name() != InvalidVirtualRegister
)
608 silentFillGPR(iter
.name(), iter
.regID());
612 // These methods convert between doubles, and doubles boxed and JSValues.
614 GPRReg
boxDouble(FPRReg fpr
, GPRReg gpr
)
616 return m_jit
.boxDouble(fpr
, gpr
);
618 FPRReg
unboxDouble(GPRReg gpr
, FPRReg fpr
)
620 return m_jit
.unboxDouble(gpr
, fpr
);
622 GPRReg
boxDouble(FPRReg fpr
)
624 return boxDouble(fpr
, allocate());
626 #elif USE(JSVALUE32_64)
627 void boxDouble(FPRReg fpr
, GPRReg tagGPR
, GPRReg payloadGPR
)
629 m_jit
.boxDouble(fpr
, tagGPR
, payloadGPR
);
631 void unboxDouble(GPRReg tagGPR
, GPRReg payloadGPR
, FPRReg fpr
, FPRReg scratchFPR
)
633 m_jit
.unboxDouble(tagGPR
, payloadGPR
, fpr
, scratchFPR
);
637 // Spill a VirtualRegister to the RegisterFile.
638 void spill(VirtualRegister spillMe
)
640 GenerationInfo
& info
= m_generationInfo
[spillMe
];
642 #if USE(JSVALUE32_64)
643 if (info
.registerFormat() == DataFormatNone
) // it has been spilled. JS values which have two GPRs can reach here
646 // Check the GenerationInfo to see if this value need writing
647 // to the RegisterFile - if not, mark it as spilled & return.
648 if (!info
.needsSpill()) {
653 DataFormat spillFormat
= info
.registerFormat();
654 switch (spillFormat
) {
655 case DataFormatStorage
: {
656 // This is special, since it's not a JS value - as in it's not visible to JS
658 m_jit
.storePtr(info
.gpr(), JITCompiler::addressFor(spillMe
));
659 info
.spill(DataFormatStorage
);
663 case DataFormatInteger
: {
664 m_jit
.store32(info
.gpr(), JITCompiler::payloadFor(spillMe
));
665 info
.spill(DataFormatInteger
);
670 case DataFormatDouble
: {
671 m_jit
.storeDouble(info
.fpr(), JITCompiler::addressFor(spillMe
));
672 info
.spill(DataFormatDouble
);
677 // The following code handles JSValues, int32s, and cells.
678 ASSERT(spillFormat
== DataFormatCell
|| spillFormat
& DataFormatJS
);
680 GPRReg reg
= info
.gpr();
681 // We need to box int32 and cell values ...
682 // but on JSVALUE64 boxing a cell is a no-op!
683 if (spillFormat
== DataFormatInteger
)
684 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, reg
);
686 // Spill the value, and record it as spilled in its boxed form.
687 m_jit
.storePtr(reg
, JITCompiler::addressFor(spillMe
));
688 info
.spill((DataFormat
)(spillFormat
| DataFormatJS
));
690 #elif USE(JSVALUE32_64)
692 case DataFormatBoolean
: {
693 m_jit
.store32(info
.gpr(), JITCompiler::payloadFor(spillMe
));
694 info
.spill(spillFormat
);
698 case DataFormatDouble
:
699 case DataFormatJSDouble
: {
700 // On JSVALUE32_64 boxing a double is a no-op.
701 m_jit
.storeDouble(info
.fpr(), JITCompiler::addressFor(spillMe
));
702 info
.spill(DataFormatJSDouble
);
707 // The following code handles JSValues.
708 ASSERT(spillFormat
& DataFormatJS
);
709 m_jit
.store32(info
.tagGPR(), JITCompiler::tagFor(spillMe
));
710 m_jit
.store32(info
.payloadGPR(), JITCompiler::payloadFor(spillMe
));
711 info
.spill(spillFormat
);
717 bool isStrictInt32(NodeIndex
);
719 bool isKnownInteger(NodeIndex
);
720 bool isKnownNumeric(NodeIndex
);
721 bool isKnownCell(NodeIndex
);
723 bool isKnownNotInteger(NodeIndex
);
724 bool isKnownNotNumber(NodeIndex
);
726 bool isKnownNotCell(NodeIndex
);
728 // Checks/accessors for constant values.
729 bool isConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isConstant(nodeIndex
); }
730 bool isJSConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isJSConstant(nodeIndex
); }
731 bool isInt32Constant(NodeIndex nodeIndex
) { return m_jit
.graph().isInt32Constant(nodeIndex
); }
732 bool isDoubleConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isDoubleConstant(nodeIndex
); }
733 bool isNumberConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isNumberConstant(nodeIndex
); }
734 bool isBooleanConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isBooleanConstant(nodeIndex
); }
735 bool isFunctionConstant(NodeIndex nodeIndex
) { return m_jit
.graph().isFunctionConstant(nodeIndex
); }
736 int32_t valueOfInt32Constant(NodeIndex nodeIndex
) { return m_jit
.graph().valueOfInt32Constant(nodeIndex
); }
737 double valueOfNumberConstant(NodeIndex nodeIndex
) { return m_jit
.graph().valueOfNumberConstant(nodeIndex
); }
738 int32_t valueOfNumberConstantAsInt32(NodeIndex nodeIndex
)
740 if (isInt32Constant(nodeIndex
))
741 return valueOfInt32Constant(nodeIndex
);
742 return JSC::toInt32(valueOfNumberConstant(nodeIndex
));
744 #if USE(JSVALUE32_64)
745 void* addressOfDoubleConstant(NodeIndex nodeIndex
) { return m_jit
.addressOfDoubleConstant(nodeIndex
); }
747 JSValue
valueOfJSConstant(NodeIndex nodeIndex
) { return m_jit
.graph().valueOfJSConstant(nodeIndex
); }
748 bool valueOfBooleanConstant(NodeIndex nodeIndex
) { return m_jit
.graph().valueOfBooleanConstant(nodeIndex
); }
749 JSFunction
* valueOfFunctionConstant(NodeIndex nodeIndex
) { return m_jit
.graph().valueOfFunctionConstant(nodeIndex
); }
750 bool isNullConstant(NodeIndex nodeIndex
)
752 if (!isConstant(nodeIndex
))
754 return valueOfJSConstant(nodeIndex
).isNull();
757 Identifier
* identifier(unsigned index
)
759 return &m_jit
.codeBlock()->identifier(index
);
762 // Spill all VirtualRegisters back to the RegisterFile.
763 void flushRegisters()
765 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
766 if (iter
.name() != InvalidVirtualRegister
) {
771 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
772 if (iter
.name() != InvalidVirtualRegister
) {
780 // Used to ASSERT flushRegisters() has been called prior to
781 // calling out from JIT code to a C helper function.
784 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
785 if (iter
.name() != InvalidVirtualRegister
)
788 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
789 if (iter
.name() != InvalidVirtualRegister
)
797 MacroAssembler::ImmPtr
valueOfJSConstantAsImmPtr(NodeIndex nodeIndex
)
799 return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex
)));
803 // Helper functions to enable code sharing in implementations of bit/shift ops.
804 void bitOp(NodeType op
, int32_t imm
, GPRReg op1
, GPRReg result
)
808 m_jit
.and32(Imm32(imm
), op1
, result
);
811 m_jit
.or32(Imm32(imm
), op1
, result
);
814 m_jit
.xor32(Imm32(imm
), op1
, result
);
817 ASSERT_NOT_REACHED();
820 void bitOp(NodeType op
, GPRReg op1
, GPRReg op2
, GPRReg result
)
824 m_jit
.and32(op1
, op2
, result
);
827 m_jit
.or32(op1
, op2
, result
);
830 m_jit
.xor32(op1
, op2
, result
);
833 ASSERT_NOT_REACHED();
836 void shiftOp(NodeType op
, GPRReg op1
, int32_t shiftAmount
, GPRReg result
)
840 m_jit
.rshift32(op1
, Imm32(shiftAmount
), result
);
843 m_jit
.lshift32(op1
, Imm32(shiftAmount
), result
);
846 m_jit
.urshift32(op1
, Imm32(shiftAmount
), result
);
849 ASSERT_NOT_REACHED();
852 void shiftOp(NodeType op
, GPRReg op1
, GPRReg shiftAmount
, GPRReg result
)
856 m_jit
.rshift32(op1
, shiftAmount
, result
);
859 m_jit
.lshift32(op1
, shiftAmount
, result
);
862 m_jit
.urshift32(op1
, shiftAmount
, result
);
865 ASSERT_NOT_REACHED();
869 // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
870 unsigned detectPeepHoleBranch()
872 BasicBlock
* block
= m_jit
.graph().m_blocks
[m_block
].get();
874 // Check that no intervening nodes will be generated.
875 for (unsigned index
= m_indexInBlock
+ 1; index
< block
->size() - 1; ++index
) {
876 NodeIndex nodeIndex
= block
->at(index
);
877 if (at(nodeIndex
).shouldGenerate())
881 // Check if the lastNode is a branch on this node.
882 Node
& lastNode
= at(block
->last());
883 return lastNode
.op() == Branch
&& lastNode
.child1().index() == m_compileIndex
? block
->size() - 1 : UINT_MAX
;
886 void nonSpeculativeValueToNumber(Node
&);
887 void nonSpeculativeValueToInt32(Node
&);
888 void nonSpeculativeUInt32ToNumber(Node
&);
890 enum SpillRegistersMode
{ NeedToSpill
, DontSpill
};
892 JITCompiler::Call
cachedGetById(CodeOrigin
, GPRReg baseGPR
, GPRReg resultGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
= JITCompiler::Jump(), SpillRegistersMode
= NeedToSpill
);
893 void cachedPutById(CodeOrigin
, GPRReg base
, GPRReg value
, Edge valueUse
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind
, JITCompiler::Jump slowPathTarget
= JITCompiler::Jump());
894 #elif USE(JSVALUE32_64)
895 JITCompiler::Call
cachedGetById(CodeOrigin
, GPRReg baseTagGPROrNone
, GPRReg basePayloadGPR
, GPRReg resultTagGPR
, GPRReg resultPayloadGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
= JITCompiler::Jump(), SpillRegistersMode
= NeedToSpill
);
896 void cachedPutById(CodeOrigin
, GPRReg basePayloadGPR
, GPRReg valueTagGPR
, GPRReg valuePayloadGPR
, Edge valueUse
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind
, JITCompiler::Jump slowPathTarget
= JITCompiler::Jump());
899 void nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
= false);
900 void nonSpeculativePeepholeBranchNull(Edge operand
, NodeIndex branchNodeIndex
, bool invert
= false);
901 bool nonSpeculativeCompareNull(Node
&, Edge operand
, bool invert
= false);
903 void nonSpeculativePeepholeBranch(Node
&, NodeIndex branchNodeIndex
, MacroAssembler::RelationalCondition
, S_DFGOperation_EJJ helperFunction
);
904 void nonSpeculativeNonPeepholeCompare(Node
&, MacroAssembler::RelationalCondition
, S_DFGOperation_EJJ helperFunction
);
905 bool nonSpeculativeCompare(Node
&, MacroAssembler::RelationalCondition
, S_DFGOperation_EJJ helperFunction
);
907 void nonSpeculativePeepholeStrictEq(Node
&, NodeIndex branchNodeIndex
, bool invert
= false);
908 void nonSpeculativeNonPeepholeStrictEq(Node
&, bool invert
= false);
909 bool nonSpeculativeStrictEq(Node
&, bool invert
= false);
911 void compileInstanceOfForObject(Node
&, GPRReg valueReg
, GPRReg prototypeReg
, GPRReg scratchAndResultReg
);
912 void compileInstanceOf(Node
&);
914 // Access to our fixed callee CallFrame.
915 MacroAssembler::Address
callFrameSlot(int slot
)
917 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ slot
) * static_cast<int>(sizeof(Register
)));
920 // Access to our fixed callee CallFrame.
921 MacroAssembler::Address
argumentSlot(int argument
)
923 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ argumentToOperand(argument
)) * static_cast<int>(sizeof(Register
)));
926 MacroAssembler::Address
callFrameTagSlot(int slot
)
928 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ slot
) * static_cast<int>(sizeof(Register
)) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
931 MacroAssembler::Address
callFramePayloadSlot(int slot
)
933 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ slot
) * static_cast<int>(sizeof(Register
)) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
936 MacroAssembler::Address
argumentTagSlot(int argument
)
938 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ argumentToOperand(argument
)) * static_cast<int>(sizeof(Register
)) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
941 MacroAssembler::Address
argumentPayloadSlot(int argument
)
943 return MacroAssembler::Address(GPRInfo::callFrameRegister
, (m_jit
.codeBlock()->m_numCalleeRegisters
+ argumentToOperand(argument
)) * static_cast<int>(sizeof(Register
)) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
946 void emitCall(Node
&);
948 // Called once a node has completed code generation but prior to setting
949 // its result, to free up its children. (This must happen prior to setting
950 // the nodes result, since the node may have the same VirtualRegister as
951 // a child, and as such will use the same GeneratioInfo).
952 void useChildren(Node
&);
954 // These method called to initialize the the GenerationInfo
955 // to describe the result of an operation.
956 void integerResult(GPRReg reg
, NodeIndex nodeIndex
, DataFormat format
= DataFormatInteger
, UseChildrenMode mode
= CallUseChildren
)
958 Node
& node
= at(nodeIndex
);
959 if (mode
== CallUseChildren
)
962 VirtualRegister virtualRegister
= node
.virtualRegister();
963 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
965 if (format
== DataFormatInteger
) {
966 m_jit
.jitAssertIsInt32(reg
);
967 m_gprs
.retain(reg
, virtualRegister
, SpillOrderInteger
);
968 info
.initInteger(nodeIndex
, node
.refCount(), reg
);
971 ASSERT(format
== DataFormatJSInteger
);
972 m_jit
.jitAssertIsJSInt32(reg
);
973 m_gprs
.retain(reg
, virtualRegister
, SpillOrderJS
);
974 info
.initJSValue(nodeIndex
, node
.refCount(), reg
, format
);
975 #elif USE(JSVALUE32_64)
976 ASSERT_NOT_REACHED();
980 void integerResult(GPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
)
982 integerResult(reg
, nodeIndex
, DataFormatInteger
, mode
);
984 void noResult(NodeIndex nodeIndex
, UseChildrenMode mode
= CallUseChildren
)
986 if (mode
== UseChildrenCalledExplicitly
)
988 Node
& node
= at(nodeIndex
);
991 void cellResult(GPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
= CallUseChildren
)
993 Node
& node
= at(nodeIndex
);
994 if (mode
== CallUseChildren
)
997 VirtualRegister virtualRegister
= node
.virtualRegister();
998 m_gprs
.retain(reg
, virtualRegister
, SpillOrderCell
);
999 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1000 info
.initCell(nodeIndex
, node
.refCount(), reg
);
1002 void booleanResult(GPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
= CallUseChildren
)
1004 Node
& node
= at(nodeIndex
);
1005 if (mode
== CallUseChildren
)
1008 VirtualRegister virtualRegister
= node
.virtualRegister();
1009 m_gprs
.retain(reg
, virtualRegister
, SpillOrderBoolean
);
1010 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1011 info
.initBoolean(nodeIndex
, node
.refCount(), reg
);
1014 void jsValueResult(GPRReg reg
, NodeIndex nodeIndex
, DataFormat format
= DataFormatJS
, UseChildrenMode mode
= CallUseChildren
)
1016 if (format
== DataFormatJSInteger
)
1017 m_jit
.jitAssertIsJSInt32(reg
);
1019 Node
& node
= at(nodeIndex
);
1020 if (mode
== CallUseChildren
)
1023 VirtualRegister virtualRegister
= node
.virtualRegister();
1024 m_gprs
.retain(reg
, virtualRegister
, SpillOrderJS
);
1025 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1026 info
.initJSValue(nodeIndex
, node
.refCount(), reg
, format
);
1028 void jsValueResult(GPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
)
1030 jsValueResult(reg
, nodeIndex
, DataFormatJS
, mode
);
1032 #elif USE(JSVALUE32_64)
1033 void jsValueResult(GPRReg tag
, GPRReg payload
, NodeIndex nodeIndex
, DataFormat format
= DataFormatJS
, UseChildrenMode mode
= CallUseChildren
)
1035 Node
& node
= at(nodeIndex
);
1036 if (mode
== CallUseChildren
)
1039 VirtualRegister virtualRegister
= node
.virtualRegister();
1040 m_gprs
.retain(tag
, virtualRegister
, SpillOrderJS
);
1041 m_gprs
.retain(payload
, virtualRegister
, SpillOrderJS
);
1042 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1043 info
.initJSValue(nodeIndex
, node
.refCount(), tag
, payload
, format
);
1045 void jsValueResult(GPRReg tag
, GPRReg payload
, NodeIndex nodeIndex
, UseChildrenMode mode
)
1047 jsValueResult(tag
, payload
, nodeIndex
, DataFormatJS
, mode
);
1050 void storageResult(GPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
= CallUseChildren
)
1052 Node
& node
= at(nodeIndex
);
1053 if (mode
== CallUseChildren
)
1056 VirtualRegister virtualRegister
= node
.virtualRegister();
1057 m_gprs
.retain(reg
, virtualRegister
, SpillOrderStorage
);
1058 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1059 info
.initStorage(nodeIndex
, node
.refCount(), reg
);
1061 void doubleResult(FPRReg reg
, NodeIndex nodeIndex
, UseChildrenMode mode
= CallUseChildren
)
1063 Node
& node
= at(nodeIndex
);
1064 if (mode
== CallUseChildren
)
1067 VirtualRegister virtualRegister
= node
.virtualRegister();
1068 m_fprs
.retain(reg
, virtualRegister
, SpillOrderDouble
);
1069 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1070 info
.initDouble(nodeIndex
, node
.refCount(), reg
);
1072 void initConstantInfo(NodeIndex nodeIndex
)
1074 ASSERT(isInt32Constant(nodeIndex
) || isNumberConstant(nodeIndex
) || isJSConstant(nodeIndex
));
1075 Node
& node
= at(nodeIndex
);
1076 m_generationInfo
[node
.virtualRegister()].initConstant(nodeIndex
, node
.refCount());
1079 // These methods add calls to C++ helper functions.
1080 // These methods are broadly value representation specific (i.e.
1081 // deal with the fact that a JSValue may be passed in one or two
1082 // machine registers, and delegate the calling convention specific
1083 // decision as to how to fill the registers to setupArguments* methods.
1085 JITCompiler::Call
callOperation(J_DFGOperation_EP operation
, GPRReg result
, void* pointer
)
1087 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(pointer
));
1088 return appendCallWithExceptionCheckSetResult(operation
, result
);
1090 JITCompiler::Call
callOperation(Z_DFGOperation_D operation
, GPRReg result
, FPRReg arg1
)
1092 m_jit
.setupArguments(arg1
);
1093 JITCompiler::Call call
= m_jit
.appendCall(operation
);
1094 m_jit
.zeroExtend32ToPtr(GPRInfo::returnValueGPR
, result
);
1097 JITCompiler::Call
callOperation(J_DFGOperation_EGI operation
, GPRReg result
, GPRReg arg1
, Identifier
* identifier
)
1099 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(identifier
));
1100 return appendCallWithExceptionCheckSetResult(operation
, result
);
1102 JITCompiler::Call
callOperation(J_DFGOperation_EI operation
, GPRReg result
, Identifier
* identifier
)
1104 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(identifier
));
1105 return appendCallWithExceptionCheckSetResult(operation
, result
);
1107 JITCompiler::Call
callOperation(J_DFGOperation_EA operation
, GPRReg result
, GPRReg arg1
)
1109 m_jit
.setupArgumentsWithExecState(arg1
);
1110 return appendCallWithExceptionCheckSetResult(operation
, result
);
1112 JITCompiler::Call
callOperation(J_DFGOperation_EPS operation
, GPRReg result
, void* pointer
, size_t size
)
1114 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(pointer
), TrustedImmPtr(size
));
1115 return appendCallWithExceptionCheckSetResult(operation
, result
);
1117 JITCompiler::Call
callOperation(J_DFGOperation_ESS operation
, GPRReg result
, int startConstant
, int numConstants
)
1119 m_jit
.setupArgumentsWithExecState(TrustedImm32(startConstant
), TrustedImm32(numConstants
));
1120 return appendCallWithExceptionCheckSetResult(operation
, result
);
1122 JITCompiler::Call
callOperation(J_DFGOperation_EPP operation
, GPRReg result
, GPRReg arg1
, void* pointer
)
1124 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(pointer
));
1125 return appendCallWithExceptionCheckSetResult(operation
, result
);
1127 JITCompiler::Call
callOperation(J_DFGOperation_ECI operation
, GPRReg result
, GPRReg arg1
, Identifier
* identifier
)
1129 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(identifier
));
1130 return appendCallWithExceptionCheckSetResult(operation
, result
);
1132 JITCompiler::Call
callOperation(J_DFGOperation_EJI operation
, GPRReg result
, GPRReg arg1
, Identifier
* identifier
)
1134 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(identifier
));
1135 return appendCallWithExceptionCheckSetResult(operation
, result
);
1137 JITCompiler::Call
callOperation(J_DFGOperation_EJA operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1139 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1140 return appendCallWithExceptionCheckSetResult(operation
, result
);
1142 JITCompiler::Call
callOperation(J_DFGOperation_EP operation
, GPRReg result
, GPRReg arg1
)
1144 m_jit
.setupArgumentsWithExecState(arg1
);
1145 return appendCallWithExceptionCheckSetResult(operation
, result
);
1147 JITCompiler::Call
callOperation(C_DFGOperation_E operation
, GPRReg result
)
1149 m_jit
.setupArgumentsExecState();
1150 return appendCallWithExceptionCheckSetResult(operation
, result
);
1152 JITCompiler::Call
callOperation(C_DFGOperation_EC operation
, GPRReg result
, GPRReg arg1
)
1154 m_jit
.setupArgumentsWithExecState(arg1
);
1155 return appendCallWithExceptionCheckSetResult(operation
, result
);
1157 JITCompiler::Call
callOperation(C_DFGOperation_EC operation
, GPRReg result
, JSCell
* cell
)
1159 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(cell
));
1160 return appendCallWithExceptionCheckSetResult(operation
, result
);
1162 JITCompiler::Call
callOperation(C_DFGOperation_ECC operation
, GPRReg result
, GPRReg arg1
, JSCell
* cell
)
1164 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(cell
));
1165 return appendCallWithExceptionCheckSetResult(operation
, result
);
1167 JITCompiler::Call
callOperation(S_DFGOperation_J operation
, GPRReg result
, GPRReg arg1
)
1169 m_jit
.setupArguments(arg1
);
1170 return appendCallSetResult(operation
, result
);
1172 JITCompiler::Call
callOperation(S_DFGOperation_EJ operation
, GPRReg result
, GPRReg arg1
)
1174 m_jit
.setupArgumentsWithExecState(arg1
);
1175 return appendCallWithExceptionCheckSetResult(operation
, result
);
1177 JITCompiler::Call
callOperation(S_DFGOperation_EJJ operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1179 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1180 return appendCallWithExceptionCheckSetResult(operation
, result
);
1182 JITCompiler::Call
callOperation(S_DFGOperation_ECC operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1184 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1185 return appendCallWithExceptionCheckSetResult(operation
, result
);
1187 JITCompiler::Call
callOperation(J_DFGOperation_EPP operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1189 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1190 return appendCallWithExceptionCheckSetResult(operation
, result
);
1192 JITCompiler::Call
callOperation(J_DFGOperation_EJJ operation
, GPRReg result
, GPRReg arg1
, MacroAssembler::TrustedImm32 imm
)
1194 m_jit
.setupArgumentsWithExecState(arg1
, MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm
.m_value
)))));
1195 return appendCallWithExceptionCheckSetResult(operation
, result
);
1197 JITCompiler::Call
callOperation(J_DFGOperation_EJJ operation
, GPRReg result
, MacroAssembler::TrustedImm32 imm
, GPRReg arg2
)
1199 m_jit
.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm
.m_value
)))), arg2
);
1200 return appendCallWithExceptionCheckSetResult(operation
, result
);
1202 JITCompiler::Call
callOperation(J_DFGOperation_ECC operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1204 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1205 return appendCallWithExceptionCheckSetResult(operation
, result
);
1207 JITCompiler::Call
callOperation(J_DFGOperation_ECJ operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1209 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1210 return appendCallWithExceptionCheckSetResult(operation
, result
);
1212 JITCompiler::Call
callOperation(V_DFGOperation_EC operation
, GPRReg arg1
)
1214 m_jit
.setupArgumentsWithExecState(arg1
);
1215 return appendCallWithExceptionCheck(operation
);
1217 JITCompiler::Call
callOperation(V_DFGOperation_EJPP operation
, GPRReg arg1
, GPRReg arg2
, void* pointer
)
1219 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, TrustedImmPtr(pointer
));
1220 return appendCallWithExceptionCheck(operation
);
1222 JITCompiler::Call
callOperation(V_DFGOperation_EJCI operation
, GPRReg arg1
, GPRReg arg2
, Identifier
* identifier
)
1224 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, TrustedImmPtr(identifier
));
1225 return appendCallWithExceptionCheck(operation
);
1227 JITCompiler::Call
callOperation(V_DFGOperation_EJJJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
1229 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3
);
1230 return appendCallWithExceptionCheck(operation
);
1232 JITCompiler::Call
callOperation(V_DFGOperation_EPZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
1234 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3
);
1235 return appendCallWithExceptionCheck(operation
);
1237 JITCompiler::Call
callOperation(V_DFGOperation_EAZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
1239 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3
);
1240 return appendCallWithExceptionCheck(operation
);
1242 JITCompiler::Call
callOperation(V_DFGOperation_ECJJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
1244 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3
);
1245 return appendCallWithExceptionCheck(operation
);
1247 JITCompiler::Call
callOperation(D_DFGOperation_EJ operation
, FPRReg result
, GPRReg arg1
)
1249 m_jit
.setupArgumentsWithExecState(arg1
);
1250 return appendCallWithExceptionCheckSetResult(operation
, result
);
1252 JITCompiler::Call
callOperation(D_DFGOperation_ZZ operation
, FPRReg result
, GPRReg arg1
, GPRReg arg2
)
1254 m_jit
.setupArguments(arg1
, arg2
);
1255 return appendCallSetResult(operation
, result
);
1257 JITCompiler::Call
callOperation(D_DFGOperation_DD operation
, FPRReg result
, FPRReg arg1
, FPRReg arg2
)
1259 m_jit
.setupArguments(arg1
, arg2
);
1260 return appendCallSetResult(operation
, result
);
1263 JITCompiler::Call
callOperation(Z_DFGOperation_D operation
, GPRReg result
, FPRReg arg1
)
1265 prepareForExternalCall();
1266 m_jit
.setupArguments(arg1
);
1267 JITCompiler::Call call
= m_jit
.appendCall(operation
);
1268 m_jit
.zeroExtend32ToPtr(GPRInfo::returnValueGPR
, result
);
1271 JITCompiler::Call
callOperation(J_DFGOperation_EP operation
, GPRReg resultTag
, GPRReg resultPayload
, void* pointer
)
1273 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(pointer
));
1274 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1276 JITCompiler::Call
callOperation(J_DFGOperation_EPP operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, void* pointer
)
1278 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(pointer
));
1279 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1281 JITCompiler::Call
callOperation(J_DFGOperation_EGI operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, Identifier
* identifier
)
1283 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(identifier
));
1284 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1286 JITCompiler::Call
callOperation(J_DFGOperation_EP operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
)
1288 m_jit
.setupArgumentsWithExecState(arg1
);
1289 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1291 JITCompiler::Call
callOperation(J_DFGOperation_EI operation
, GPRReg resultTag
, GPRReg resultPayload
, Identifier
* identifier
)
1293 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(identifier
));
1294 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1296 JITCompiler::Call
callOperation(J_DFGOperation_EA operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
)
1298 m_jit
.setupArgumentsWithExecState(arg1
);
1299 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1301 JITCompiler::Call
callOperation(J_DFGOperation_EPS operation
, GPRReg resultTag
, GPRReg resultPayload
, void* pointer
, size_t size
)
1303 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(pointer
), TrustedImmPtr(size
));
1304 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1306 JITCompiler::Call
callOperation(J_DFGOperation_ESS operation
, GPRReg resultTag
, GPRReg resultPayload
, int startConstant
, int numConstants
)
1308 m_jit
.setupArgumentsWithExecState(TrustedImm32(startConstant
), TrustedImm32(numConstants
));
1309 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1311 JITCompiler::Call
callOperation(J_DFGOperation_EJP operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, void* pointer
)
1313 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, TrustedImmPtr(pointer
));
1314 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1316 JITCompiler::Call
callOperation(J_DFGOperation_EJP operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
)
1318 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2
);
1319 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1321 JITCompiler::Call
callOperation(J_DFGOperation_ECI operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, Identifier
* identifier
)
1323 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(identifier
));
1324 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1326 JITCompiler::Call
callOperation(J_DFGOperation_EJI operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, Identifier
* identifier
)
1328 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, TrustedImmPtr(identifier
));
1329 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1331 JITCompiler::Call
callOperation(J_DFGOperation_EJI operation
, GPRReg resultTag
, GPRReg resultPayload
, int32_t arg1Tag
, GPRReg arg1Payload
, Identifier
* identifier
)
1333 m_jit
.setupArgumentsWithExecState(arg1Payload
, TrustedImm32(arg1Tag
), TrustedImmPtr(identifier
));
1334 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1336 JITCompiler::Call
callOperation(J_DFGOperation_EJA operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
)
1338 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2
);
1339 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1341 JITCompiler::Call
callOperation(J_DFGOperation_EJ operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
)
1343 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
);
1344 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1346 JITCompiler::Call
callOperation(C_DFGOperation_E operation
, GPRReg result
)
1348 m_jit
.setupArgumentsExecState();
1349 return appendCallWithExceptionCheckSetResult(operation
, result
);
1351 JITCompiler::Call
callOperation(C_DFGOperation_EC operation
, GPRReg result
, GPRReg arg1
)
1353 m_jit
.setupArgumentsWithExecState(arg1
);
1354 return appendCallWithExceptionCheckSetResult(operation
, result
);
1356 JITCompiler::Call
callOperation(C_DFGOperation_EC operation
, GPRReg result
, JSCell
* cell
)
1358 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(cell
));
1359 return appendCallWithExceptionCheckSetResult(operation
, result
);
1361 JITCompiler::Call
callOperation(C_DFGOperation_ECC operation
, GPRReg result
, GPRReg arg1
, JSCell
* cell
)
1363 m_jit
.setupArgumentsWithExecState(arg1
, TrustedImmPtr(cell
));
1364 return appendCallWithExceptionCheckSetResult(operation
, result
);
1366 JITCompiler::Call
callOperation(S_DFGOperation_J operation
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
)
1368 m_jit
.setupArguments(arg1Payload
, arg1Tag
);
1369 return appendCallSetResult(operation
, result
);
1371 JITCompiler::Call
callOperation(S_DFGOperation_EJ operation
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
)
1373 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
);
1374 return appendCallWithExceptionCheckSetResult(operation
, result
);
1376 JITCompiler::Call
callOperation(S_DFGOperation_ECC operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
1378 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1379 return appendCallWithExceptionCheckSetResult(operation
, result
);
1381 JITCompiler::Call
callOperation(S_DFGOperation_EJJ operation
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2Tag
, GPRReg arg2Payload
)
1383 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2Payload
, arg2Tag
);
1384 return appendCallWithExceptionCheckSetResult(operation
, result
);
1386 JITCompiler::Call
callOperation(J_DFGOperation_EJJ operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2Tag
, GPRReg arg2Payload
)
1388 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2Payload
, arg2Tag
);
1389 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1391 JITCompiler::Call
callOperation(J_DFGOperation_EJJ operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1Tag
, GPRReg arg1Payload
, MacroAssembler::TrustedImm32 imm
)
1393 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, imm
, TrustedImm32(JSValue::Int32Tag
));
1394 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1396 JITCompiler::Call
callOperation(J_DFGOperation_EJJ operation
, GPRReg resultTag
, GPRReg resultPayload
, MacroAssembler::TrustedImm32 imm
, GPRReg arg2Tag
, GPRReg arg2Payload
)
1398 m_jit
.setupArgumentsWithExecState(imm
, TrustedImm32(JSValue::Int32Tag
), arg2Payload
, arg2Tag
);
1399 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1401 JITCompiler::Call
callOperation(J_DFGOperation_ECJ operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, GPRReg arg2Tag
, GPRReg arg2Payload
)
1403 m_jit
.setupArgumentsWithExecState(arg1
, arg2Payload
, arg2Tag
);
1404 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1406 JITCompiler::Call
callOperation(J_DFGOperation_ECC operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, GPRReg arg2
)
1408 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1409 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1411 JITCompiler::Call
callOperation(V_DFGOperation_EC operation
, GPRReg arg1
)
1413 m_jit
.setupArgumentsWithExecState(arg1
);
1414 return appendCallWithExceptionCheck(operation
);
1416 JITCompiler::Call
callOperation(V_DFGOperation_EJPP operation
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
, void* pointer
)
1418 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2
, TrustedImmPtr(pointer
));
1419 return appendCallWithExceptionCheck(operation
);
1421 JITCompiler::Call
callOperation(V_DFGOperation_EJCI operation
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
, Identifier
* identifier
)
1423 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
, arg2
, TrustedImmPtr(identifier
));
1424 return appendCallWithExceptionCheck(operation
);
1426 JITCompiler::Call
callOperation(V_DFGOperation_ECJJ operation
, GPRReg arg1
, GPRReg arg2Tag
, GPRReg arg2Payload
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1428 m_jit
.setupArgumentsWithExecState(arg1
, arg2Payload
, arg2Tag
, arg3Payload
, arg3Tag
);
1429 return appendCallWithExceptionCheck(operation
);
1431 JITCompiler::Call
callOperation(V_DFGOperation_EPZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1433 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3Payload
, arg3Tag
);
1434 return appendCallWithExceptionCheck(operation
);
1436 JITCompiler::Call
callOperation(V_DFGOperation_EAZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1438 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, arg3Payload
, arg3Tag
);
1439 return appendCallWithExceptionCheck(operation
);
1442 JITCompiler::Call
callOperation(D_DFGOperation_EJ operation
, FPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
)
1444 m_jit
.setupArgumentsWithExecState(arg1Payload
, arg1Tag
);
1445 return appendCallWithExceptionCheckSetResult(operation
, result
);
1448 JITCompiler::Call
callOperation(D_DFGOperation_ZZ operation
, FPRReg result
, GPRReg arg1
, GPRReg arg2
)
1450 m_jit
.setupArguments(arg1
, arg2
);
1451 return appendCallSetResult(operation
, result
);
1453 JITCompiler::Call
callOperation(D_DFGOperation_DD operation
, FPRReg result
, FPRReg arg1
, FPRReg arg2
)
1455 m_jit
.setupArguments(arg1
, arg2
);
1456 return appendCallSetResult(operation
, result
);
1460 #if !defined(NDEBUG) && !CPU(ARM_THUMB2)
1461 void prepareForExternalCall()
1463 for (unsigned i
= 0; i
< sizeof(void*) / 4; i
++)
1464 m_jit
.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit
.globalData()->topCallFrame
) + i
* 4);
1467 void prepareForExternalCall() { }
1470 // These methods add call instructions, with optional exception checks & setting results.
1471 JITCompiler::Call
appendCallWithExceptionCheck(const FunctionPtr
& function
)
1473 prepareForExternalCall();
1474 CodeOrigin codeOrigin
= at(m_compileIndex
).codeOrigin
;
1475 CallBeginToken token
= m_jit
.beginCall();
1476 JITCompiler::Call call
= m_jit
.appendCall(function
);
1477 m_jit
.addExceptionCheck(call
, codeOrigin
, token
);
1480 JITCompiler::Call
appendCallWithExceptionCheckSetResult(const FunctionPtr
& function
, GPRReg result
)
1482 JITCompiler::Call call
= appendCallWithExceptionCheck(function
);
1483 m_jit
.move(GPRInfo::returnValueGPR
, result
);
1486 JITCompiler::Call
appendCallSetResult(const FunctionPtr
& function
, GPRReg result
)
1488 prepareForExternalCall();
1489 JITCompiler::Call call
= m_jit
.appendCall(function
);
1490 m_jit
.move(GPRInfo::returnValueGPR
, result
);
1493 JITCompiler::Call
appendCallWithExceptionCheckSetResult(const FunctionPtr
& function
, GPRReg result1
, GPRReg result2
)
1495 JITCompiler::Call call
= appendCallWithExceptionCheck(function
);
1496 m_jit
.setupResults(result1
, result2
);
1500 JITCompiler::Call
appendCallWithExceptionCheckSetResult(const FunctionPtr
& function
, FPRReg result
)
1502 JITCompiler::Call call
= appendCallWithExceptionCheck(function
);
1503 m_jit
.assembler().fstpl(0, JITCompiler::stackPointerRegister
);
1504 m_jit
.loadDouble(JITCompiler::stackPointerRegister
, result
);
1507 JITCompiler::Call
appendCallSetResult(const FunctionPtr
& function
, FPRReg result
)
1509 JITCompiler::Call call
= m_jit
.appendCall(function
);
1510 m_jit
.assembler().fstpl(0, JITCompiler::stackPointerRegister
);
1511 m_jit
.loadDouble(JITCompiler::stackPointerRegister
, result
);
1515 JITCompiler::Call
appendCallWithExceptionCheckSetResult(const FunctionPtr
& function
, FPRReg result
)
1517 JITCompiler::Call call
= appendCallWithExceptionCheck(function
);
1518 m_jit
.assembler().vmov(result
, GPRInfo::returnValueGPR
, GPRInfo::returnValueGPR2
);
1521 JITCompiler::Call
appendCallSetResult(const FunctionPtr
& function
, FPRReg result
)
1523 JITCompiler::Call call
= m_jit
.appendCall(function
);
1524 m_jit
.assembler().vmov(result
, GPRInfo::returnValueGPR
, GPRInfo::returnValueGPR2
);
1528 JITCompiler::Call
appendCallWithExceptionCheckSetResult(const FunctionPtr
& function
, FPRReg result
)
1530 JITCompiler::Call call
= appendCallWithExceptionCheck(function
);
1531 m_jit
.moveDouble(FPRInfo::returnValueFPR
, result
);
1534 JITCompiler::Call
appendCallSetResult(const FunctionPtr
& function
, FPRReg result
)
1536 JITCompiler::Call call
= m_jit
.appendCall(function
);
1537 m_jit
.moveDouble(FPRInfo::returnValueFPR
, result
);
1542 void branchDouble(JITCompiler::DoubleCondition cond
, FPRReg left
, FPRReg right
, BlockIndex destination
)
1544 if (!haveEdgeCodeToEmit(destination
))
1545 return addBranch(m_jit
.branchDouble(cond
, left
, right
), destination
);
1547 JITCompiler::Jump notTaken
= m_jit
.branchDouble(JITCompiler::invert(cond
), left
, right
);
1548 emitEdgeCode(destination
);
1549 addBranch(m_jit
.jump(), destination
);
1550 notTaken
.link(&m_jit
);
1553 void branchDoubleNonZero(FPRReg value
, FPRReg scratch
, BlockIndex destination
)
1555 if (!haveEdgeCodeToEmit(destination
))
1556 return addBranch(m_jit
.branchDoubleNonZero(value
, scratch
), destination
);
1558 JITCompiler::Jump notTaken
= m_jit
.branchDoubleZeroOrNaN(value
, scratch
);
1559 emitEdgeCode(destination
);
1560 addBranch(m_jit
.jump(), destination
);
1561 notTaken
.link(&m_jit
);
1564 template<typename T
, typename U
>
1565 void branch32(JITCompiler::RelationalCondition cond
, T left
, U right
, BlockIndex destination
)
1567 if (!haveEdgeCodeToEmit(destination
))
1568 return addBranch(m_jit
.branch32(cond
, left
, right
), destination
);
1570 JITCompiler::Jump notTaken
= m_jit
.branch32(JITCompiler::invert(cond
), left
, right
);
1571 emitEdgeCode(destination
);
1572 addBranch(m_jit
.jump(), destination
);
1573 notTaken
.link(&m_jit
);
1576 template<typename T
, typename U
>
1577 void branchTest32(JITCompiler::ResultCondition cond
, T value
, U mask
, BlockIndex destination
)
1579 ASSERT(JITCompiler::isInvertible(cond
));
1581 if (!haveEdgeCodeToEmit(destination
))
1582 return addBranch(m_jit
.branchTest32(cond
, value
, mask
), destination
);
1584 JITCompiler::Jump notTaken
= m_jit
.branchTest32(JITCompiler::invert(cond
), value
, mask
);
1585 emitEdgeCode(destination
);
1586 addBranch(m_jit
.jump(), destination
);
1587 notTaken
.link(&m_jit
);
1590 template<typename T
>
1591 void branchTest32(JITCompiler::ResultCondition cond
, T value
, BlockIndex destination
)
1593 ASSERT(JITCompiler::isInvertible(cond
));
1595 if (!haveEdgeCodeToEmit(destination
))
1596 return addBranch(m_jit
.branchTest32(cond
, value
), destination
);
1598 JITCompiler::Jump notTaken
= m_jit
.branchTest32(JITCompiler::invert(cond
), value
);
1599 emitEdgeCode(destination
);
1600 addBranch(m_jit
.jump(), destination
);
1601 notTaken
.link(&m_jit
);
1604 template<typename T
, typename U
>
1605 void branchPtr(JITCompiler::RelationalCondition cond
, T left
, U right
, BlockIndex destination
)
1607 if (!haveEdgeCodeToEmit(destination
))
1608 return addBranch(m_jit
.branchPtr(cond
, left
, right
), destination
);
1610 JITCompiler::Jump notTaken
= m_jit
.branchPtr(JITCompiler::invert(cond
), left
, right
);
1611 emitEdgeCode(destination
);
1612 addBranch(m_jit
.jump(), destination
);
1613 notTaken
.link(&m_jit
);
1616 template<typename T
, typename U
>
1617 void branchTestPtr(JITCompiler::ResultCondition cond
, T value
, U mask
, BlockIndex destination
)
1619 ASSERT(JITCompiler::isInvertible(cond
));
1621 if (!haveEdgeCodeToEmit(destination
))
1622 return addBranch(m_jit
.branchTestPtr(cond
, value
, mask
), destination
);
1624 JITCompiler::Jump notTaken
= m_jit
.branchTestPtr(JITCompiler::invert(cond
), value
, mask
);
1625 emitEdgeCode(destination
);
1626 addBranch(m_jit
.jump(), destination
);
1627 notTaken
.link(&m_jit
);
1630 template<typename T
>
1631 void branchTestPtr(JITCompiler::ResultCondition cond
, T value
, BlockIndex destination
)
1633 ASSERT(JITCompiler::isInvertible(cond
));
1635 if (!haveEdgeCodeToEmit(destination
))
1636 return addBranch(m_jit
.branchTestPtr(cond
, value
), destination
);
1638 JITCompiler::Jump notTaken
= m_jit
.branchTestPtr(JITCompiler::invert(cond
), value
);
1639 emitEdgeCode(destination
);
1640 addBranch(m_jit
.jump(), destination
);
1641 notTaken
.link(&m_jit
);
1644 template<typename T
, typename U
>
1645 void branchTest8(JITCompiler::ResultCondition cond
, T value
, U mask
, BlockIndex destination
)
1647 ASSERT(JITCompiler::isInvertible(cond
));
1649 if (!haveEdgeCodeToEmit(destination
))
1650 return addBranch(m_jit
.branchTest8(cond
, value
, mask
), destination
);
1652 JITCompiler::Jump notTaken
= m_jit
.branchTest8(JITCompiler::invert(cond
), value
, mask
);
1653 emitEdgeCode(destination
);
1654 addBranch(m_jit
.jump(), destination
);
1655 notTaken
.link(&m_jit
);
1658 template<typename T
>
1659 void branchTest8(JITCompiler::ResultCondition cond
, T value
, BlockIndex destination
)
1661 ASSERT(JITCompiler::isInvertible(cond
));
1663 if (!haveEdgeCodeToEmit(destination
))
1664 return addBranch(m_jit
.branchTest8(cond
, value
), destination
);
1666 JITCompiler::Jump notTaken
= m_jit
.branchTest8(JITCompiler::invert(cond
), value
);
1667 emitEdgeCode(destination
);
1668 addBranch(m_jit
.jump(), destination
);
1669 notTaken
.link(&m_jit
);
1672 enum FallThroughMode
{
1676 void jump(BlockIndex destination
, FallThroughMode fallThroughMode
= AtFallThroughPoint
)
1678 if (haveEdgeCodeToEmit(destination
))
1679 emitEdgeCode(destination
);
1680 if (destination
== m_block
+ 1
1681 && fallThroughMode
== AtFallThroughPoint
)
1683 addBranch(m_jit
.jump(), destination
);
1686 inline bool haveEdgeCodeToEmit(BlockIndex
)
1688 return DFG_ENABLE_EDGE_CODE_VERIFICATION
;
1690 void emitEdgeCode(BlockIndex destination
)
1692 if (!DFG_ENABLE_EDGE_CODE_VERIFICATION
)
1694 m_jit
.move(TrustedImm32(destination
), GPRInfo::regT0
);
1697 void addBranch(const MacroAssembler::Jump
& jump
, BlockIndex destination
)
1699 m_branches
.append(BranchRecord(jump
, destination
));
1704 for (size_t i
= 0; i
< m_branches
.size(); ++i
) {
1705 BranchRecord
& branch
= m_branches
[i
];
1706 branch
.jump
.linkTo(m_blockHeads
[branch
.destination
], &m_jit
);
1712 return m_jit
.graph().m_blocks
[m_block
].get();
1716 void dump(const char* label
= 0);
1719 #if DFG_ENABLE(CONSISTENCY_CHECK)
1720 void checkConsistency();
1722 void checkConsistency() { }
1725 bool isInteger(NodeIndex nodeIndex
)
1727 Node
& node
= at(nodeIndex
);
1728 if (node
.hasInt32Result())
1731 if (isInt32Constant(nodeIndex
))
1734 VirtualRegister virtualRegister
= node
.virtualRegister();
1735 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1737 return info
.isJSInteger();
1740 bool compare(Node
&, MacroAssembler::RelationalCondition
, MacroAssembler::DoubleCondition
, S_DFGOperation_EJJ
);
1741 bool compilePeepHoleBranch(Node
&, MacroAssembler::RelationalCondition
, MacroAssembler::DoubleCondition
, S_DFGOperation_EJJ
);
1742 void compilePeepHoleIntegerBranch(Node
&, NodeIndex branchNodeIndex
, JITCompiler::RelationalCondition
);
1743 void compilePeepHoleDoubleBranch(Node
&, NodeIndex branchNodeIndex
, JITCompiler::DoubleCondition
);
1744 void compilePeepHoleObjectEquality(Node
&, NodeIndex branchNodeIndex
, const ClassInfo
*, PredictionChecker
);
1745 void compilePeepHoleObjectToObjectOrOtherEquality(
1746 Edge leftChild
, Edge rightChild
, NodeIndex branchNodeIndex
, const ClassInfo
*, PredictionChecker
);
1747 void compileObjectEquality(Node
&, const ClassInfo
*, PredictionChecker
);
1748 void compileObjectToObjectOrOtherEquality(
1749 Edge leftChild
, Edge rightChild
, const ClassInfo
*, PredictionChecker
);
1750 void compileValueAdd(Node
&);
1751 void compileObjectOrOtherLogicalNot(Edge value
, const ClassInfo
*, bool needSpeculationCheck
);
1752 void compileLogicalNot(Node
&);
1753 void emitObjectOrOtherBranch(Edge value
, BlockIndex taken
, BlockIndex notTaken
, const ClassInfo
*, bool needSpeculationCheck
);
1754 void emitBranch(Node
&);
1756 void compileIntegerCompare(Node
&, MacroAssembler::RelationalCondition
);
1757 void compileDoubleCompare(Node
&, MacroAssembler::DoubleCondition
);
1759 bool compileStrictEqForConstant(Node
&, Edge value
, JSValue constant
);
1761 bool compileStrictEq(Node
&);
1763 void compileGetCharCodeAt(Node
&);
1764 void compileGetByValOnString(Node
&);
1765 void compileValueToInt32(Node
&);
1766 void compileUInt32ToNumber(Node
&);
1767 void compileDoubleAsInt32(Node
&);
1768 void compileInt32ToDouble(Node
&);
1769 void compileAdd(Node
&);
1770 void compileArithSub(Node
&);
1771 void compileArithNegate(Node
&);
1772 void compileArithMul(Node
&);
1773 #if CPU(X86) || CPU(X86_64)
1774 void compileIntegerArithDivForX86(Node
&);
1776 void compileArithMod(Node
&);
1777 void compileSoftModulo(Node
&);
1778 void compileGetTypedArrayLength(const TypedArrayDescriptor
&, Node
&, bool needsSpeculationCheck
);
1779 enum TypedArraySpeculationRequirements
{
1780 NoTypedArraySpecCheck
,
1781 NoTypedArrayTypeSpecCheck
,
1782 AllTypedArraySpecChecks
1784 enum TypedArraySignedness
{
1788 enum TypedArrayRounding
{
1792 void compileGetIndexedPropertyStorage(Node
&);
1793 void compileGetByValOnIntTypedArray(const TypedArrayDescriptor
&, Node
&, size_t elementSize
, TypedArraySpeculationRequirements
, TypedArraySignedness
);
1794 void compilePutByValForIntTypedArray(const TypedArrayDescriptor
&, GPRReg base
, GPRReg property
, Node
&, size_t elementSize
, TypedArraySpeculationRequirements
, TypedArraySignedness
, TypedArrayRounding
= TruncateRounding
);
1795 void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor
&, Node
&, size_t elementSize
, TypedArraySpeculationRequirements
);
1796 void compilePutByValForFloatTypedArray(const TypedArrayDescriptor
&, GPRReg base
, GPRReg property
, Node
&, size_t elementSize
, TypedArraySpeculationRequirements
);
1797 void compileNewFunctionNoCheck(Node
&);
1798 void compileNewFunctionExpression(Node
&);
1799 bool compileRegExpExec(Node
&);
1801 template <typename ClassType
, bool destructor
, typename StructureType
>
1802 void emitAllocateBasicJSObject(StructureType structure
, GPRReg resultGPR
, GPRReg scratchGPR
, MacroAssembler::JumpList
& slowPath
)
1804 MarkedAllocator
* allocator
= 0;
1806 allocator
= &m_jit
.globalData()->heap
.allocatorForObjectWithDestructor(sizeof(ClassType
));
1808 allocator
= &m_jit
.globalData()->heap
.allocatorForObjectWithoutDestructor(sizeof(ClassType
));
1810 m_jit
.loadPtr(&allocator
->m_freeList
.head
, resultGPR
);
1811 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, resultGPR
));
1813 // The object is half-allocated: we have what we know is a fresh object, but
1814 // it's still on the GC's free list.
1816 // Ditch the structure by placing it into the structure slot, so that we can reuse
1818 m_jit
.storePtr(structure
, MacroAssembler::Address(resultGPR
, JSObject::structureOffset()));
1820 // Now that we have scratchGPR back, remove the object from the free list
1821 m_jit
.loadPtr(MacroAssembler::Address(resultGPR
), scratchGPR
);
1822 m_jit
.storePtr(scratchGPR
, &allocator
->m_freeList
.head
);
1824 // Initialize the object's classInfo pointer
1825 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(&ClassType::s_info
), MacroAssembler::Address(resultGPR
, JSCell::classInfoOffset()));
1827 // Initialize the object's inheritorID.
1828 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR
, JSObject::offsetOfInheritorID()));
1830 // Initialize the object's property storage pointer.
1831 m_jit
.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject
)), resultGPR
, scratchGPR
);
1832 m_jit
.storePtr(scratchGPR
, MacroAssembler::Address(resultGPR
, ClassType::offsetOfPropertyStorage()));
1835 // It is acceptable to have structure be equal to scratch, so long as you're fine
1836 // with the structure GPR being clobbered.
1837 template<typename T
>
1838 void emitAllocateJSFinalObject(T structure
, GPRReg resultGPR
, GPRReg scratchGPR
, MacroAssembler::JumpList
& slowPath
)
1840 return emitAllocateBasicJSObject
<JSFinalObject
, false>(structure
, resultGPR
, scratchGPR
, slowPath
);
1844 JITCompiler::Jump
convertToDouble(GPRReg value
, FPRReg result
, GPRReg tmp
);
1845 #elif USE(JSVALUE32_64)
1846 JITCompiler::Jump
convertToDouble(JSValueOperand
&, FPRReg result
);
1849 // Add a speculation check without additional recovery.
1850 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, NodeIndex nodeIndex
, MacroAssembler::Jump jumpToFail
)
1854 m_jit
.codeBlock()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(nodeIndex
), jumpToFail
, this));
1856 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, MacroAssembler::Jump jumpToFail
)
1858 speculationCheck(kind
, jsValueSource
, nodeUse
.index(), jumpToFail
);
1860 // Add a set of speculation checks without additional recovery.
1861 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, NodeIndex nodeIndex
, MacroAssembler::JumpList
& jumpsToFail
)
1863 Vector
<MacroAssembler::Jump
, 16> jumpVector
= jumpsToFail
.jumps();
1864 for (unsigned i
= 0; i
< jumpVector
.size(); ++i
)
1865 speculationCheck(kind
, jsValueSource
, nodeIndex
, jumpVector
[i
]);
1867 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, MacroAssembler::JumpList
& jumpsToFail
)
1869 speculationCheck(kind
, jsValueSource
, nodeUse
.index(), jumpsToFail
);
1871 // Add a speculation check with additional recovery.
1872 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, NodeIndex nodeIndex
, MacroAssembler::Jump jumpToFail
, const SpeculationRecovery
& recovery
)
1876 m_jit
.codeBlock()->appendSpeculationRecovery(recovery
);
1877 m_jit
.codeBlock()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(nodeIndex
), jumpToFail
, this, m_jit
.codeBlock()->numberOfSpeculationRecoveries()));
1879 void speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, MacroAssembler::Jump jumpToFail
, const SpeculationRecovery
& recovery
)
1881 speculationCheck(kind
, jsValueSource
, nodeUse
.index(), jumpToFail
, recovery
);
1883 void forwardSpeculationCheck(ExitKind kind
, JSValueSource jsValueSource
, NodeIndex nodeIndex
, MacroAssembler::Jump jumpToFail
, const ValueRecovery
& valueRecovery
)
1885 speculationCheck(kind
, jsValueSource
, nodeIndex
, jumpToFail
);
1887 unsigned setLocalIndexInBlock
= m_indexInBlock
+ 1;
1889 Node
* setLocal
= &at(m_jit
.graph().m_blocks
[m_block
]->at(setLocalIndexInBlock
));
1891 if (setLocal
->op() == Int32ToDouble
) {
1892 setLocal
= &at(m_jit
.graph().m_blocks
[m_block
]->at(++setLocalIndexInBlock
));
1893 ASSERT(at(setLocal
->child1()).child1() == m_compileIndex
);
1895 ASSERT(setLocal
->child1() == m_compileIndex
);
1897 ASSERT(setLocal
->op() == SetLocal
);
1898 ASSERT(setLocal
->codeOrigin
== at(m_compileIndex
).codeOrigin
);
1900 Node
* nextNode
= &at(m_jit
.graph().m_blocks
[m_block
]->at(setLocalIndexInBlock
+ 1));
1901 if (nextNode
->codeOrigin
== at(m_compileIndex
).codeOrigin
) {
1902 ASSERT(nextNode
->op() == Flush
);
1903 nextNode
= &at(m_jit
.graph().m_blocks
[m_block
]->at(setLocalIndexInBlock
+ 2));
1904 ASSERT(nextNode
->codeOrigin
!= at(m_compileIndex
).codeOrigin
); // duplicate the same assertion as below so that if we fail, we'll know we came down this path.
1906 ASSERT(nextNode
->codeOrigin
!= at(m_compileIndex
).codeOrigin
);
1908 OSRExit
& exit
= m_jit
.codeBlock()->lastOSRExit();
1909 exit
.m_codeOrigin
= nextNode
->codeOrigin
;
1910 exit
.m_lastSetOperand
= setLocal
->local();
1912 exit
.valueRecoveryForOperand(setLocal
->local()) = valueRecovery
;
1914 void forwardSpeculationCheck(ExitKind kind
, JSValueSource jsValueSource
, NodeIndex nodeIndex
, MacroAssembler::JumpList
& jumpsToFail
, const ValueRecovery
& valueRecovery
)
1916 Vector
<MacroAssembler::Jump
, 16> jumpVector
= jumpsToFail
.jumps();
1917 for (unsigned i
= 0; i
< jumpVector
.size(); ++i
)
1918 forwardSpeculationCheck(kind
, jsValueSource
, nodeIndex
, jumpVector
[i
], valueRecovery
);
1921 // Called when we statically determine that a speculation will fail.
1922 void terminateSpeculativeExecution(ExitKind kind
, JSValueRegs jsValueRegs
, NodeIndex nodeIndex
)
1924 #if DFG_ENABLE(DEBUG_VERBOSE)
1925 dataLog("SpeculativeJIT was terminated.\n");
1929 speculationCheck(kind
, jsValueRegs
, nodeIndex
, m_jit
.jump());
1930 m_compileOkay
= false;
1932 void terminateSpeculativeExecution(ExitKind kind
, JSValueRegs jsValueRegs
, Edge nodeUse
)
1934 terminateSpeculativeExecution(kind
, jsValueRegs
, nodeUse
.index());
1937 template<bool strict
>
1938 GPRReg
fillSpeculateIntInternal(NodeIndex
, DataFormat
& returnFormat
);
1940 // It is possible, during speculative generation, to reach a situation in which we
1941 // can statically determine a speculation will fail (for example, when two nodes
1942 // will make conflicting speculations about the same operand). In such cases this
1943 // flag is cleared, indicating no further code generation should take place.
1946 // Tracking for which nodes are currently holding the values of arguments and bytecode
1947 // operand-indexed variables.
1949 ValueSource
valueSourceForOperand(int operand
)
1951 return valueSourceReferenceForOperand(operand
);
1954 void setNodeIndexForOperand(NodeIndex nodeIndex
, int operand
)
1956 valueSourceReferenceForOperand(operand
) = ValueSource(nodeIndex
);
1959 // Call this with care, since it both returns a reference into an array
1960 // and potentially resizes the array. So it would not be right to call this
1961 // twice and then perform operands on both references, since the one from
1962 // the first call may no longer be valid.
1963 ValueSource
& valueSourceReferenceForOperand(int operand
)
1965 if (operandIsArgument(operand
)) {
1966 int argument
= operandToArgument(operand
);
1967 return m_arguments
[argument
];
1970 if ((unsigned)operand
>= m_variables
.size())
1971 m_variables
.resize(operand
+ 1);
1973 return m_variables
[operand
];
// The JIT; it also provides the MacroAssembler functionality we emit code with.
1978 // The current node being generated.
1980 NodeIndex m_compileIndex
;
1981 unsigned m_indexInBlock
;
1982 // Virtual and physical register maps.
1983 Vector
<GenerationInfo
, 32> m_generationInfo
;
1984 RegisterBank
<GPRInfo
> m_gprs
;
1985 RegisterBank
<FPRInfo
> m_fprs
;
1987 Vector
<MacroAssembler::Label
> m_blockHeads
;
1988 Vector
<MacroAssembler::Label
> m_osrEntryHeads
;
1990 struct BranchRecord
{
1991 BranchRecord(MacroAssembler::Jump jump
, BlockIndex destination
)
1993 , destination(destination
)
1997 MacroAssembler::Jump jump
;
1998 BlockIndex destination
;
2000 Vector
<BranchRecord
, 8> m_branches
;
2002 Vector
<ValueSource
, 0> m_arguments
;
2003 Vector
<ValueSource
, 0> m_variables
;
2004 int m_lastSetOperand
;
2005 CodeOrigin m_codeOriginForOSR
;
2007 AbstractState m_state
;
2009 ValueRecovery
computeValueRecoveryFor(const ValueSource
&);
2011 ValueRecovery
computeValueRecoveryFor(int operand
)
2013 return computeValueRecoveryFor(valueSourceForOperand(operand
));
2018 // === Operand types ===
2020 // IntegerOperand, DoubleOperand and JSValueOperand.
2022 // These classes are used to lock the operands to a node into machine
2023 // registers. These classes implement of pattern of locking a value
2024 // into register at the point of construction only if it is already in
2025 // registers, and otherwise loading it lazily at the point it is first
2026 // used. We do so in order to attempt to avoid spilling one operand
2027 // in order to make space available for another.
2029 class IntegerOperand
{
2031 explicit IntegerOperand(SpeculativeJIT
* jit
, Edge use
)
2033 , m_index(use
.index())
2034 , m_gprOrInvalid(InvalidGPRReg
)
2036 , m_format(DataFormatNone
)
2040 ASSERT(use
.useKind() != DoubleUse
);
2041 if (jit
->isFilled(m_index
))
2047 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2048 m_jit
->unlock(m_gprOrInvalid
);
2051 NodeIndex
index() const
2058 gpr(); // m_format is set when m_gpr is locked.
2059 ASSERT(m_format
== DataFormatInteger
|| m_format
== DataFormatJSInteger
);
2065 if (m_gprOrInvalid
== InvalidGPRReg
)
2066 m_gprOrInvalid
= m_jit
->fillInteger(index(), m_format
);
2067 return m_gprOrInvalid
;
2072 m_jit
->use(m_index
);
2076 SpeculativeJIT
* m_jit
;
2078 GPRReg m_gprOrInvalid
;
2079 DataFormat m_format
;
2082 class DoubleOperand
{
2084 explicit DoubleOperand(SpeculativeJIT
* jit
, Edge use
)
2086 , m_index(use
.index())
2087 , m_fprOrInvalid(InvalidFPRReg
)
2091 // This is counter-intuitive but correct. DoubleOperand is intended to
2092 // be used only when you're a node that is happy to accept an untyped
2093 // value, but will special-case for doubles (using DoubleOperand) if the
2094 // value happened to already be represented as a double. The implication
2095 // is that you will not try to force the value to become a double if it
2096 // is not one already.
2097 ASSERT(use
.useKind() != DoubleUse
);
2099 if (jit
->isFilledDouble(m_index
))
2105 ASSERT(m_fprOrInvalid
!= InvalidFPRReg
);
2106 m_jit
->unlock(m_fprOrInvalid
);
2109 NodeIndex
index() const
2116 if (m_fprOrInvalid
== InvalidFPRReg
)
2117 m_fprOrInvalid
= m_jit
->fillDouble(index());
2118 return m_fprOrInvalid
;
2123 m_jit
->use(m_index
);
2127 SpeculativeJIT
* m_jit
;
2129 FPRReg m_fprOrInvalid
;
2132 class JSValueOperand
{
2134 explicit JSValueOperand(SpeculativeJIT
* jit
, Edge use
)
2136 , m_index(use
.index())
2138 , m_gprOrInvalid(InvalidGPRReg
)
2139 #elif USE(JSVALUE32_64)
2144 ASSERT(use
.useKind() != DoubleUse
);
2146 if (jit
->isFilled(m_index
))
2148 #elif USE(JSVALUE32_64)
2149 m_register
.pair
.tagGPR
= InvalidGPRReg
;
2150 m_register
.pair
.payloadGPR
= InvalidGPRReg
;
2151 if (jit
->isFilled(m_index
))
2159 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2160 m_jit
->unlock(m_gprOrInvalid
);
2161 #elif USE(JSVALUE32_64)
2163 ASSERT(m_register
.fpr
!= InvalidFPRReg
);
2164 m_jit
->unlock(m_register
.fpr
);
2166 ASSERT(m_register
.pair
.tagGPR
!= InvalidGPRReg
&& m_register
.pair
.payloadGPR
!= InvalidGPRReg
);
2167 m_jit
->unlock(m_register
.pair
.tagGPR
);
2168 m_jit
->unlock(m_register
.pair
.payloadGPR
);
2173 NodeIndex
index() const
2181 if (m_gprOrInvalid
== InvalidGPRReg
)
2182 m_gprOrInvalid
= m_jit
->fillJSValue(index());
2183 return m_gprOrInvalid
;
2185 JSValueRegs
jsValueRegs()
2187 return JSValueRegs(gpr());
2189 #elif USE(JSVALUE32_64)
2190 bool isDouble() { return m_isDouble
; }
2194 if (m_register
.pair
.tagGPR
== InvalidGPRReg
&& m_register
.pair
.payloadGPR
== InvalidGPRReg
)
2195 m_isDouble
= !m_jit
->fillJSValue(index(), m_register
.pair
.tagGPR
, m_register
.pair
.payloadGPR
, m_register
.fpr
);
2201 ASSERT(!m_isDouble
);
2202 return m_register
.pair
.tagGPR
;
2208 ASSERT(!m_isDouble
);
2209 return m_register
.pair
.payloadGPR
;
2212 JSValueRegs
jsValueRegs()
2214 return JSValueRegs(tagGPR(), payloadGPR());
2221 return m_register
.fpr
;
2227 m_jit
->use(m_index
);
2231 SpeculativeJIT
* m_jit
;
2234 GPRReg m_gprOrInvalid
;
2235 #elif USE(JSVALUE32_64)
2247 class StorageOperand
{
2249 explicit StorageOperand(SpeculativeJIT
* jit
, Edge use
)
2251 , m_index(use
.index())
2252 , m_gprOrInvalid(InvalidGPRReg
)
2255 ASSERT(use
.useKind() != DoubleUse
);
2256 if (jit
->isFilled(m_index
))
2262 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2263 m_jit
->unlock(m_gprOrInvalid
);
2266 NodeIndex
index() const
2273 if (m_gprOrInvalid
== InvalidGPRReg
)
2274 m_gprOrInvalid
= m_jit
->fillStorage(index());
2275 return m_gprOrInvalid
;
2280 m_jit
->use(m_index
);
2284 SpeculativeJIT
* m_jit
;
2286 GPRReg m_gprOrInvalid
;
2290 // === Temporaries ===
2292 // These classes are used to allocate temporary registers.
2293 // A mechanism is provided to attempt to reuse the registers
2294 // currently allocated to child nodes whose value is consumed
2295 // by, and not live after, this operation.
2297 class GPRTemporary
{
2300 GPRTemporary(SpeculativeJIT
*);
2301 GPRTemporary(SpeculativeJIT
*, GPRReg specific
);
2302 GPRTemporary(SpeculativeJIT
*, SpeculateIntegerOperand
&);
2303 GPRTemporary(SpeculativeJIT
*, SpeculateIntegerOperand
&, SpeculateIntegerOperand
&);
2304 GPRTemporary(SpeculativeJIT
*, SpeculateStrictInt32Operand
&);
2305 GPRTemporary(SpeculativeJIT
*, IntegerOperand
&);
2306 GPRTemporary(SpeculativeJIT
*, IntegerOperand
&, IntegerOperand
&);
2307 GPRTemporary(SpeculativeJIT
*, SpeculateCellOperand
&);
2308 GPRTemporary(SpeculativeJIT
*, SpeculateBooleanOperand
&);
2310 GPRTemporary(SpeculativeJIT
*, JSValueOperand
&);
2311 #elif USE(JSVALUE32_64)
2312 GPRTemporary(SpeculativeJIT
*, JSValueOperand
&, bool tag
= true);
2314 GPRTemporary(SpeculativeJIT
*, StorageOperand
&);
2316 void adopt(GPRTemporary
&);
2320 if (m_jit
&& m_gpr
!= InvalidGPRReg
)
2321 m_jit
->unlock(gpr());
2330 SpeculativeJIT
* m_jit
;
2334 class FPRTemporary
{
2336 FPRTemporary(SpeculativeJIT
*);
2337 FPRTemporary(SpeculativeJIT
*, DoubleOperand
&);
2338 FPRTemporary(SpeculativeJIT
*, DoubleOperand
&, DoubleOperand
&);
2339 FPRTemporary(SpeculativeJIT
*, SpeculateDoubleOperand
&);
2340 FPRTemporary(SpeculativeJIT
*, SpeculateDoubleOperand
&, SpeculateDoubleOperand
&);
2341 #if USE(JSVALUE32_64)
2342 FPRTemporary(SpeculativeJIT
*, JSValueOperand
&);
2347 m_jit
->unlock(fpr());
2352 ASSERT(m_fpr
!= InvalidFPRReg
);
2357 FPRTemporary(SpeculativeJIT
* jit
, FPRReg lockedFPR
)
2364 SpeculativeJIT
* m_jit
;
2371 // These classes lock the result of a call to a C++ helper function.
2373 class GPRResult
: public GPRTemporary
{
2375 GPRResult(SpeculativeJIT
* jit
)
2376 : GPRTemporary(jit
, GPRInfo::returnValueGPR
)
2381 #if USE(JSVALUE32_64)
2382 class GPRResult2
: public GPRTemporary
{
2384 GPRResult2(SpeculativeJIT
* jit
)
2385 : GPRTemporary(jit
, GPRInfo::returnValueGPR2
)
2391 class FPRResult
: public FPRTemporary
{
2393 FPRResult(SpeculativeJIT
* jit
)
2394 : FPRTemporary(jit
, lockedResult(jit
))
2399 static FPRReg
lockedResult(SpeculativeJIT
* jit
)
2401 jit
->lock(FPRInfo::returnValueFPR
);
2402 return FPRInfo::returnValueFPR
;
2407 // === Speculative Operand types ===
2409 // SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
2411 // These are used to lock the operands to a node into machine registers within the
2412 // SpeculativeJIT. The classes operate like those above, however these will
2413 // perform a speculative check for a more restrictive type than we can statically
2414 // determine the operand to have. If the operand does not have the requested type,
2415 // a bail-out to the non-speculative path will be taken.
2417 class SpeculateIntegerOperand
{
2419 explicit SpeculateIntegerOperand(SpeculativeJIT
* jit
, Edge use
)
2421 , m_index(use
.index())
2422 , m_gprOrInvalid(InvalidGPRReg
)
2424 , m_format(DataFormatNone
)
2428 ASSERT(use
.useKind() != DoubleUse
);
2429 if (jit
->isFilled(m_index
))
2433 ~SpeculateIntegerOperand()
2435 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2436 m_jit
->unlock(m_gprOrInvalid
);
2439 NodeIndex
index() const
2446 gpr(); // m_format is set when m_gpr is locked.
2447 ASSERT(m_format
== DataFormatInteger
|| m_format
== DataFormatJSInteger
);
2453 if (m_gprOrInvalid
== InvalidGPRReg
)
2454 m_gprOrInvalid
= m_jit
->fillSpeculateInt(index(), m_format
);
2455 return m_gprOrInvalid
;
2459 SpeculativeJIT
* m_jit
;
2461 GPRReg m_gprOrInvalid
;
2462 DataFormat m_format
;
2465 class SpeculateStrictInt32Operand
{
2467 explicit SpeculateStrictInt32Operand(SpeculativeJIT
* jit
, Edge use
)
2469 , m_index(use
.index())
2470 , m_gprOrInvalid(InvalidGPRReg
)
2473 ASSERT(use
.useKind() != DoubleUse
);
2474 if (jit
->isFilled(m_index
))
2478 ~SpeculateStrictInt32Operand()
2480 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2481 m_jit
->unlock(m_gprOrInvalid
);
2484 NodeIndex
index() const
2491 if (m_gprOrInvalid
== InvalidGPRReg
)
2492 m_gprOrInvalid
= m_jit
->fillSpeculateIntStrict(index());
2493 return m_gprOrInvalid
;
2498 m_jit
->use(m_index
);
2502 SpeculativeJIT
* m_jit
;
2504 GPRReg m_gprOrInvalid
;
2507 class SpeculateDoubleOperand
{
2509 explicit SpeculateDoubleOperand(SpeculativeJIT
* jit
, Edge use
)
2511 , m_index(use
.index())
2512 , m_fprOrInvalid(InvalidFPRReg
)
2515 ASSERT(use
.useKind() == DoubleUse
);
2516 if (jit
->isFilled(m_index
))
2520 ~SpeculateDoubleOperand()
2522 ASSERT(m_fprOrInvalid
!= InvalidFPRReg
);
2523 m_jit
->unlock(m_fprOrInvalid
);
2526 NodeIndex
index() const
2533 if (m_fprOrInvalid
== InvalidFPRReg
)
2534 m_fprOrInvalid
= m_jit
->fillSpeculateDouble(index());
2535 return m_fprOrInvalid
;
2539 SpeculativeJIT
* m_jit
;
2541 FPRReg m_fprOrInvalid
;
2544 class SpeculateCellOperand
{
2546 explicit SpeculateCellOperand(SpeculativeJIT
* jit
, Edge use
)
2548 , m_index(use
.index())
2549 , m_gprOrInvalid(InvalidGPRReg
)
2552 ASSERT(use
.useKind() != DoubleUse
);
2553 if (jit
->isFilled(m_index
))
2557 ~SpeculateCellOperand()
2559 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2560 m_jit
->unlock(m_gprOrInvalid
);
2563 NodeIndex
index() const
2570 if (m_gprOrInvalid
== InvalidGPRReg
)
2571 m_gprOrInvalid
= m_jit
->fillSpeculateCell(index());
2572 return m_gprOrInvalid
;
2577 m_jit
->use(m_index
);
2581 SpeculativeJIT
* m_jit
;
2583 GPRReg m_gprOrInvalid
;
2586 class SpeculateBooleanOperand
{
2588 explicit SpeculateBooleanOperand(SpeculativeJIT
* jit
, Edge use
)
2590 , m_index(use
.index())
2591 , m_gprOrInvalid(InvalidGPRReg
)
2594 ASSERT(use
.useKind() != DoubleUse
);
2595 if (jit
->isFilled(m_index
))
2599 ~SpeculateBooleanOperand()
2601 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
2602 m_jit
->unlock(m_gprOrInvalid
);
2605 NodeIndex
index() const
2612 if (m_gprOrInvalid
== InvalidGPRReg
)
2613 m_gprOrInvalid
= m_jit
->fillSpeculateBoolean(index());
2614 return m_gprOrInvalid
;
2619 m_jit
->use(m_index
);
2623 SpeculativeJIT
* m_jit
;
2625 GPRReg m_gprOrInvalid
;
2628 inline SpeculativeJIT::SpeculativeJIT(JITCompiler
& jit
)
2629 : m_compileOkay(true)
2633 , m_generationInfo(m_jit
.codeBlock()->m_numCalleeRegisters
)
2634 , m_blockHeads(jit
.graph().m_blocks
.size())
2635 , m_arguments(jit
.codeBlock()->numParameters())
2636 , m_variables(jit
.graph().m_localVars
)
2637 , m_lastSetOperand(std::numeric_limits
<int>::max())
2638 , m_state(m_jit
.graph())
2642 } } // namespace JSC::DFG