/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef DFGSpeculativeJIT_h
#define DFGSpeculativeJIT_h

#include <wtf/Platform.h>

#include "DFGAbstractState.h"
#include "DFGGenerationInfo.h"
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
#include "DFGOSRExitJumpPlaceholder.h"
#include "DFGOperations.h"
#include "DFGSilentRegisterSavePlan.h"
#include "DFGValueSource.h"
#include "MarkedAllocator.h"
#include "ValueRecovery.h"
namespace JSC { namespace DFG {

class SlowPathGenerator;
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;

enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue };
// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
// incomplete code path for the dataflow. When code generating
// we may make assumptions about operand types, dynamically check,
// and bail-out to an alternate code path if these checks fail.
// Importantly, the speculative code path cannot be reentered once
// a speculative check has failed. This allows the SpeculativeJIT
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
class SpeculativeJIT {
    friend struct OSRExit;
private:
    typedef JITCompiler::TrustedImm32 TrustedImm32;
    typedef JITCompiler::Imm32 Imm32;
    typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
    typedef JITCompiler::ImmPtr ImmPtr;
    typedef JITCompiler::TrustedImm64 TrustedImm64;
    typedef JITCompiler::Imm64 Imm64;
    // These constants are used to set priorities for spill order for
    // the register allocator.
#if USE(JSVALUE64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled  = 2, // no spill
        SpillOrderJS       = 4, // needs spill
        SpillOrderCell     = 4, // needs spill
        SpillOrderStorage  = 4, // needs spill
        SpillOrderInteger  = 5, // needs spill and box
        SpillOrderBoolean  = 5, // needs spill and box
        SpillOrderDouble   = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled  = 2, // no spill
        SpillOrderJS       = 4, // needs spill
        SpillOrderStorage  = 4, // needs spill
        SpillOrderDouble   = 4, // needs spill
        SpillOrderInteger  = 5, // needs spill and box
        SpillOrderCell     = 5, // needs spill and box
        SpillOrderBoolean  = 5, // needs spill and box
    };
#endif
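    // Lower spill-order values mark registers whose contents are cheaper to evict and
    // refill: constants and already-spilled values need no store at all, while boxed or
    // converted formats cost extra work to rematerialize.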
    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
public:
    SpeculativeJIT(JITCompiler&);

    void createOSREntries();
    void linkOSREntries(LinkBuffer&);
    BlockIndex nextBlock()
    {
        for (BlockIndex result = m_block + 1; ; result++) {
            if (result >= m_jit.graph().m_blocks.size())
                return NoBlock;
            if (m_jit.graph().m_blocks[result])
                return result;
        }
    }
    GPRReg fillInteger(Edge, DataFormat& returnFormat);
#if USE(JSVALUE64)
    GPRReg fillJSValue(Edge);
#elif USE(JSVALUE32_64)
    bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
#endif
    GPRReg fillStorage(Edge);
    // lock and unlock GPR & FPR registers.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }
    // Used to check whether a child node is on its last use,
    // and its machine registers may be reused.
    bool canReuse(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.canReuse();
    }
    bool canReuse(Edge nodeUse)
    {
        return canReuse(nodeUse.node());
    }
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }
    // Allocate a gpr/fpr.
    GPRReg allocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
            GenerationInfo& info = m_generationInfo[spillMe];
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return gpr;
    }
    GPRReg allocate(GPRReg specific)
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
            GenerationInfo& info = m_generationInfo[spillMe];
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return specific;
    }
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    FPRReg fprAllocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister)
            spill(spillMe);
        return fpr;
    }
    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not we attempt to
    // avoid spilling values we will need immediately).
    bool isFilled(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() != DataFormatNone;
    }
    bool isFilledDouble(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() == DataFormatDouble;
    }
    // Called on an operand once it has been consumed by a parent node.
    void use(Node* node)
    {
        if (!node->hasResult())
            return;
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use(*m_stream))
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
    void use(Edge nodeUse)
    {
        use(nodeUse.node());
    }
    RegisterSet usedRegisters()
    {
        RegisterSet result;
        for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
            GPRReg gpr = GPRInfo::toRegister(i);
            if (m_gprs.isInUse(gpr))
                result.set(gpr);
        }
        for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
            FPRReg fpr = FPRInfo::toRegister(i);
            if (m_fprs.isInUse(fpr))
                result.set(fpr);
        }
        return result;
    }
    static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);

    void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);

    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
    }
    // Called by the speculative operand types, below, to fill operand to
    // machine registers, implicitly generating speculation checks as needed.
    GPRReg fillSpeculateInt(Edge, DataFormat& returnFormat);
    GPRReg fillSpeculateIntStrict(Edge);
    FPRReg fillSpeculateDouble(Edge);
    GPRReg fillSpeculateCell(Edge);
    GPRReg fillSpeculateBoolean(Edge);
    GeneratedOperandType checkGeneratedTypeForToInt32(Node*);

    void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
    void runSlowPathGenerators();

    void noticeOSRBirth(Node*);
    void compile(BasicBlock&);

    void checkArgumentTypes();

    void clearGenerationInfo();
    // These methods are used when generating 'unexpected'
    // calls out from JIT code to C++ helper routines -
    // they spill all live values to the appropriate
    // slots in the JSStack without changing any state
    // in the GenerationInfo.
    SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
    SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
    void silentSpill(const SilentRegisterSavePlan&);
    void silentFill(const SilentRegisterSavePlan&, GPRReg canTrample);
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        ASSERT(plans.isEmpty());
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) {
                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister && iter.regID() != fprExclude) {
                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
    }
#if USE(JSVALUE32_64)
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
    }
#endif
    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
    }
    static GPRReg pickCanTrample(GPRReg exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude)
            result = GPRInfo::regT1;
        return result;
    }
    static GPRReg pickCanTrample(FPRReg)
    {
        return GPRInfo::regT0;
    }
    static GPRReg pickCanTrample(NoResultTag)
    {
        return GPRInfo::regT0;
    }

#if USE(JSVALUE32_64)
    static GPRReg pickCanTrample(JSValueRegs exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude.tagGPR()) {
            result = GPRInfo::regT1;
            if (result == exclude.payloadGPR())
                result = GPRInfo::regT2;
        } else if (result == exclude.payloadGPR()) {
            result = GPRInfo::regT1;
            if (result == exclude.tagGPR())
                result = GPRInfo::regT2;
        }
        return result;
    }
#endif
    template<typename RegisterType>
    void silentFillAllRegisters(RegisterType exclude)
    {
        GPRReg canTrample = pickCanTrample(exclude);

        while (!m_plans.isEmpty()) {
            SilentRegisterSavePlan& plan = m_plans.last();
            silentFill(plan, canTrample);
            m_plans.removeLast();
        }
    }
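    // silentSpillAllRegisters() and silentFillAllRegisters() are used as a matched pair
    // around an unexpected call out to C++: the spill records a SilentRegisterSavePlan for
    // every live register, and the fill replays those plans afterwards, with
    // pickCanTrample() choosing a scratch GPR that is safe to clobber while restoring
    // (i.e. one that does not hold the call's result).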
    // These methods convert between doubles, and doubles boxed and JSValues.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, fpr);
    }
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }
#elif USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif
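    // On JSVALUE64 a boxed double fits in a single GPR (the JSValue encoding of the
    // double's bit pattern), so boxing produces one register; on JSVALUE32_64 a JSValue is
    // a tag/payload pair, so boxing and unboxing move between an FPR and two GPRs.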
    // Spill a VirtualRegister to the JSStack.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
            return;
#endif
        // Check the GenerationInfo to see if this value needs writing
        // to the JSStack - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled(*m_stream, spillMe);
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatStorage);
            return;
        }

        case DataFormatInteger: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatInteger);
            return;
        }

#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        default:
            // The following code handles JSValues, int32s, and cells.
            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);

            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            if (spillFormat == DataFormatInteger)
                m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);

            // Spill the value, and record it as spilled in its boxed form.
            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
            return;
#elif USE(JSVALUE32_64)
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        case DataFormatDouble:
        case DataFormatJSDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatJSDouble);
            return;
        }

        default:
            // The following code handles JSValues.
            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
#endif
        }
    }
    bool isKnownInteger(Node* node) { return !(m_state.forNode(node).m_type & ~SpecInt32); }
    bool isKnownCell(Node* node) { return !(m_state.forNode(node).m_type & ~SpecCell); }

    bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32); }
    bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecNumber); }
    bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
    // Checks/accessors for constant values.
    bool isConstant(Node* node) { return m_jit.graph().isConstant(node); }
    bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); }
    bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); }
    bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); }
    bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); }
    bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); }
    bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); }
    int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); }
    double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); }
#if USE(JSVALUE32_64)
    void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); }
#endif
    JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); }
    bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); }
    JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); }
    bool isNullConstant(Node* node)
    {
        if (!isConstant(node))
            return false;
        return valueOfJSConstant(node).isNull();
    }

    Identifier* identifier(unsigned index)
    {
        return &m_jit.codeBlock()->identifier(index);
    }
    // Spill all VirtualRegisters back to the JSStack.
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
    }

#ifndef NDEBUG
    // Used to ASSERT flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        return true;
    }
#endif
#if USE(JSVALUE64)
    MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
    {
        return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node)));
    }
#endif
    // Helper functions to enable code sharing in implementations of bit/shift ops.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case BitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case BitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case BitOr:
            m_jit.or32(op1, op2, result);
            break;
        case BitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
    unsigned detectPeepHoleBranch()
    {
        BasicBlock* block = m_jit.graph().m_blocks[m_block].get();

        // Check that no intervening nodes will be generated.
        for (unsigned index = m_indexInBlock + 1; index < block->size() - 1; ++index) {
            Node* node = block->at(index);
            if (node->shouldGenerate())
                return UINT_MAX;
        }

        // Check if the lastNode is a branch on this node.
        Node* lastNode = block->last();
        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? block->size() - 1 : UINT_MAX;
    }
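    // Comparison-style nodes use this to fuse their test directly into the block's
    // terminal Branch: if nothing else will be generated in between, the comparison can
    // branch itself instead of materializing a boolean that the Branch would immediately
    // consume.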
    void compileMovHint(Node*);
    void compileMovHintAndCheck(Node*);
    void compileInlineStart(Node*);

    void nonSpeculativeUInt32ToNumber(Node*);
#if USE(JSVALUE64)
    void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
    void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
    void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
    void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false);
    bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false);

    void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);

    void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
    void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
    bool nonSpeculativeStrictEq(Node*, bool invert = false);

    void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
    void compileInstanceOf(Node*);
    // Access to our fixed callee CallFrame.
    MacroAssembler::Address callFrameSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
    }

    // Access to our fixed callee CallFrame.
    MacroAssembler::Address argumentSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
    }

    MacroAssembler::Address callFrameTagSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address callFramePayloadSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    MacroAssembler::Address argumentTagSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address argumentPayloadSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }
    void emitCall(Node*);

    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GenerationInfo).
    void useChildren(Node*);
    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    void integerResult(GPRReg reg, Node* node, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        if (format == DataFormatInteger) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInteger(node, node->refCount(), reg);
        } else {
#if USE(JSVALUE64)
            RELEASE_ASSERT(format == DataFormatJSInteger);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
    void integerResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        integerResult(reg, node, DataFormatInteger, mode);
    }
    void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == UseChildrenCalledExplicitly)
            return;
        useChildren(node);
    }
    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initCell(node, node->refCount(), reg);
    }
    void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initBoolean(node, node->refCount(), reg);
    }
#if USE(JSVALUE64)
    void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (format == DataFormatJSInteger)
            m_jit.jitAssertIsJSInt32(reg);

        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), reg, format);
    }
    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        jsValueResult(reg, node, DataFormatJS, mode);
    }
#elif USE(JSVALUE32_64)
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), tag, payload, format);
    }
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, node, DataFormatJS, mode);
    }
#endif
    void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initStorage(node, node->refCount(), reg);
    }
    void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initDouble(node, node->refCount(), reg);
    }
    void initConstantInfo(Node* node)
    {
        ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node));
        m_generationInfo[node->virtualRegister()].initConstant(node, node->refCount());
    }
    // These methods add calls to C++ helper functions.
    // These methods are broadly value representation specific (i.e.
    // deal with the fact that a JSValue may be passed in one or two
    // machine registers, and delegate the calling convention specific
    // decision as to how to fill the registers to setupArguments* methods.
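    // The DFGOperation typedef names encode each helper's signature: the prefix gives the
    // return type and the letters after the underscore give the argument types (e.g.
    // E = ExecState*, J = EncodedJSValue, C = JSCell*, P = pointer, Z = int32_t,
    // D = double, I = Identifier*, St = Structure*), so J_DFGOperation_EJJ returns an
    // EncodedJSValue and takes an ExecState* plus two EncodedJSValues.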
    JITCompiler::Call callOperation(P_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EC operation, GPRReg result, GPRReg cell)
    {
        m_jit.setupArgumentsWithExecState(cell);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EO operation, GPRReg result, GPRReg object)
    {
        m_jit.setupArgumentsWithExecState(object);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EOS operation, GPRReg result, GPRReg object, size_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size)));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
    {
        m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_ES operation, GPRReg result, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_ESt operation, GPRReg result, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ESt operation, GPRReg result, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECZ operation, GPRReg arg1, int arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECC operation, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_W operation, WatchpointSet* watchpointSet)
    {
        m_jit.setupArguments(TrustedImmPtr(watchpointSet));
        return appendCall(operation);
    }
    template<typename FunctionType, typename ArgumentType1>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
    {
        return callOperation(operation, arg1);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
    {
        return callOperation(operation, arg1, arg2);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3)
    {
        return callOperation(operation, arg1, arg2, arg3);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4)
    {
        return callOperation(operation, arg1, arg2, arg3, arg4);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
    {
        return callOperation(operation, arg1, arg2, arg3, arg4, arg5);
    }
    JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(Str_DFGOperation_EJss operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EZ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
#if USE(JSVALUE64)
    JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGriJsgI operation, GPRReg result, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EAZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, int32_t arg1)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZIcfZ operation, GPRReg result, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArguments(arg1);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
    {
        m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
#else // USE(JSVALUE32_64)

    // EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned to an even-numbered register (r0, r2 or [sp]).
    // To keep the generated calls from using the wrong registers, occupy r1 or r3 with a dummy argument when necessary.
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
#define EABI_32BIT_DUMMY_ARG      TrustedImm32(0),
#else
#define EABI_32BIT_DUMMY_ARG
#endif
    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        prepareForExternalCall();
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg resultTag, GPRReg resultPayload)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGriJsgI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EAZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EC operation, GPRReg resultTag, GPRReg resultPayload, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EDA operation, GPRReg resultTag, GPRReg resultPayload, FPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, TrustedImm32 arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZIcfZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArguments(arg1Payload, arg1Tag);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
, resultPayload
, resultTag
);
1489 JITCompiler::Call
callOperation(J_DFGOperation_EIRoPtbo operation
, GPRReg resultTag
, GPRReg resultPayload
, Identifier
* identifier
, ResolveOperations
* operations
, PutToBaseOperation
* putToBaseOperations
)
1491 m_jit
.setupArgumentsWithExecState(TrustedImmPtr(identifier
), TrustedImmPtr(operations
), TrustedImmPtr(putToBaseOperations
));
1492 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1495 JITCompiler::Call
callOperation(J_DFGOperation_ECJ operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, GPRReg arg2Tag
, GPRReg arg2Payload
)
1497 m_jit
.setupArgumentsWithExecState(arg1
, arg2Payload
, arg2Tag
);
1498 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1500 JITCompiler::Call
callOperation(J_DFGOperation_ECC operation
, GPRReg resultTag
, GPRReg resultPayload
, GPRReg arg1
, GPRReg arg2
)
1502 m_jit
.setupArgumentsWithExecState(arg1
, arg2
);
1503 return appendCallWithExceptionCheckSetResult(operation
, resultPayload
, resultTag
);
1506 JITCompiler::Call
callOperation(V_DFGOperation_EJPP operation
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
, void* pointer
)
1508 m_jit
.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload
, arg1Tag
, arg2
, TrustedImmPtr(pointer
));
1509 return appendCallWithExceptionCheck(operation
);
1511 JITCompiler::Call
callOperation(V_DFGOperation_EJCI operation
, GPRReg arg1Tag
, GPRReg arg1Payload
, GPRReg arg2
, Identifier
* identifier
)
1513 m_jit
.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload
, arg1Tag
, arg2
, TrustedImmPtr(identifier
));
1514 return appendCallWithExceptionCheck(operation
);
1516 JITCompiler::Call
callOperation(V_DFGOperation_ECJJ operation
, GPRReg arg1
, GPRReg arg2Tag
, GPRReg arg2Payload
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1518 m_jit
.setupArgumentsWithExecState(arg1
, arg2Payload
, arg2Tag
, arg3Payload
, arg3Tag
);
1519 return appendCallWithExceptionCheck(operation
);
1522 JITCompiler::Call
callOperation(V_DFGOperation_EPZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1524 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, EABI_32BIT_DUMMY_ARG arg3Payload
, arg3Tag
);
1525 return appendCallWithExceptionCheck(operation
);
1528 JITCompiler::Call
callOperation(V_DFGOperation_EOZJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3Tag
, GPRReg arg3Payload
)
1530 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, EABI_32BIT_DUMMY_ARG arg3Payload
, arg3Tag
);
1531 return appendCallWithExceptionCheck(operation
);
1533 JITCompiler::Call
callOperation(V_DFGOperation_EOZJ operation
, GPRReg arg1
, GPRReg arg2
, TrustedImm32 arg3Tag
, GPRReg arg3Payload
)
1535 m_jit
.setupArgumentsWithExecState(arg1
, arg2
, EABI_32BIT_DUMMY_ARG arg3Payload
, arg3Tag
);
1536 return appendCallWithExceptionCheck(operation
);
1539 JITCompiler::Call
callOperation(D_DFGOperation_EJ operation
, FPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
)
1541 m_jit
.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload
, arg1Tag
);
1542 return appendCallWithExceptionCheckSetResult(operation
, result
);
1545 #undef EABI_32BIT_DUMMY_ARG
    template<typename FunctionType>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR());
    }
    template<typename FunctionType, typename ArgumentType1>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3, typename ArgumentType4>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3, ArgumentType4 arg4)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
    {
        return callOperation(
            operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4, arg5);
    }
#endif // USE(JSVALUE32_64)
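    // Illustrative sketch (not part of the original header): how a node's code
    // generator typically routes a helper call through the overloads above. The
    // operation, registers, and the flushRegisters()/jsValueResult() helpers are
    // assumed from the surrounding SpeculativeJIT class; this is a hypothetical
    // JSVALUE32_64 use site, not an in-tree call.
    //
    //     JSValueOperand value(this, node->child1());
    //     GPRReg valueTagGPR = value.tagGPR();
    //     GPRReg valuePayloadGPR = value.payloadGPR();
    //     flushRegisters();
    //     GPRResult2 resultTag(this);
    //     GPRResult resultPayload(this);
    //     callOperation(operation, resultTag.gpr(), resultPayload.gpr(), valueTagGPR, valuePayloadGPR);
    //     jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);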
#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS)
    void prepareForExternalCall()
    {
        // We're about to call out to a "native" helper function. The helper
        // function is expected to set topCallFrame itself with the ExecState
        // that is passed to it.
        //
        // We explicitly trash topCallFrame here so that we'll know if some of
        // the helper functions are not setting topCallFrame when they should
        // be doing so. Note: the previous value in topCallFrame was not valid
        // anyway since it was not being updated by JIT'ed code by design.
        for (unsigned i = 0; i < sizeof(void*) / 4; i++)
            m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.vm()->topCallFrame) + i * 4);
    }
#else
    void prepareForExternalCall() { }
#endif
    // These methods add call instructions, with optional exception checks & setting results.
    JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
    {
        prepareForExternalCall();
        CodeOrigin codeOrigin = m_currentNode->codeOrigin;
        CallBeginToken token;
        m_jit.beginCall(codeOrigin, token);
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.addExceptionCheck(call, codeOrigin, token);
        return call;
    }
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.move(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
    {
        prepareForExternalCall();
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.move(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call appendCall(const FunctionPtr& function)
    {
        prepareForExternalCall();
        return m_jit.appendCall(function);
    }
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.setupResults(result1, result2);
        return call;
    }
#if CPU(X86)
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
        m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
        return call;
    }
    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
        m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
        return call;
    }
#elif CPU(ARM)
#if CPU(ARM_HARDFP)
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.moveDouble(result, FPRInfo::argumentFPR0);
        return call;
    }
    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.moveDouble(result, FPRInfo::argumentFPR0);
        return call;
    }
#else
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
        return call;
    }
    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
        return call;
    }
#endif // CPU(ARM_HARDFP)
#else
    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = appendCallWithExceptionCheck(function);
        m_jit.moveDouble(FPRInfo::returnValueFPR, result);
        return call;
    }
    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
    {
        JITCompiler::Call call = m_jit.appendCall(function);
        m_jit.moveDouble(FPRInfo::returnValueFPR, result);
        return call;
    }
#endif
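    // Illustrative sketch (not part of the original header): a new callOperation
    // overload is typically just argument marshalling plus one of the append*
    // helpers above. The operation typedef below is hypothetical.
    //
    //     JITCompiler::Call callOperation(SomeOperationType operation, GPRReg result, GPRReg arg1)
    //     {
    //         m_jit.setupArgumentsWithExecState(arg1);
    //         return appendCallWithExceptionCheckSetResult(operation, result);
    //     }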
    void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BlockIndex destination)
    {
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchDouble(cond, left, right), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchDouble(JITCompiler::invert(cond), left, right);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    void branchDoubleNonZero(FPRReg value, FPRReg scratch, BlockIndex destination)
    {
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchDoubleZeroOrNaN(value, scratch);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T, typename U>
    void branch32(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
    {
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branch32(cond, left, right), destination);
        
        JITCompiler::Jump notTaken = m_jit.branch32(JITCompiler::invert(cond), left, right);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T, typename U>
    void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTest32(cond, value, mask), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value, mask);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T>
    void branchTest32(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTest32(cond, value), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
#if USE(JSVALUE64)
    template<typename T, typename U>
    void branch64(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
    {
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branch64(cond, left, right), destination);
        
        JITCompiler::Jump notTaken = m_jit.branch64(JITCompiler::invert(cond), left, right);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
#endif
    
    template<typename T, typename U>
    void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
    {
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchPtr(cond, left, right), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchPtr(JITCompiler::invert(cond), left, right);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T, typename U>
    void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value, mask);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T>
    void branchTestPtr(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTestPtr(cond, value), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T, typename U>
    void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTest8(cond, value, mask), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value, mask);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
    
    template<typename T>
    void branchTest8(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
    {
        ASSERT(JITCompiler::isInvertible(cond));
        
        if (!haveEdgeCodeToEmit(destination))
            return addBranch(m_jit.branchTest8(cond, value), destination);
        
        JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value);
        emitEdgeCode(destination);
        addBranch(m_jit.jump(), destination);
        notTaken.link(&m_jit);
    }
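    // Illustrative sketch (not part of the original header): a typical two-way
    // branch emitted for a compare node. The registers and block indices are
    // hypothetical. When edge-code verification is off, the helper emits a single
    // conditional jump; when it is on, the inverted condition is used so the edge
    // code for the taken block can run before the jump.
    //
    //     branch32(JITCompiler::LessThan, op1GPR, op2GPR, taken);
    //     jump(notTaken);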
    enum FallThroughMode {
        AtFallThroughPoint,
        ForceJump
    };
    void jump(BlockIndex destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
    {
        if (haveEdgeCodeToEmit(destination))
            emitEdgeCode(destination);
        if (destination == nextBlock()
            && fallThroughMode == AtFallThroughPoint)
            return;
        addBranch(m_jit.jump(), destination);
    }
    
    inline bool haveEdgeCodeToEmit(BlockIndex)
    {
        return DFG_ENABLE_EDGE_CODE_VERIFICATION;
    }
    void emitEdgeCode(BlockIndex destination)
    {
        if (!DFG_ENABLE_EDGE_CODE_VERIFICATION)
            return;
        m_jit.move(TrustedImm32(destination), GPRInfo::regT0);
    }

    void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
    {
        m_branches.append(BranchRecord(jump, destination));
    }

    void linkBranches()
    {
        for (size_t i = 0; i < m_branches.size(); ++i) {
            BranchRecord& branch = m_branches[i];
            branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
        }
    }

    BasicBlock* block()
    {
        return m_jit.graph().m_blocks[m_block].get();
    }

#ifndef NDEBUG
    void dump(const char* label = 0);
#endif

#if DFG_ENABLE(CONSISTENCY_CHECK)
    void checkConsistency();
#else
    void checkConsistency() { }
#endif
    bool isInteger(Node* node)
    {
        if (node->hasInt32Result())
            return true;

        if (isInt32Constant(node))
            return true;

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        return info.isJSInteger();
    }
    
    bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
    bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
    void compilePeepHoleIntegerBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
    void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
    void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
    void compilePeepHoleObjectEquality(Node*, Node* branchNode);
    void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
    void compileObjectEquality(Node*);
    void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
    void compileValueAdd(Node*);
    void compileObjectOrOtherLogicalNot(Edge value);
    void compileLogicalNot(Node*);
    void compileStringEquality(Node*);
    void emitObjectOrOtherBranch(Edge value, BlockIndex taken, BlockIndex notTaken);
    void emitBranch(Node*);
    
    void compileToStringOnCell(Node*);
    void compileNewStringObject(Node*);
    
    void compileIntegerCompare(Node*, MacroAssembler::RelationalCondition);
    void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
    void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
    
    bool compileStrictEqForConstant(Node*, Edge value, JSValue constant);
    
    bool compileStrictEq(Node*);
    
    void compileAllocatePropertyStorage(Node*);
    void compileReallocatePropertyStorage(Node*);
#if USE(JSVALUE32_64)
    template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
    void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag);
#endif
    void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property);
    bool putByValWillNeedExtraRegister(ArrayMode arrayMode)
    {
        return arrayMode.mayStoreToHole();
    }
    GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode);
    GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node)
    {
        return temporaryRegisterForPutByVal(temporary, node->arrayMode());
    }
    
    void compileGetCharCodeAt(Node*);
    void compileGetByValOnString(Node*);
    void compileFromCharCode(Node*);

    void compileGetByValOnArguments(Node*);
    void compileGetArgumentsLength(Node*);
    
    void compileGetArrayLength(Node*);
    
    void compileValueToInt32(Node*);
    void compileUInt32ToNumber(Node*);
    void compileDoubleAsInt32(Node*);
    void compileInt32ToDouble(Node*);
    void compileAdd(Node*);
    void compileMakeRope(Node*);
    void compileArithSub(Node*);
    void compileArithNegate(Node*);
    void compileArithMul(Node*);
    void compileArithIMul(Node*);
#if CPU(X86) || CPU(X86_64)
    void compileIntegerArithDivForX86(Node*);
#elif CPU(ARM64)
    void compileIntegerArithDivForARM64(Node*);
#elif CPU(APPLE_ARMV7S)
    void compileIntegerArithDivForARMv7s(Node*);
#endif
    void compileArithMod(Node*);
    void compileSoftModulo(Node*);
    void compileGetIndexedPropertyStorage(Node*);
    void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node*, size_t elementSize, TypedArraySignedness);
    void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node*, size_t elementSize, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
    void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node*, size_t elementSize);
    void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node*, size_t elementSize);
    void compileNewFunctionNoCheck(Node*);
    void compileNewFunctionExpression(Node*);
    bool compileRegExpExec(Node*);
    // size can be an immediate or a register, and must be in bytes. If size is a register,
    // it must be a different register than resultGPR. Emits code that places a pointer to
    // the end of the allocation in resultGPR. The returned jump is the jump to the slow path.
    template<typename SizeType>
    MacroAssembler::Jump emitAllocateBasicStorage(SizeType size, GPRReg resultGPR)
    {
        CopiedAllocator* copiedAllocator = &m_jit.vm()->heap.storageAllocator();
        
        m_jit.loadPtr(&copiedAllocator->m_currentRemaining, resultGPR);
        MacroAssembler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, size, resultGPR);
        m_jit.storePtr(resultGPR, &copiedAllocator->m_currentRemaining);
        m_jit.negPtr(resultGPR);
        m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), resultGPR);
        
        return slowPath;
    }
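    // Illustrative sketch (not part of the original header): allocating a
    // fixed-size chunk of copied space and routing failure to a slow path. The
    // size expression and register name are hypothetical; note that on success
    // resultGPR points at the end of the allocation, not its base.
    //
    //     MacroAssembler::JumpList slowCases;
    //     slowCases.append(emitAllocateBasicStorage(TrustedImm32(sizeInBytes), resultGPR));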
    // Allocator for a cell of a specific size.
    template <typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
        GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR);
        slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
        
        // The object is half-allocated: we have what we know is a fresh object, but
        // it's still on the GC's free list.
        m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
        m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()));
        
        // Initialize the object's Structure.
        m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset()));
    }
    // Allocator for an object of a specific size.
    template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
        StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        emitAllocateJSCell(resultGPR, allocatorGPR, structure, scratchGPR, slowPath);
        
        // Initialize the object's property storage pointer.
        m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
    }
    // Convenience allocator for a built-in object.
    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        MarkedAllocator* allocator = 0;
        size_t size = ClassType::allocationSize(0);
        if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
            allocator = &m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(size);
        else if (ClassType::needsDestruction)
            allocator = &m_jit.vm()->heap.allocatorForObjectWithNormalDestructor(size);
        else
            allocator = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(size);
        m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
        emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
    }
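    // Illustrative sketch (not part of the original header): inline-allocating a
    // plain JSFinalObject with no out-of-line storage, falling back to a slow path
    // on failure. The structure and scratch registers are hypothetical.
    //
    //     MacroAssembler::JumpList slowPath;
    //     emitAllocateJSObject<JSFinalObject>(
    //         resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);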
    void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements);

#if USE(JSVALUE64)
    JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
#elif USE(JSVALUE32_64)
    JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
#endif
    // Add a backward speculation check.
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);

    // Add a speculation check without additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
    // Add a speculation check without additional recovery, and with a promise to supply a jump later.
    OSRExitJumpPlaceholder backwardSpeculationCheck(ExitKind, JSValueSource, Node*);
    OSRExitJumpPlaceholder backwardSpeculationCheck(ExitKind, JSValueSource, Edge);
    // Add a set of speculation checks without additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
    // Add a speculation check with additional recovery.
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void backwardSpeculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    // Use this like you would use speculationCheck(), except that you don't pass it a jump
    // (because you don't have to execute a branch; that's kind of the whole point), and you
    // must register the returned Watchpoint with something relevant. In general, this should
    // be used with extreme care. Use speculationCheck() unless you've got an amazing reason
    // not to.
    JumpReplacementWatchpoint* speculationWatchpoint(ExitKind, JSValueSource, Node*);
    // The default for speculation watchpoints is that they're uncounted, because the
    // act of firing a watchpoint invalidates it. So, future recompilations will not
    // attempt to set this watchpoint again.
    JumpReplacementWatchpoint* speculationWatchpoint(ExitKind = UncountableWatchpoint);
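    // Illustrative sketch (not part of the original header): instead of emitting a
    // branch, the watchpoint form registers a jump-replacement watchpoint with
    // something that can fire it later. speculateStringObjectForStructure() at the
    // bottom of this file uses exactly this pattern; the structure variable here
    // is hypothetical.
    //
    //     someStructure->addTransitionWatchpoint(speculationWatchpoint(NotStringObject));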
    // It is generally a good idea to not use this directly.
    void convertLastOSRExitToForward(const ValueRecovery& = ValueRecovery());
    
    // Note: not specifying the valueRecovery argument (leaving it as ValueRecovery()) implies
    // that you've ensured that there exists a MovHint prior to your use of forwardSpeculationCheck().
    void forwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const ValueRecovery& = ValueRecovery());
    void forwardSpeculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& = ValueRecovery());
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    // Called when we statically determine that a speculation will fail.
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);
    // Helpers for performing type checks on an edge stored in the given registers.
    bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_state.forNode(edge).m_type & ~typesPassedThrough; }
    void backwardTypeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail);
    void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail);
    void forwardTypeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery&);

    void speculateInt32(Edge);
    void speculateNumber(Edge);
    void speculateRealNumber(Edge);
    void speculateBoolean(Edge);
    void speculateCell(Edge);
    void speculateObject(Edge);
    void speculateObjectOrOther(Edge);
    void speculateString(Edge);
    template<typename StructureLocationType>
    void speculateStringObjectForStructure(Edge, StructureLocationType);
    void speculateStringObject(Edge, GPRReg);
    void speculateStringObject(Edge);
    void speculateStringOrStringObject(Edge);
    void speculateNotCell(Edge);
    void speculateOther(Edge);
    void speculate(Node*, Edge);
    const TypedArrayDescriptor* typedArrayDescriptor(ArrayMode);
    
    JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType);
    JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode);
    void checkArray(Node*);
    void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg);
    void arrayify(Node*);
    
    template<bool strict>
    GPRReg fillSpeculateIntInternal(Edge, DataFormat& returnFormat);
    // It is possible, during speculative generation, to reach a situation in which we
    // can statically determine a speculation will fail (for example, when two nodes
    // will make conflicting speculations about the same operand). In such cases this
    // flag is cleared, indicating no further code generation should take place.
    bool m_compileOkay;
    
    // Tracking for which nodes are currently holding the values of arguments and bytecode
    // operand-indexed variables.
    
    ValueSource valueSourceForOperand(int operand)
    {
        return valueSourceReferenceForOperand(operand);
    }
    
    void setNodeForOperand(Node* node, int operand)
    {
        valueSourceReferenceForOperand(operand) = ValueSource(MinifiedID(node));
    }
    
    // Call this with care, since it both returns a reference into an array
    // and potentially resizes the array. So it would not be right to call this
    // twice and then perform operations on both references, since the one from
    // the first call may no longer be valid.
    ValueSource& valueSourceReferenceForOperand(int operand)
    {
        if (operandIsArgument(operand)) {
            int argument = operandToArgument(operand);
            return m_arguments[argument];
        }
        
        if ((unsigned)operand >= m_variables.size())
            m_variables.resize(operand + 1);
        
        return m_variables[operand];
    }
    
    void recordSetLocal(int operand, ValueSource valueSource)
    {
        valueSourceReferenceForOperand(operand) = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(operand, valueSource.dataFormat()));
    }
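    // Illustrative sketch (not part of the original header): because the reference
    // returned above can be invalidated when m_variables is resized, copy the
    // first value out before asking for a second reference. Operand numbers are
    // hypothetical.
    //
    //     ValueSource first = valueSourceReferenceForOperand(operand1); // copy, not a reference
    //     ValueSource& second = valueSourceReferenceForOperand(operand2);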
    // The JIT, which also provides MacroAssembler functionality.
    JITCompiler& m_jit;

    // The current node being generated.
    BlockIndex m_block;
    Node* m_currentNode;
    SpeculationDirection m_speculationDirection;
#if !ASSERT_DISABLED
    bool m_canExit;
#endif
    unsigned m_indexInBlock;
    // Virtual and physical register maps.
    Vector<GenerationInfo, 32> m_generationInfo;
    RegisterBank<GPRInfo> m_gprs;
    RegisterBank<FPRInfo> m_fprs;

    Vector<MacroAssembler::Label> m_blockHeads;
    Vector<MacroAssembler::Label> m_osrEntryHeads;
    
    struct BranchRecord {
        BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
            : jump(jump)
            , destination(destination)
        {
        }

        MacroAssembler::Jump jump;
        BlockIndex destination;
    };
    Vector<BranchRecord, 8> m_branches;

    Vector<ValueSource, 0> m_arguments;
    Vector<ValueSource, 0> m_variables;
    int m_lastSetOperand;
    CodeOrigin m_codeOriginForOSR;
    
    AbstractState m_state;
    
    VariableEventStream* m_stream;
    MinifiedGraph* m_minifiedGraph;
    
    bool m_isCheckingArgumentTypes;
    
    Vector<OwnPtr<SlowPathGenerator>, 8> m_slowPathGenerators;
    Vector<SilentRegisterSavePlan> m_plans;
    
    ValueRecovery computeValueRecoveryFor(const ValueSource&);

    ValueRecovery computeValueRecoveryFor(int operand)
    {
        return computeValueRecoveryFor(valueSourceForOperand(operand));
    }
};
// === Operand types ===
//
// IntegerOperand and JSValueOperand.
//
// These classes are used to lock the operands to a node into machine
// registers. These classes implement a pattern of locking a value
// into a register at the point of construction only if it is already in
// registers, and otherwise loading it lazily at the point it is first
// used. We do so in order to attempt to avoid spilling one operand
// in order to make space available for another.

class IntegerOperand {
public:
    explicit IntegerOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
        , m_format(DataFormatNone)
#endif
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == KnownInt32Use);
        if (jit->isFilled(edge.node()))
            gpr();
    }

    ~IntegerOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }

    DataFormat format()
    {
        gpr(); // m_format is set when m_gpr is locked.
        ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
        return m_format;
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillInteger(m_edge, m_format);
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
    DataFormat m_format;
};
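// Illustrative sketch (not part of the original header): an operand locks its
// node's value into a register only when gpr() is first called, so constructing
// one is cheap if the value never has to be materialized. Written as if inside a
// SpeculativeJIT member function; the child accessor is assumed from Node.h.
//
//     IntegerOperand op1(this, node->child1());
//     GPRReg op1GPR = op1.gpr(); // Fills and locks the register on first use.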
class JSValueOperand {
public:
    explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
#if USE(JSVALUE64)
        , m_gprOrInvalid(InvalidGPRReg)
#elif USE(JSVALUE32_64)
        , m_isDouble(false)
#endif
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
#if USE(JSVALUE64)
        if (jit->isFilled(node()))
            gpr();
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        if (jit->isFilled(node()))
            fill();
#endif
    }

    ~JSValueOperand()
    {
#if USE(JSVALUE64)
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
#elif USE(JSVALUE32_64)
        if (m_isDouble) {
            ASSERT(m_register.fpr != InvalidFPRReg);
            m_jit->unlock(m_register.fpr);
        } else {
            ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
            m_jit->unlock(m_register.pair.tagGPR);
            m_jit->unlock(m_register.pair.payloadGPR);
        }
#endif
    }
    
    Edge edge()
    {
        return m_edge;
    }

    Node* node()
    {
        return edge().node();
    }

#if USE(JSVALUE64)
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillJSValue(m_edge);
        return m_gprOrInvalid;
    }
    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(gpr());
    }
#elif USE(JSVALUE32_64)
    bool isDouble() { return m_isDouble; }

    void fill()
    {
        if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
            m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
    }

    GPRReg tagGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.tagGPR;
    }

    GPRReg payloadGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.payloadGPR;
    }
    
    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(tagGPR(), payloadGPR());
    }

    FPRReg fpr()
    {
        fill();
        ASSERT(m_isDouble);
        return m_register.fpr;
    }
#endif

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
#if USE(JSVALUE64)
    GPRReg m_gprOrInvalid;
#elif USE(JSVALUE32_64)
    union {
        struct {
            GPRReg tagGPR;
            GPRReg payloadGPR;
        } pair;
        FPRReg fpr;
    } m_register;
    bool m_isDouble;
#endif
};
class StorageOperand {
public:
    explicit StorageOperand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse);
        if (jit->isFilled(node()))
            gpr();
    }
    
    ~StorageOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }
    
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillStorage(edge());
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }
    
private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
// === Temporaries ===
//
// These classes are used to allocate temporary registers.
// A mechanism is provided to attempt to reuse the registers
// currently allocated to child nodes whose value is consumed
// by, and not live after, this operation.

class GPRTemporary {
public:
    GPRTemporary(SpeculativeJIT*);
    GPRTemporary(SpeculativeJIT*, GPRReg specific);
    GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&);
    GPRTemporary(SpeculativeJIT*, IntegerOperand&);
    GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&);
#if USE(JSVALUE64)
    GPRTemporary(SpeculativeJIT*, JSValueOperand&);
#elif USE(JSVALUE32_64)
    GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true);
#endif
    GPRTemporary(SpeculativeJIT*, StorageOperand&);

    void adopt(GPRTemporary&);

    ~GPRTemporary()
    {
        if (m_jit && m_gpr != InvalidGPRReg)
            m_jit->unlock(gpr());
    }

    GPRReg gpr()
    {
        return m_gpr;
    }

private:
    SpeculativeJIT* m_jit;
    GPRReg m_gpr;
};
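// Illustrative sketch (not part of the original header): passing an operand to a
// temporary's constructor lets the temporary reuse that operand's register when
// the operand's value dies at this node, avoiding a spill. Written as if inside a
// SpeculativeJIT member function; the three-operand add32 form is assumed from
// the MacroAssembler.
//
//     SpeculateIntegerOperand op1(this, node->child1());
//     GPRTemporary result(this, op1); // May alias op1's register.
//     m_jit.add32(TrustedImm32(1), op1.gpr(), result.gpr());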
class FPRTemporary {
public:
    FPRTemporary(SpeculativeJIT*);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
#if USE(JSVALUE32_64)
    FPRTemporary(SpeculativeJIT*, JSValueOperand&);
#endif

    ~FPRTemporary()
    {
        m_jit->unlock(fpr());
    }

    FPRReg fpr() const
    {
        ASSERT(m_fpr != InvalidFPRReg);
        return m_fpr;
    }

protected:
    FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
        : m_jit(jit)
        , m_fpr(lockedFPR)
    {
    }

private:
    SpeculativeJIT* m_jit;
    FPRReg m_fpr;
};
// === Results ===
//
// These classes lock the result of a call to a C++ helper function.

class GPRResult : public GPRTemporary {
public:
    GPRResult(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR)
    {
    }
};

#if USE(JSVALUE32_64)
class GPRResult2 : public GPRTemporary {
public:
    GPRResult2(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
    {
    }
};
#endif

class FPRResult : public FPRTemporary {
public:
    FPRResult(SpeculativeJIT* jit)
        : FPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    static FPRReg lockedResult(SpeculativeJIT* jit)
    {
        jit->lock(FPRInfo::returnValueFPR);
        return FPRInfo::returnValueFPR;
    }
};
// === Speculative Operand types ===
//
// SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
//
// These are used to lock the operands to a node into machine registers within the
// SpeculativeJIT. The classes operate like those above, however these will
// perform a speculative check for a more restrictive type than we can statically
// determine the operand to have. If the operand does not have the requested type,
// a bail-out to the non-speculative path will be taken.

class SpeculateIntegerOperand {
public:
    explicit SpeculateIntegerOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
        , m_format(DataFormatNone)
#endif
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateIntegerOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }

    Node* node()
    {
        return edge().node();
    }

    DataFormat format()
    {
        gpr(); // m_format is set when m_gpr is locked.
        ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
        return m_format;
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt(edge(), m_format);
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
    DataFormat m_format;
};
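// Illustrative sketch (not part of the original header): a speculative operand
// emits its type check as part of filling the register, so once gpr() returns,
// the code that follows may assume the speculated type (or execution has already
// bailed out via OSR exit). Written as if inside a SpeculativeJIT member function.
//
//     SpeculateCellOperand base(this, node->child1());
//     GPRReg baseGPR = base.gpr(); // OSR-exits if the operand is not a cell.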
class SpeculateStrictInt32Operand {
public:
    explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
        if (jit->isFilled(node()))
            gpr();
    }
    
    ~SpeculateStrictInt32Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }
    
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateIntStrict(edge());
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }
    
private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
class SpeculateDoubleOperand {
public:
    explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_fprOrInvalid(InvalidFPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == NumberUse || edge.useKind() == KnownNumberUse || edge.useKind() == RealNumberUse));
        if (jit->isFilled(node()))
            fpr();
    }
    
    ~SpeculateDoubleOperand()
    {
        ASSERT(m_fprOrInvalid != InvalidFPRReg);
        m_jit->unlock(m_fprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }
    
    FPRReg fpr()
    {
        if (m_fprOrInvalid == InvalidFPRReg)
            m_fprOrInvalid = m_jit->fillSpeculateDouble(edge());
        return m_fprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }
    
private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    FPRReg m_fprOrInvalid;
};
class SpeculateCellOperand {
public:
    explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == CellUse || edge.useKind() == KnownCellUse || edge.useKind() == ObjectUse || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringObjectUse || edge.useKind() == StringOrStringObjectUse));
        if (jit->isFilled(node()))
            gpr();
    }
    
    ~SpeculateCellOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }
    
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateCell(edge());
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }
    
private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
class SpeculateBooleanOperand {
public:
    explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
        if (jit->isFilled(node()))
            gpr();
    }
    
    ~SpeculateBooleanOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }
    
    Edge edge()
    {
        return m_edge;
    }
    
    Node* node()
    {
        return edge().node();
    }
    
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge());
        return m_gprOrInvalid;
    }
    
    void use()
    {
        m_jit->use(node());
    }
    
private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
template<typename StructureLocationType>
void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType structureLocation)
{
    Structure* stringObjectStructure =
        m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure();
    Structure* stringPrototypeStructure = stringObjectStructure->storedPrototype().asCell()->structure();
    ASSERT(stringPrototypeStructure->transitionWatchpointSetIsStillValid());
    
    if (!m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure()))) {
        speculationCheck(
            NotStringObject, JSValueRegs(), 0,
            m_jit.branchPtr(
                JITCompiler::NotEqual, structureLocation, TrustedImmPtr(stringObjectStructure)));
    }

    stringPrototypeStructure->addTransitionWatchpoint(speculationWatchpoint(NotStringObject));
}
#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) do { \
        if (!needsTypeCheck((edge), (typesPassedThrough))) \
            break; \
        typeCheck((source), (edge), (typesPassedThrough), (jumpToFail)); \
    } while (0)
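// Illustrative sketch (not part of the original header): DFG_TYPE_CHECK skips
// emitting the check entirely when the abstract state proves it redundant. The
// register is hypothetical and the branch shown is the JSVALUE64 cell test.
//
//     DFG_TYPE_CHECK(
//         JSValueRegs(valueGPR), edge, SpecCell,
//         m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister));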
2878 } } // namespace JSC::DFG