2 * Copyright (C) 2011 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef DFGJITCodeGenerator_h
27 #define DFGJITCodeGenerator_h
31 #include "CodeBlock.h"
32 #include <dfg/DFGGenerationInfo.h>
33 #include <dfg/DFGGraph.h>
34 #include <dfg/DFGJITCompiler.h>
35 #include <dfg/DFGOperations.h>
36 #include <dfg/DFGRegisterBank.h>
38 namespace JSC
{ namespace DFG
{
40 class SpeculateIntegerOperand
;
41 class SpeculateStrictInt32Operand
;
42 class SpeculateCellOperand
;
45 // === JITCodeGenerator ===
47 // This class provides common infrastructure used by the speculative &
48 // non-speculative JITs. Provides common mechanisms for virtual and
49 // physical register management, calls out from JIT code to helper
51 class JITCodeGenerator
{
53 typedef MacroAssembler::TrustedImm32 TrustedImm32
;
54 typedef MacroAssembler::Imm32 Imm32
;
56 // These constants are used to set priorities for spill order for
57 // the register allocator.
59 SpillOrderConstant
= 1, // no spill, and cheap fill
60 SpillOrderSpilled
= 2, // no spill
61 SpillOrderJS
= 4, // needs spill
62 SpillOrderCell
= 4, // needs spill
63 SpillOrderInteger
= 5, // needs spill and box
64 SpillOrderDouble
= 6, // needs spill and convert
69 GPRReg
fillInteger(NodeIndex
, DataFormat
& returnFormat
);
70 FPRReg
fillDouble(NodeIndex
);
71 GPRReg
fillJSValue(NodeIndex
);
73 // lock and unlock GPR & FPR registers.
82 void unlock(GPRReg reg
)
86 void unlock(FPRReg reg
)
91 // Used to check whether a child node is on its last use,
92 // and its machine registers may be reused.
93 bool canReuse(NodeIndex nodeIndex
)
95 VirtualRegister virtualRegister
= m_jit
.graph()[nodeIndex
].virtualRegister();
96 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
97 return info
.canReuse();
99 GPRReg
reuse(GPRReg reg
)
104 FPRReg
reuse(FPRReg reg
)
110 // Allocate a gpr/fpr.
113 VirtualRegister spillMe
;
114 GPRReg gpr
= m_gprs
.allocate(spillMe
);
115 if (spillMe
!= InvalidVirtualRegister
)
121 VirtualRegister spillMe
;
122 FPRReg fpr
= m_fprs
.allocate(spillMe
);
123 if (spillMe
!= InvalidVirtualRegister
)
128 // Check whether a VirtualRegsiter is currently in a machine register.
129 // We use this when filling operands to fill those that are already in
130 // machine registers first (by locking VirtualRegsiters that are already
131 // in machine register before filling those that are not we attempt to
132 // avoid spilling values we will need immediately).
133 bool isFilled(NodeIndex nodeIndex
)
135 VirtualRegister virtualRegister
= m_jit
.graph()[nodeIndex
].virtualRegister();
136 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
137 return info
.registerFormat() != DataFormatNone
;
139 bool isFilledDouble(NodeIndex nodeIndex
)
141 VirtualRegister virtualRegister
= m_jit
.graph()[nodeIndex
].virtualRegister();
142 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
143 return info
.registerFormat() == DataFormatDouble
;
147 JITCodeGenerator(JITCompiler
& jit
, bool isSpeculative
)
149 , m_isSpeculative(isSpeculative
)
151 , m_generationInfo(m_jit
.codeBlock()->m_numCalleeRegisters
)
152 , m_blockHeads(jit
.graph().m_blocks
.size())
156 // These methods convert between doubles, and doubles boxed and JSValues.
157 GPRReg
boxDouble(FPRReg fpr
, GPRReg gpr
)
159 m_jit
.moveDoubleToPtr(fpr
, gpr
);
160 m_jit
.subPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
163 FPRReg
unboxDouble(GPRReg gpr
, FPRReg fpr
)
165 m_jit
.addPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
166 m_jit
.movePtrToDouble(gpr
, fpr
);
169 GPRReg
boxDouble(FPRReg fpr
)
171 return boxDouble(fpr
, allocate());
173 FPRReg
unboxDouble(GPRReg gpr
)
175 return unboxDouble(gpr
, fprAllocate());
178 // Called on an operand once it has been consumed by a parent node.
179 void use(NodeIndex nodeIndex
)
181 VirtualRegister virtualRegister
= m_jit
.graph()[nodeIndex
].virtualRegister();
182 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
184 // use() returns true when the value becomes dead, and any
185 // associated resources may be freed.
189 // Release the associated machine registers.
190 DataFormat registerFormat
= info
.registerFormat();
191 if (registerFormat
== DataFormatDouble
)
192 m_fprs
.release(info
.fpr());
193 else if (registerFormat
!= DataFormatNone
)
194 m_gprs
.release(info
.gpr());
197 // Spill a VirtualRegister to the RegisterFile.
198 void spill(VirtualRegister spillMe
)
200 GenerationInfo
& info
= m_generationInfo
[spillMe
];
202 // Check the GenerationInfo to see if this value need writing
203 // to the RegisterFile - if not, mark it as spilled & return.
204 if (!info
.needsSpill()) {
209 DataFormat spillFormat
= info
.registerFormat();
210 if (spillFormat
== DataFormatDouble
) {
211 // All values are spilled as JSValues, so box the double via a temporary gpr.
212 GPRReg gpr
= boxDouble(info
.fpr());
213 m_jit
.storePtr(gpr
, JITCompiler::addressFor(spillMe
));
215 info
.spill(DataFormatJSDouble
);
219 // The following code handles JSValues, int32s, and cells.
220 ASSERT(spillFormat
== DataFormatInteger
|| spillFormat
== DataFormatCell
|| spillFormat
& DataFormatJS
);
222 GPRReg reg
= info
.gpr();
223 // We need to box int32 and cell values ...
224 // but on JSVALUE64 boxing a cell is a no-op!
225 if (spillFormat
== DataFormatInteger
)
226 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, reg
);
228 // Spill the value, and record it as spilled in its boxed form.
229 m_jit
.storePtr(reg
, JITCompiler::addressFor(spillMe
));
230 info
.spill((DataFormat
)(spillFormat
| DataFormatJS
));
233 // Checks/accessors for constant values.
234 bool isConstant(NodeIndex nodeIndex
) { return m_jit
.isConstant(nodeIndex
); }
235 bool isInt32Constant(NodeIndex nodeIndex
) { return m_jit
.isInt32Constant(nodeIndex
); }
236 bool isDoubleConstant(NodeIndex nodeIndex
) { return m_jit
.isDoubleConstant(nodeIndex
); }
237 bool isJSConstant(NodeIndex nodeIndex
) { return m_jit
.isJSConstant(nodeIndex
); }
238 int32_t valueOfInt32Constant(NodeIndex nodeIndex
) { return m_jit
.valueOfInt32Constant(nodeIndex
); }
239 double valueOfDoubleConstant(NodeIndex nodeIndex
) { return m_jit
.valueOfDoubleConstant(nodeIndex
); }
240 JSValue
valueOfJSConstant(NodeIndex nodeIndex
) { return m_jit
.valueOfJSConstant(nodeIndex
); }
242 Identifier
* identifier(unsigned index
)
244 return &m_jit
.codeBlock()->identifier(index
);
247 // Spill all VirtualRegisters back to the RegisterFile.
248 void flushRegisters()
250 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
251 if (iter
.name() != InvalidVirtualRegister
) {
256 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
257 if (iter
.name() != InvalidVirtualRegister
) {
265 // Used to ASSERT flushRegisters() has been called prior to
266 // calling out from JIT code to a C helper function.
269 for (gpr_iterator iter
= m_gprs
.begin(); iter
!= m_gprs
.end(); ++iter
) {
270 if (iter
.name() != InvalidVirtualRegister
)
273 for (fpr_iterator iter
= m_fprs
.begin(); iter
!= m_fprs
.end(); ++iter
) {
274 if (iter
.name() != InvalidVirtualRegister
)
281 // Get the JSValue representation of a constant.
282 JSValue
constantAsJSValue(NodeIndex nodeIndex
)
284 Node
& node
= m_jit
.graph()[nodeIndex
];
285 if (isInt32Constant(nodeIndex
))
286 return jsNumber(node
.int32Constant());
287 if (isDoubleConstant(nodeIndex
))
288 return JSValue(JSValue::EncodeAsDouble
, node
.numericConstant());
289 ASSERT(isJSConstant(nodeIndex
));
290 return valueOfJSConstant(nodeIndex
);
292 MacroAssembler::ImmPtr
constantAsJSValueAsImmPtr(NodeIndex nodeIndex
)
294 return MacroAssembler::ImmPtr(JSValue::encode(constantAsJSValue(nodeIndex
)));
297 // Helper functions to enable code sharing in implementations of bit/shift ops.
298 void bitOp(NodeType op
, int32_t imm
, GPRReg op1
, GPRReg result
)
302 m_jit
.and32(Imm32(imm
), op1
, result
);
305 m_jit
.or32(Imm32(imm
), op1
, result
);
308 m_jit
.xor32(Imm32(imm
), op1
, result
);
311 ASSERT_NOT_REACHED();
314 void bitOp(NodeType op
, GPRReg op1
, GPRReg op2
, GPRReg result
)
318 m_jit
.and32(op1
, op2
, result
);
321 m_jit
.or32(op1
, op2
, result
);
324 m_jit
.xor32(op1
, op2
, result
);
327 ASSERT_NOT_REACHED();
330 void shiftOp(NodeType op
, GPRReg op1
, int32_t shiftAmount
, GPRReg result
)
334 m_jit
.rshift32(op1
, Imm32(shiftAmount
), result
);
337 m_jit
.lshift32(op1
, Imm32(shiftAmount
), result
);
340 m_jit
.urshift32(op1
, Imm32(shiftAmount
), result
);
343 ASSERT_NOT_REACHED();
346 void shiftOp(NodeType op
, GPRReg op1
, GPRReg shiftAmount
, GPRReg result
)
350 m_jit
.rshift32(op1
, shiftAmount
, result
);
353 m_jit
.lshift32(op1
, shiftAmount
, result
);
356 m_jit
.urshift32(op1
, shiftAmount
, result
);
359 ASSERT_NOT_REACHED();
363 // Called once a node has completed code generation but prior to setting
364 // its result, to free up its children. (This must happen prior to setting
365 // the nodes result, since the node may have the same VirtualRegister as
366 // a child, and as such will use the same GeneratioInfo).
367 void useChildren(Node
&);
369 // These method called to initialize the the GenerationInfo
370 // to describe the result of an operation.
371 void integerResult(GPRReg reg
, NodeIndex nodeIndex
, DataFormat format
= DataFormatInteger
)
373 Node
& node
= m_jit
.graph()[nodeIndex
];
376 VirtualRegister virtualRegister
= node
.virtualRegister();
377 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
379 if (format
== DataFormatInteger
) {
380 m_jit
.jitAssertIsInt32(reg
);
381 m_gprs
.retain(reg
, virtualRegister
, SpillOrderInteger
);
382 info
.initInteger(nodeIndex
, node
.refCount(), reg
);
384 ASSERT(format
== DataFormatJSInteger
);
385 m_jit
.jitAssertIsJSInt32(reg
);
386 m_gprs
.retain(reg
, virtualRegister
, SpillOrderJS
);
387 info
.initJSValue(nodeIndex
, node
.refCount(), reg
, format
);
390 void noResult(NodeIndex nodeIndex
)
392 Node
& node
= m_jit
.graph()[nodeIndex
];
395 void cellResult(GPRReg reg
, NodeIndex nodeIndex
)
397 Node
& node
= m_jit
.graph()[nodeIndex
];
400 VirtualRegister virtualRegister
= node
.virtualRegister();
401 m_gprs
.retain(reg
, virtualRegister
, SpillOrderCell
);
402 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
403 info
.initCell(nodeIndex
, node
.refCount(), reg
);
405 void jsValueResult(GPRReg reg
, NodeIndex nodeIndex
, DataFormat format
= DataFormatJS
)
407 if (format
== DataFormatJSInteger
)
408 m_jit
.jitAssertIsJSInt32(reg
);
410 Node
& node
= m_jit
.graph()[nodeIndex
];
413 VirtualRegister virtualRegister
= node
.virtualRegister();
414 m_gprs
.retain(reg
, virtualRegister
, SpillOrderJS
);
415 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
416 info
.initJSValue(nodeIndex
, node
.refCount(), reg
, format
);
418 void doubleResult(FPRReg reg
, NodeIndex nodeIndex
)
420 Node
& node
= m_jit
.graph()[nodeIndex
];
423 VirtualRegister virtualRegister
= node
.virtualRegister();
424 m_fprs
.retain(reg
, virtualRegister
, SpillOrderDouble
);
425 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
426 info
.initDouble(nodeIndex
, node
.refCount(), reg
);
428 void initConstantInfo(NodeIndex nodeIndex
)
430 ASSERT(isInt32Constant(nodeIndex
) || isDoubleConstant(nodeIndex
) || isJSConstant(nodeIndex
));
431 Node
& node
= m_jit
.graph()[nodeIndex
];
432 m_generationInfo
[node
.virtualRegister()].initConstant(nodeIndex
, node
.refCount());
435 // These methods used to sort arguments into the correct registers.
436 template<GPRReg destA
, GPRReg destB
>
437 void setupTwoStubArgs(GPRReg srcA
, GPRReg srcB
)
439 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
440 // (1) both are already in arg regs, the right way around.
441 // (2) both are already in arg regs, the wrong way around.
442 // (3) neither are currently in arg registers.
443 // (4) srcA in in its correct reg.
444 // (5) srcA in in the incorrect reg.
445 // (6) srcB in in its correct reg.
446 // (7) srcB in in the incorrect reg.
448 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
449 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
450 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
451 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
454 // Handle the easy cases - two simple moves.
455 m_jit
.move(srcA
, destA
);
456 m_jit
.move(srcB
, destB
);
457 } else if (srcA
!= destB
) {
458 // Handle the non-swap case - just put srcB in place first.
459 m_jit
.move(srcB
, destB
);
460 m_jit
.move(srcA
, destA
);
462 m_jit
.swap(destB
, destB
);
464 template<FPRReg destA
, FPRReg destB
>
465 void setupTwoStubArgs(FPRReg srcA
, FPRReg srcB
)
467 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
468 // (1) both are already in arg regs, the right way around.
469 // (2) both are already in arg regs, the wrong way around.
470 // (3) neither are currently in arg registers.
471 // (4) srcA in in its correct reg.
472 // (5) srcA in in the incorrect reg.
473 // (6) srcB in in its correct reg.
474 // (7) srcB in in the incorrect reg.
476 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
477 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
478 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
479 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
482 // Handle the easy cases - two simple moves.
483 m_jit
.moveDouble(srcA
, destA
);
484 m_jit
.moveDouble(srcB
, destB
);
489 // Handle the non-swap case - just put srcB in place first.
490 m_jit
.moveDouble(srcB
, destB
);
491 m_jit
.moveDouble(srcA
, destA
);
495 ASSERT(srcB
== destA
&& srcA
== destB
);
496 // Need to swap; pick a temporary register.
498 if (destA
!= FPRInfo::argumentFPR3
&& destA
!= FPRInfo::argumentFPR3
)
499 temp
= FPRInfo::argumentFPR3
;
500 else if (destA
!= FPRInfo::argumentFPR2
&& destA
!= FPRInfo::argumentFPR2
)
501 temp
= FPRInfo::argumentFPR2
;
503 ASSERT(destA
!= FPRInfo::argumentFPR1
&& destA
!= FPRInfo::argumentFPR1
);
504 temp
= FPRInfo::argumentFPR1
;
506 m_jit
.moveDouble(destA
, temp
);
507 m_jit
.moveDouble(destB
, destA
);
508 m_jit
.moveDouble(temp
, destB
);
510 void setupStubArguments(GPRReg arg1
, GPRReg arg2
)
512 setupTwoStubArgs
<GPRInfo::argumentGPR1
, GPRInfo::argumentGPR2
>(arg1
, arg2
);
514 void setupStubArguments(GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
516 // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
517 // Then we can use setupTwoStubArgs to fix arg2/arg3.
518 if (arg2
!= GPRInfo::argumentGPR1
&& arg3
!= GPRInfo::argumentGPR1
) {
519 m_jit
.move(arg1
, GPRInfo::argumentGPR1
);
520 setupTwoStubArgs
<GPRInfo::argumentGPR2
, GPRInfo::argumentGPR3
>(arg2
, arg3
);
524 // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
525 // Then we can use setupTwoStubArgs to fix arg1/arg3.
526 if (arg1
!= GPRInfo::argumentGPR2
&& arg3
!= GPRInfo::argumentGPR2
) {
527 m_jit
.move(arg2
, GPRInfo::argumentGPR2
);
528 setupTwoStubArgs
<GPRInfo::argumentGPR1
, GPRInfo::argumentGPR3
>(arg1
, arg3
);
532 // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
533 // Then we can use setupTwoStubArgs to fix arg1/arg2.
534 if (arg1
!= GPRInfo::argumentGPR3
&& arg2
!= GPRInfo::argumentGPR3
) {
535 m_jit
.move(arg3
, GPRInfo::argumentGPR3
);
536 setupTwoStubArgs
<GPRInfo::argumentGPR1
, GPRInfo::argumentGPR2
>(arg1
, arg2
);
540 // If we get here, we haven't been able to move any of arg1/arg2/arg3.
541 // Since all three are blocked, then all three must already be in the argument register.
542 // But are they in the right ones?
544 // First, ensure arg1 is in place.
545 if (arg1
!= GPRInfo::argumentGPR1
) {
546 m_jit
.swap(arg1
, GPRInfo::argumentGPR1
);
548 // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
549 ASSERT(arg2
== GPRInfo::argumentGPR1
|| arg3
== GPRInfo::argumentGPR1
);
550 // If arg2 was in argumentGPR1 it no longer is (due to the swap).
551 // Otherwise arg3 must have been. Mark him as moved.
552 if (arg2
== GPRInfo::argumentGPR1
)
558 // Either arg2 & arg3 need swapping, or we're all done.
559 ASSERT((arg2
== GPRInfo::argumentGPR2
|| arg3
== GPRInfo::argumentGPR3
)
560 || (arg2
== GPRInfo::argumentGPR3
|| arg3
== GPRInfo::argumentGPR2
));
562 if (arg2
!= GPRInfo::argumentGPR2
)
563 m_jit
.swap(GPRInfo::argumentGPR2
, GPRInfo::argumentGPR3
);
566 // These methods add calls to C++ helper functions.
567 void callOperation(J_DFGOperation_EJP operation
, GPRReg result
, GPRReg arg1
, void* pointer
)
571 m_jit
.move(arg1
, GPRInfo::argumentGPR1
);
572 m_jit
.move(JITCompiler::TrustedImmPtr(pointer
), GPRInfo::argumentGPR2
);
573 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
575 appendCallWithExceptionCheck(operation
);
576 m_jit
.move(GPRInfo::returnValueGPR
, result
);
578 void callOperation(J_DFGOperation_EJI operation
, GPRReg result
, GPRReg arg1
, Identifier
* identifier
)
580 callOperation((J_DFGOperation_EJP
)operation
, result
, arg1
, identifier
);
582 void callOperation(J_DFGOperation_EJ operation
, GPRReg result
, GPRReg arg1
)
586 m_jit
.move(arg1
, GPRInfo::argumentGPR1
);
587 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
589 appendCallWithExceptionCheck(operation
);
590 m_jit
.move(GPRInfo::returnValueGPR
, result
);
592 void callOperation(Z_DFGOperation_EJ operation
, GPRReg result
, GPRReg arg1
)
596 m_jit
.move(arg1
, GPRInfo::argumentGPR1
);
597 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
599 appendCallWithExceptionCheck(operation
);
600 m_jit
.move(GPRInfo::returnValueGPR
, result
);
602 void callOperation(Z_DFGOperation_EJJ operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
606 setupStubArguments(arg1
, arg2
);
607 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
609 appendCallWithExceptionCheck(operation
);
610 m_jit
.move(GPRInfo::returnValueGPR
, result
);
612 void callOperation(J_DFGOperation_EJJ operation
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
616 setupStubArguments(arg1
, arg2
);
617 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
619 appendCallWithExceptionCheck(operation
);
620 m_jit
.move(GPRInfo::returnValueGPR
, result
);
622 void callOperation(V_DFGOperation_EJJP operation
, GPRReg arg1
, GPRReg arg2
, void* pointer
)
626 setupStubArguments(arg1
, arg2
);
627 m_jit
.move(JITCompiler::TrustedImmPtr(pointer
), GPRInfo::argumentGPR3
);
628 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
630 appendCallWithExceptionCheck(operation
);
632 void callOperation(V_DFGOperation_EJJI operation
, GPRReg arg1
, GPRReg arg2
, Identifier
* identifier
)
634 callOperation((V_DFGOperation_EJJP
)operation
, arg1
, arg2
, identifier
);
636 void callOperation(V_DFGOperation_EJJJ operation
, GPRReg arg1
, GPRReg arg2
, GPRReg arg3
)
640 setupStubArguments(arg1
, arg2
, arg3
);
641 m_jit
.move(GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
643 appendCallWithExceptionCheck(operation
);
645 void callOperation(D_DFGOperation_DD operation
, FPRReg result
, FPRReg arg1
, FPRReg arg2
)
649 setupTwoStubArgs
<FPRInfo::argumentFPR0
, FPRInfo::argumentFPR1
>(arg1
, arg2
);
651 m_jit
.appendCall(operation
);
652 m_jit
.moveDouble(FPRInfo::returnValueFPR
, result
);
655 void appendCallWithExceptionCheck(const FunctionPtr
& function
)
657 m_jit
.appendCallWithExceptionCheck(function
, m_jit
.graph()[m_compileIndex
].exceptionInfo
);
660 void addBranch(const MacroAssembler::Jump
& jump
, BlockIndex destination
)
662 m_branches
.append(BranchRecord(jump
, destination
));
667 for (size_t i
= 0; i
< m_branches
.size(); ++i
) {
668 BranchRecord
& branch
= m_branches
[i
];
669 branch
.jump
.linkTo(m_blockHeads
[branch
.destination
], &m_jit
);
674 void dump(const char* label
= 0);
677 #if DFG_CONSISTENCY_CHECK
678 void checkConsistency();
680 void checkConsistency() {}
// The JIT compiler, which also provides MacroAssembler functionality.
685 // This flag is used to distinguish speculative and non-speculative
686 // code generation. This is significant when filling spilled values
687 // from the RegisterFile. When spilling we attempt to store information
688 // as to the type of boxed value being stored (int32, double, cell), and
689 // when filling on the speculative path we will retrieve this type info
690 // where available. On the non-speculative path, however, we cannot rely
// on the spill format info, since a value being loaded might have
692 // been spilled by either the speculative or non-speculative paths (where
693 // we entered the non-speculative path on an intervening bail-out), and
694 // the value may have been boxed differently on the two paths.
695 bool m_isSpeculative
;
696 // The current node being generated.
698 NodeIndex m_compileIndex
;
699 // Virtual and physical register maps.
700 Vector
<GenerationInfo
, 32> m_generationInfo
;
701 RegisterBank
<GPRInfo
> m_gprs
;
702 RegisterBank
<FPRInfo
> m_fprs
;
704 Vector
<MacroAssembler::Label
> m_blockHeads
;
705 struct BranchRecord
{
706 BranchRecord(MacroAssembler::Jump jump
, BlockIndex destination
)
708 , destination(destination
)
712 MacroAssembler::Jump jump
;
713 BlockIndex destination
;
715 Vector
<BranchRecord
, 8> m_branches
;
718 // === Operand types ===
720 // IntegerOperand, DoubleOperand and JSValueOperand.
722 // These classes are used to lock the operands to a node into machine
723 // registers. These classes implement of pattern of locking a value
724 // into register at the point of construction only if it is already in
725 // registers, and otherwise loading it lazily at the point it is first
726 // used. We do so in order to attempt to avoid spilling one operand
727 // in order to make space available for another.
729 class IntegerOperand
{
731 explicit IntegerOperand(JITCodeGenerator
* jit
, NodeIndex index
)
734 , m_gprOrInvalid(InvalidGPRReg
)
736 , m_format(DataFormatNone
)
740 if (jit
->isFilled(index
))
746 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
747 m_jit
->unlock(m_gprOrInvalid
);
750 NodeIndex
index() const
757 gpr(); // m_format is set when m_gpr is locked.
758 ASSERT(m_format
== DataFormatInteger
|| m_format
== DataFormatJSInteger
);
764 if (m_gprOrInvalid
== InvalidGPRReg
)
765 m_gprOrInvalid
= m_jit
->fillInteger(index(), m_format
);
766 return m_gprOrInvalid
;
770 JITCodeGenerator
* m_jit
;
772 GPRReg m_gprOrInvalid
;
776 class DoubleOperand
{
778 explicit DoubleOperand(JITCodeGenerator
* jit
, NodeIndex index
)
781 , m_fprOrInvalid(InvalidFPRReg
)
784 if (jit
->isFilledDouble(index
))
790 ASSERT(m_fprOrInvalid
!= InvalidFPRReg
);
791 m_jit
->unlock(m_fprOrInvalid
);
794 NodeIndex
index() const
801 if (m_fprOrInvalid
== InvalidFPRReg
)
802 m_fprOrInvalid
= m_jit
->fillDouble(index());
803 return m_fprOrInvalid
;
807 JITCodeGenerator
* m_jit
;
809 FPRReg m_fprOrInvalid
;
812 class JSValueOperand
{
814 explicit JSValueOperand(JITCodeGenerator
* jit
, NodeIndex index
)
817 , m_gprOrInvalid(InvalidGPRReg
)
820 if (jit
->isFilled(index
))
826 ASSERT(m_gprOrInvalid
!= InvalidGPRReg
);
827 m_jit
->unlock(m_gprOrInvalid
);
830 NodeIndex
index() const
837 if (m_gprOrInvalid
== InvalidGPRReg
)
838 m_gprOrInvalid
= m_jit
->fillJSValue(index());
839 return m_gprOrInvalid
;
843 JITCodeGenerator
* m_jit
;
845 GPRReg m_gprOrInvalid
;
849 // === Temporaries ===
851 // These classes are used to allocate temporary registers.
852 // A mechanism is provided to attempt to reuse the registers
853 // currently allocated to child nodes whose value is consumed
854 // by, and not live after, this operation.
858 GPRTemporary(JITCodeGenerator
*);
859 GPRTemporary(JITCodeGenerator
*, SpeculateIntegerOperand
&);
860 GPRTemporary(JITCodeGenerator
*, SpeculateIntegerOperand
&, SpeculateIntegerOperand
&);
861 GPRTemporary(JITCodeGenerator
*, IntegerOperand
&);
862 GPRTemporary(JITCodeGenerator
*, IntegerOperand
&, IntegerOperand
&);
863 GPRTemporary(JITCodeGenerator
*, SpeculateCellOperand
&);
864 GPRTemporary(JITCodeGenerator
*, JSValueOperand
&);
868 m_jit
->unlock(gpr());
873 ASSERT(m_gpr
!= InvalidGPRReg
);
878 GPRTemporary(JITCodeGenerator
* jit
, GPRReg lockedGPR
)
885 JITCodeGenerator
* m_jit
;
891 FPRTemporary(JITCodeGenerator
*);
892 FPRTemporary(JITCodeGenerator
*, DoubleOperand
&);
893 FPRTemporary(JITCodeGenerator
*, DoubleOperand
&, DoubleOperand
&);
897 m_jit
->unlock(fpr());
902 ASSERT(m_fpr
!= InvalidFPRReg
);
907 FPRTemporary(JITCodeGenerator
* jit
, FPRReg lockedFPR
)
914 JITCodeGenerator
* m_jit
;
921 // These classes lock the result of a call to a C++ helper function.
923 class GPRResult
: public GPRTemporary
{
925 GPRResult(JITCodeGenerator
* jit
)
926 : GPRTemporary(jit
, lockedResult(jit
))
931 static GPRReg
lockedResult(JITCodeGenerator
* jit
)
933 jit
->lock(GPRInfo::returnValueGPR
);
934 return GPRInfo::returnValueGPR
;
938 class FPRResult
: public FPRTemporary
{
940 FPRResult(JITCodeGenerator
* jit
)
941 : FPRTemporary(jit
, lockedResult(jit
))
946 static FPRReg
lockedResult(JITCodeGenerator
* jit
)
948 jit
->lock(FPRInfo::returnValueFPR
);
949 return FPRInfo::returnValueFPR
;
953 } } // namespace JSC::DFG