/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGJITCodeGenerator_h
#define DFGJITCodeGenerator_h

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include <dfg/DFGGenerationInfo.h>
#include <dfg/DFGGraph.h>
#include <dfg/DFGJITCompiler.h>
#include <dfg/DFGOperations.h>
#include <dfg/DFGRegisterBank.h>

namespace JSC { namespace DFG {

class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateCellOperand;

// === JITCodeGenerator ===
//
// This class provides common infrastructure used by the speculative and
// non-speculative JITs: shared mechanisms for virtual and physical
// register management, for calling out from JIT code to helper
// functions, and so on.
class JITCodeGenerator {
protected:
    typedef MacroAssembler::TrustedImm32 TrustedImm32;
    typedef MacroAssembler::Imm32 Imm32;

    // These constants set the spill-order priorities used by the
    // register allocator.
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2,  // no spill
        SpillOrderJS = 4,       // needs spill
        SpillOrderCell = 4,     // needs spill
        SpillOrderInteger = 5,  // needs spill and box
        SpillOrderDouble = 6,   // needs spill and convert
    };
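
    // (A note on how these are consumed — an assumption about RegisterBank's
    // eviction policy rather than a guarantee stated here: when allocate()
    // must evict, lower spill-order values mark cheaper victims. A register
    // holding a constant needs no store at all and refills cheaply, while a
    // double must be boxed as well as stored before its register is freed.)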

public:
    GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
    FPRReg fillDouble(NodeIndex);
    GPRReg fillJSValue(NodeIndex);

    // Lock and unlock GPR & FPR registers.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }

    // Used to check whether a child node is on its last use,
    // so that its machine registers may be reused.
    bool canReuse(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.canReuse();
    }
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }
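
    // A hypothetical sketch of the reuse pattern these two enable (the real
    // callers are the GPRTemporary/FPRTemporary constructors):
    //
    //     IntegerOperand op1(this, node.child1);
    //     GPRReg resultGPR = canReuse(node.child1)
    //         ? reuse(op1.gpr()) // child dies here, so recycle its register
    //         : allocate();      // otherwise take (and maybe spill) a fresh one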

    // Allocate a gpr/fpr.
    GPRReg allocate()
    {
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister)
            spill(spillMe);
        return gpr;
    }
    FPRReg fprAllocate()
    {
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister)
            spill(spillMe);
        return fpr;
    }

    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not, we attempt to
    // avoid spilling values we will need immediately).
    bool isFilled(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() != DataFormatNone;
    }
    bool isFilledDouble(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() == DataFormatDouble;
    }

protected:
    JITCodeGenerator(JITCompiler& jit, bool isSpeculative)
        : m_jit(jit)
        , m_isSpeculative(isSpeculative)
        , m_compileIndex(0)
        , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
        , m_blockHeads(jit.graph().m_blocks.size())
    {
    }

    // These methods convert between raw doubles and doubles boxed as JSValues.
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        m_jit.moveDoubleToPtr(fpr, gpr);
        m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
        return gpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        m_jit.addPtr(GPRInfo::tagTypeNumberRegister, gpr);
        m_jit.movePtrToDouble(gpr, fpr);
        return fpr;
    }
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }
    FPRReg unboxDouble(GPRReg gpr)
    {
        return unboxDouble(gpr, fprAllocate());
    }
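
    // A worked example, assuming the JSVALUE64 value encoding in which
    // tagTypeNumberRegister holds 0xFFFF000000000000: boxing subtracts that
    // constant, which modulo 2^64 is the same as adding 2^48. So the double
    // 1.0 (raw bits 0x3FF0000000000000) boxes to 0x4000000000000000, and
    // unboxing adds the constant back to recover the raw bits before the
    // move to an FPR.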

    // Called on an operand once it has been consumed by a parent node.
    void use(NodeIndex nodeIndex)
    {
        VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use())
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
    }

    // Spill a VirtualRegister to the RegisterFile.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

        // Check the GenerationInfo to see if this value needs writing
        // to the RegisterFile - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled();
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        if (spillFormat == DataFormatDouble) {
            // All values are spilled as JSValues, so box the double via a temporary gpr.
            GPRReg gpr = boxDouble(info.fpr());
            m_jit.storePtr(gpr, JITCompiler::addressFor(spillMe));
            unlock(gpr);
            info.spill(DataFormatJSDouble);
            return;
        }

        // The following code handles JSValues, int32s, and cells.
        ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);

        GPRReg reg = info.gpr();
        // We need to box int32 and cell values ...
        // but on JSVALUE64 boxing a cell is a no-op!
        if (spillFormat == DataFormatInteger)
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);

        // Spill the value, and record it as spilled in its boxed form.
        m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
        info.spill((DataFormat)(spillFormat | DataFormatJS));
    }
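
    // For example, again assuming the JSVALUE64 encoding: spilling the int32
    // value 5 ORs in tagTypeNumberRegister and stores 0xFFFF000000000005 (the
    // boxed-integer form), while a cell's pointer bits are stored unchanged,
    // since on JSVALUE64 a cell pointer is already a valid JSValue encoding.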

    // Checks/accessors for constant values.
    bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
    bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
    bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
    bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
    double valueOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.valueOfDoubleConstant(nodeIndex); }
    JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }

    Identifier* identifier(unsigned index)
    {
        return &m_jit.codeBlock()->identifier(index);
    }

    // Spill all VirtualRegisters back to the RegisterFile.
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
    }

#ifndef NDEBUG
    // Used to ASSERT that flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        return true;
    }
#endif

    // Get the JSValue representation of a constant.
    JSValue constantAsJSValue(NodeIndex nodeIndex)
    {
        Node& node = m_jit.graph()[nodeIndex];
        if (isInt32Constant(nodeIndex))
            return jsNumber(node.int32Constant());
        if (isDoubleConstant(nodeIndex))
            return JSValue(JSValue::EncodeAsDouble, node.numericConstant());
        ASSERT(isJSConstant(nodeIndex));
        return valueOfJSConstant(nodeIndex);
    }
    MacroAssembler::ImmPtr constantAsJSValueAsImmPtr(NodeIndex nodeIndex)
    {
        return MacroAssembler::ImmPtr(JSValue::encode(constantAsJSValue(nodeIndex)));
    }

    // Helper functions to enable code sharing in implementations of bit/shift ops.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case BitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case BitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case BitOr:
            m_jit.or32(op1, op2, result);
            break;
        case BitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
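
    // A hypothetical sketch of a caller (real users live in the speculative
    // and non-speculative code generators, which build on this class):
    //
    //     SpeculateIntegerOperand op1(this, node.child1);
    //     SpeculateIntegerOperand op2(this, node.child2);
    //     GPRTemporary result(this, op1, op2);
    //     bitOp(BitAnd, op1.gpr(), op2.gpr(), result.gpr());
    //     integerResult(result.gpr(), m_compileIndex);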

    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GenerationInfo.)
    void useChildren(Node&);

    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger)
    {
        Node& node = m_jit.graph()[nodeIndex];
        useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        if (format == DataFormatInteger) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInteger(nodeIndex, node.refCount(), reg);
        } else {
            ASSERT(format == DataFormatJSInteger);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(nodeIndex, node.refCount(), reg, format);
        }
    }
    void noResult(NodeIndex nodeIndex)
    {
        Node& node = m_jit.graph()[nodeIndex];
        useChildren(node);
    }
    void cellResult(GPRReg reg, NodeIndex nodeIndex)
    {
        Node& node = m_jit.graph()[nodeIndex];
        useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initCell(nodeIndex, node.refCount(), reg);
    }
    void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS)
    {
        if (format == DataFormatJSInteger)
            m_jit.jitAssertIsJSInt32(reg);

        Node& node = m_jit.graph()[nodeIndex];
        useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(nodeIndex, node.refCount(), reg, format);
    }
    void doubleResult(FPRReg reg, NodeIndex nodeIndex)
    {
        Node& node = m_jit.graph()[nodeIndex];
        useChildren(node);

        VirtualRegister virtualRegister = node.virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initDouble(nodeIndex, node.refCount(), reg);
    }
    void initConstantInfo(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex) || isDoubleConstant(nodeIndex) || isJSConstant(nodeIndex));
        Node& node = m_jit.graph()[nodeIndex];
        m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
    }

    // These methods are used to sort arguments into the correct registers.
    template<GPRReg destA, GPRReg destB>
    void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
    {
        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
        // (1) both are already in arg regs, the right way around.
        // (2) both are already in arg regs, the wrong way around.
        // (3) neither is currently in an arg register.
        // (4) srcA is in its correct reg.
        // (5) srcA is in the incorrect reg.
        // (6) srcB is in its correct reg.
        // (7) srcB is in the incorrect reg.
        //
        // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
        // place (the MacroAssembler will omit redundant moves). This approach will be safe in
        // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
        // (requires a swap) and 7 (must move srcB first, to avoid trampling.)

        if (srcB != destA) {
            // Handle the easy cases - two simple moves.
            m_jit.move(srcA, destA);
            m_jit.move(srcB, destB);
        } else if (srcA != destB) {
            // Handle the non-swap case - just put srcB in place first.
            m_jit.move(srcB, destB);
            m_jit.move(srcA, destA);
        } else
            m_jit.swap(destA, destB);
    }
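
    // For example, instantiated with destA = argumentGPR1, destB = argumentGPR2:
    // srcA = argumentGPR2, srcB = argumentGPR1 is case (2) above - each source
    // occupies the other's destination - so neither move order is safe, and
    // only the final branch's swap handles it correctly.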
    template<FPRReg destA, FPRReg destB>
    void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
    {
        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
        // (1) both are already in arg regs, the right way around.
        // (2) both are already in arg regs, the wrong way around.
        // (3) neither is currently in an arg register.
        // (4) srcA is in its correct reg.
        // (5) srcA is in the incorrect reg.
        // (6) srcB is in its correct reg.
        // (7) srcB is in the incorrect reg.
        //
        // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
        // place (the MacroAssembler will omit redundant moves). This approach will be safe in
        // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
        // (requires a swap) and 7 (must move srcB first, to avoid trampling.)

        if (srcB != destA) {
            // Handle the easy cases - two simple moves.
            m_jit.moveDouble(srcA, destA);
            m_jit.moveDouble(srcB, destB);
            return;
        }

        if (srcA != destB) {
            // Handle the non-swap case - just put srcB in place first.
            m_jit.moveDouble(srcB, destB);
            m_jit.moveDouble(srcA, destA);
            return;
        }

        ASSERT(srcB == destA && srcA == destB);
        // Need to swap; pick a temporary register.
        FPRReg temp;
        if (destA != FPRInfo::argumentFPR3 && destB != FPRInfo::argumentFPR3)
            temp = FPRInfo::argumentFPR3;
        else if (destA != FPRInfo::argumentFPR2 && destB != FPRInfo::argumentFPR2)
            temp = FPRInfo::argumentFPR2;
        else {
            ASSERT(destA != FPRInfo::argumentFPR1 && destB != FPRInfo::argumentFPR1);
            temp = FPRInfo::argumentFPR1;
        }
        m_jit.moveDouble(destA, temp);
        m_jit.moveDouble(destB, destA);
        m_jit.moveDouble(temp, destB);
    }
    void setupStubArguments(GPRReg arg1, GPRReg arg2)
    {
        setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
    }
    void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        // If neither of arg2/arg3 is in our way, then we can move arg1 into place.
        // Then we can use setupTwoStubArgs to fix arg2/arg3.
        if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
            m_jit.move(arg1, GPRInfo::argumentGPR1);
            setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
            return;
        }

        // If neither of arg1/arg3 is in our way, then we can move arg2 into place.
        // Then we can use setupTwoStubArgs to fix arg1/arg3.
        if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
            m_jit.move(arg2, GPRInfo::argumentGPR2);
            setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
            return;
        }

        // If neither of arg1/arg2 is in our way, then we can move arg3 into place.
        // Then we can use setupTwoStubArgs to fix arg1/arg2.
        if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
            m_jit.move(arg3, GPRInfo::argumentGPR3);
            setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
            return;
        }

        // If we get here, we haven't been able to move any of arg1/arg2/arg3.
        // Since all three are blocked, all three must already be in the argument
        // registers. But are they in the right ones?

        // First, ensure arg1 is in place.
        if (arg1 != GPRInfo::argumentGPR1) {
            m_jit.swap(arg1, GPRInfo::argumentGPR1);

            // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
            ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
            // If arg2 was in argumentGPR1 it no longer is (due to the swap).
            // Otherwise arg3 must have been. Mark it as moved.
            if (arg2 == GPRInfo::argumentGPR1)
                arg2 = arg1;
            else
                arg3 = arg1;
        }

        // Either arg2 & arg3 need swapping, or we're all done.
        ASSERT((arg2 == GPRInfo::argumentGPR2 && arg3 == GPRInfo::argumentGPR3)
            || (arg2 == GPRInfo::argumentGPR3 && arg3 == GPRInfo::argumentGPR2));

        if (arg2 != GPRInfo::argumentGPR2)
            m_jit.swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
    }
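
    // For example, if on entry arg1 is in argumentGPR2, arg2 in argumentGPR3
    // and arg3 in argumentGPR1, all three destinations are blocked. The first
    // swap puts arg1 into argumentGPR1 and displaces arg3's value into
    // argumentGPR2; arg2/arg3 are then exactly reversed, and the final swap of
    // argumentGPR2 with argumentGPR3 completes the permutation.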

    // These methods add calls to C++ helper functions.
    void callOperation(J_DFGOperation_EJP operation, GPRReg result, GPRReg arg1, void* pointer)
    {
        ASSERT(isFlushed());

        m_jit.move(arg1, GPRInfo::argumentGPR1);
        m_jit.move(JITCompiler::TrustedImmPtr(pointer), GPRInfo::argumentGPR2);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
        m_jit.move(GPRInfo::returnValueGPR, result);
    }
    void callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        callOperation((J_DFGOperation_EJP)operation, result, arg1, identifier);
    }
    void callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        ASSERT(isFlushed());

        m_jit.move(arg1, GPRInfo::argumentGPR1);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
        m_jit.move(GPRInfo::returnValueGPR, result);
    }
    void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        ASSERT(isFlushed());

        m_jit.move(arg1, GPRInfo::argumentGPR1);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
        m_jit.move(GPRInfo::returnValueGPR, result);
    }
    void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        ASSERT(isFlushed());

        setupStubArguments(arg1, arg2);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
        m_jit.move(GPRInfo::returnValueGPR, result);
    }
    void callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        ASSERT(isFlushed());

        setupStubArguments(arg1, arg2);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
        m_jit.move(GPRInfo::returnValueGPR, result);
    }
    void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1, GPRReg arg2, void* pointer)
    {
        ASSERT(isFlushed());

        setupStubArguments(arg1, arg2);
        m_jit.move(JITCompiler::TrustedImmPtr(pointer), GPRInfo::argumentGPR3);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
    }
    void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        callOperation((V_DFGOperation_EJJP)operation, arg1, arg2, identifier);
    }
    void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        ASSERT(isFlushed());

        setupStubArguments(arg1, arg2, arg3);
        m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);

        appendCallWithExceptionCheck(operation);
    }
    void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
    {
        ASSERT(isFlushed());

        setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);

        m_jit.appendCall(operation);
        m_jit.moveDouble(FPRInfo::returnValueFPR, result);
    }
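
    // A hypothetical sketch of a call site (operationValueAdd stands in for
    // any J_DFGOperation_EJJ helper): registers must be flushed first, and a
    // GPRResult locks returnValueGPR so the returned value survives until used.
    //
    //     flushRegisters();
    //     GPRResult result(this);
    //     callOperation(operationValueAdd, result.gpr(), arg1GPR, arg2GPR);
    //     jsValueResult(result.gpr(), m_compileIndex);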

    void appendCallWithExceptionCheck(const FunctionPtr& function)
    {
        m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].exceptionInfo);
    }

    void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
    {
        m_branches.append(BranchRecord(jump, destination));
    }

    void linkBranches()
    {
        for (size_t i = 0; i < m_branches.size(); ++i) {
            BranchRecord& branch = m_branches[i];
            branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
        }
    }

#ifndef NDEBUG
    void dump(const char* label = 0);
#endif

#if DFG_CONSISTENCY_CHECK
    void checkConsistency();
#else
    void checkConsistency() {}
#endif

    // The JIT, which also provides MacroAssembler functionality.
    JITCompiler& m_jit;
    // This flag is used to distinguish speculative and non-speculative
    // code generation. This is significant when filling spilled values
    // from the RegisterFile. When spilling we attempt to store information
    // as to the type of boxed value being stored (int32, double, cell), and
    // when filling on the speculative path we will retrieve this type info
    // where available. On the non-speculative path, however, we cannot rely
    // on the spill format info, since a value being loaded might have
    // been spilled by either the speculative or non-speculative paths (where
    // we entered the non-speculative path on an intervening bail-out), and
    // the value may have been boxed differently on the two paths.
    bool m_isSpeculative;
    // The current block and node being generated.
    BlockIndex m_block;
    NodeIndex m_compileIndex;
    // Virtual and physical register maps.
    Vector<GenerationInfo, 32> m_generationInfo;
    RegisterBank<GPRInfo> m_gprs;
    RegisterBank<FPRInfo> m_fprs;

    Vector<MacroAssembler::Label> m_blockHeads;
    struct BranchRecord {
        BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
            : jump(jump)
            , destination(destination)
        {
        }

        MacroAssembler::Jump jump;
        BlockIndex destination;
    };
    Vector<BranchRecord, 8> m_branches;
};

// === Operand types ===
//
// IntegerOperand, DoubleOperand and JSValueOperand.
//
// These classes are used to lock the operands to a node into machine
// registers. They implement a pattern of locking a value into a
// register at the point of construction only if it is already in
// a register, and otherwise loading it lazily at the point it is first
// used. We do so in an attempt to avoid spilling one operand
// in order to make space available for another.

class IntegerOperand {
public:
    explicit IntegerOperand(JITCodeGenerator* jit, NodeIndex index)
        : m_jit(jit)
        , m_index(index)
        , m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
        , m_format(DataFormatNone)
#endif
    {
        ASSERT(m_jit);
        if (jit->isFilled(index))
            gpr();
    }

    ~IntegerOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    NodeIndex index() const
    {
        return m_index;
    }

    DataFormat format()
    {
        gpr(); // m_format is set when m_gpr is locked.
        ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
        return m_format;
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
        return m_gprOrInvalid;
    }

private:
    JITCodeGenerator* m_jit;
    NodeIndex m_index;
    GPRReg m_gprOrInvalid;
    DataFormat m_format;
};

class DoubleOperand {
public:
    explicit DoubleOperand(JITCodeGenerator* jit, NodeIndex index)
        : m_jit(jit)
        , m_index(index)
        , m_fprOrInvalid(InvalidFPRReg)
    {
        ASSERT(m_jit);
        if (jit->isFilledDouble(index))
            fpr();
    }

    ~DoubleOperand()
    {
        ASSERT(m_fprOrInvalid != InvalidFPRReg);
        m_jit->unlock(m_fprOrInvalid);
    }

    NodeIndex index() const
    {
        return m_index;
    }

    FPRReg fpr()
    {
        if (m_fprOrInvalid == InvalidFPRReg)
            m_fprOrInvalid = m_jit->fillDouble(index());
        return m_fprOrInvalid;
    }

private:
    JITCodeGenerator* m_jit;
    NodeIndex m_index;
    FPRReg m_fprOrInvalid;
};

class JSValueOperand {
public:
    explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index)
        : m_jit(jit)
        , m_index(index)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        if (jit->isFilled(index))
            gpr();
    }

    ~JSValueOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    NodeIndex index() const
    {
        return m_index;
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillJSValue(index());
        return m_gprOrInvalid;
    }

private:
    JITCodeGenerator* m_jit;
    NodeIndex m_index;
    GPRReg m_gprOrInvalid;
};
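
// A hypothetical usage sketch, assuming a binary node whose operands are
// consumed as untyped JSValues:
//
//     JSValueOperand lhs(this, node.child1);
//     JSValueOperand rhs(this, node.child2);
//     GPRReg lhsGPR = lhs.gpr(); // fills lazily if not already in a register
//     GPRReg rhsGPR = rhs.gpr();
//     // ... emit code consuming lhsGPR/rhsGPR ...
//     // both registers unlock when lhs/rhs go out of scope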


// === Temporaries ===
//
// These classes are used to allocate temporary registers.
// A mechanism is provided to attempt to reuse the registers
// currently allocated to child nodes whose value is consumed
// by, and not live after, this operation.

class GPRTemporary {
public:
    GPRTemporary(JITCodeGenerator*);
    GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&);
    GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
    GPRTemporary(JITCodeGenerator*, IntegerOperand&);
    GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&);
    GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
    GPRTemporary(JITCodeGenerator*, JSValueOperand&);

    ~GPRTemporary()
    {
        m_jit->unlock(gpr());
    }

    GPRReg gpr()
    {
        ASSERT(m_gpr != InvalidGPRReg);
        return m_gpr;
    }

protected:
    GPRTemporary(JITCodeGenerator* jit, GPRReg lockedGPR)
        : m_jit(jit)
        , m_gpr(lockedGPR)
    {
    }

private:
    JITCodeGenerator* m_jit;
    GPRReg m_gpr;
};

class FPRTemporary {
public:
    FPRTemporary(JITCodeGenerator*);
    FPRTemporary(JITCodeGenerator*, DoubleOperand&);
    FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&);

    ~FPRTemporary()
    {
        m_jit->unlock(fpr());
    }

    FPRReg fpr() const
    {
        ASSERT(m_fpr != InvalidFPRReg);
        return m_fpr;
    }

protected:
    FPRTemporary(JITCodeGenerator* jit, FPRReg lockedFPR)
        : m_jit(jit)
        , m_fpr(lockedFPR)
    {
    }

private:
    JITCodeGenerator* m_jit;
    FPRReg m_fpr;
};


// === Results ===
//
// These classes lock the result of a call to a C++ helper function.

class GPRResult : public GPRTemporary {
public:
    GPRResult(JITCodeGenerator* jit)
        : GPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    static GPRReg lockedResult(JITCodeGenerator* jit)
    {
        jit->lock(GPRInfo::returnValueGPR);
        return GPRInfo::returnValueGPR;
    }
};

class FPRResult : public FPRTemporary {
public:
    FPRResult(JITCodeGenerator* jit)
        : FPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    static FPRReg lockedResult(JITCodeGenerator* jit)
    {
        jit->lock(FPRInfo::returnValueFPR);
        return FPRInfo::returnValueFPR;
    }
};

} } // namespace JSC::DFG

#endif
#endif