apple/javascriptcore.git (JavaScriptCore-1097.13) - dfg/DFGByteCodeParser.cpp
1 /*
2 * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "CallLinkStatus.h"
32 #include "CodeBlock.h"
33 #include "DFGByteCodeCache.h"
34 #include "DFGCapabilities.h"
35 #include "GetByIdStatus.h"
36 #include "MethodCallLinkStatus.h"
37 #include "PutByIdStatus.h"
38 #include <wtf/HashMap.h>
39 #include <wtf/MathExtras.h>
40
41 namespace JSC { namespace DFG {
42
43 // === ByteCodeParser ===
44 //
45 // This class is used to compile the dataflow graph from a CodeBlock.
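// Parsing proceeds roughly in the order of the private methods declared below: parse the
// bytecode into basic blocks, determine reachability and link block successors, process the
// Phi stacks for locals and arguments, fix variable access predictions, and allocate virtual registers.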
46 class ByteCodeParser {
47 public:
48 ByteCodeParser(Graph& graph)
49 : m_globalData(&graph.m_globalData)
50 , m_codeBlock(graph.m_codeBlock)
51 , m_profiledBlock(graph.m_profiledBlock)
52 , m_graph(graph)
53 , m_currentBlock(0)
54 , m_currentIndex(0)
55 , m_currentProfilingIndex(0)
56 , m_constantUndefined(UINT_MAX)
57 , m_constantNull(UINT_MAX)
58 , m_constantNaN(UINT_MAX)
59 , m_constant1(UINT_MAX)
60 , m_constants(m_codeBlock->numberOfConstantRegisters())
61 , m_numArguments(m_codeBlock->numParameters())
62 , m_numLocals(m_codeBlock->m_numCalleeRegisters)
63 , m_preservedVars(m_codeBlock->m_numVars)
64 , m_parameterSlots(0)
65 , m_numPassedVarArgs(0)
66 , m_globalResolveNumber(0)
67 , m_inlineStackTop(0)
68 , m_haveBuiltOperandMaps(false)
69 , m_emptyJSValueIndex(UINT_MAX)
70 {
71 ASSERT(m_profiledBlock);
72
73 for (int i = 0; i < m_codeBlock->m_numVars; ++i)
74 m_preservedVars.set(i);
75 }
76
77 // Parse a full CodeBlock of bytecode.
78 bool parse();
79
80 private:
81 // Just parse from m_currentIndex to the end of the current CodeBlock.
82 void parseCodeBlock();
83
84 // Helper for min and max.
85 bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
86
87 // Handle calls. This resolves issues surrounding inlining and intrinsics.
88 void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
89 void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
90 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
91 bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
92 // Handle setting the result of an intrinsic.
93 void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
94 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
95 bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
96 // Prepare to parse a block.
97 void prepareToParseBlock();
98 // Parse a single basic block of bytecode instructions.
99 bool parseBlock(unsigned limit);
100         // Find reachable code and set up predecessor links in the graph's BasicBlocks.
101 void determineReachability();
102 // Enqueue a block onto the worklist, if necessary.
103 void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
104 // Link block successors.
105 void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
106 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
107 // Link GetLocal & SetLocal nodes, to ensure live values are generated.
108 enum PhiStackType {
109 LocalPhiStack,
110 ArgumentPhiStack
111 };
112 template<PhiStackType stackType>
113 void processPhiStack();
114
115 void fixVariableAccessPredictions();
116 // Add spill locations to nodes.
117 void allocateVirtualRegisters();
118
119 VariableAccessData* newVariableAccessData(int operand)
120 {
121 ASSERT(operand < FirstConstantRegisterIndex);
122
123 m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
124 return &m_graph.m_variableAccessData.last();
125 }
126
127 // Get/Set the operands/result of a bytecode instruction.
128 NodeIndex getDirect(int operand)
129 {
130 // Is this a constant?
131 if (operand >= FirstConstantRegisterIndex) {
132 unsigned constant = operand - FirstConstantRegisterIndex;
133 ASSERT(constant < m_constants.size());
134 return getJSConstant(constant);
135 }
136
137 // Is this an argument?
138 if (operandIsArgument(operand))
139 return getArgument(operand);
140
141 // Must be a local.
142 return getLocal((unsigned)operand);
143 }
144 NodeIndex get(int operand)
145 {
146 return getDirect(m_inlineStackTop->remapOperand(operand));
147 }
148 void setDirect(int operand, NodeIndex value)
149 {
150 // Is this an argument?
151 if (operandIsArgument(operand)) {
152 setArgument(operand, value);
153 return;
154 }
155
156 // Must be a local.
157 setLocal((unsigned)operand, value);
158 }
159 void set(int operand, NodeIndex value)
160 {
161 setDirect(m_inlineStackTop->remapOperand(operand), value);
162 }
163
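        // Take the lazy-operand value profile's prediction for this GetLocal (keyed by the current
        // bytecode index and the local it reads) and merge it into the node's VariableAccessData.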
164 NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
165 {
166 Node& node = m_graph[nodeIndex];
167 ASSERT(node.op() == GetLocal);
168 ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
169 PredictedType prediction =
170 m_inlineStackTop->m_lazyOperands.prediction(
171 LazyOperandValueProfileKey(m_currentIndex, node.local()));
172 #if DFG_ENABLE(DEBUG_VERBOSE)
173 dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
174 nodeIndex, m_currentIndex, node.local(), predictionToString(prediction));
175 #endif
176 node.variableAccessData()->predict(prediction);
177 return nodeIndex;
178 }
179
180 // Used in implementing get/set, above, where the operand is a local variable.
181 NodeIndex getLocal(unsigned operand)
182 {
183 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
184
185 if (nodeIndex != NoNode) {
186 Node* nodePtr = &m_graph[nodeIndex];
187 if (nodePtr->op() == Flush) {
188 // Two possibilities: either the block wants the local to be live
189 // but has not loaded its value, or it has loaded its value, in
190 // which case we're done.
191 nodeIndex = nodePtr->child1().index();
192 Node& flushChild = m_graph[nodeIndex];
193 if (flushChild.op() == Phi) {
194 VariableAccessData* variableAccessData = flushChild.variableAccessData();
195 nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
196 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
197 return nodeIndex;
198 }
199 nodePtr = &flushChild;
200 }
201
202 ASSERT(&m_graph[nodeIndex] == nodePtr);
203 ASSERT(nodePtr->op() != Flush);
204
205 if (m_graph.localIsCaptured(operand)) {
206 // We wish to use the same variable access data as the previous access,
207 // but for all other purposes we want to issue a load since for all we
208 // know, at this stage of compilation, the local has been clobbered.
209
210 // Make sure we link to the Phi node, not to the GetLocal.
211 if (nodePtr->op() == GetLocal)
212 nodeIndex = nodePtr->child1().index();
213
214 return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
215 }
216
217 if (nodePtr->op() == GetLocal)
218 return nodeIndex;
219 ASSERT(nodePtr->op() == SetLocal);
220 return nodePtr->child1().index();
221 }
222
223                 // Check for reads of temporaries from prior blocks, and
224                 // expand m_preservedVars to cover these.
225 m_preservedVars.set(operand);
226
227 VariableAccessData* variableAccessData = newVariableAccessData(operand);
228
229 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
230 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
231 nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
232 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
233
234 m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
235
236 return nodeIndex;
237 }
238 void setLocal(unsigned operand, NodeIndex value)
239 {
240 VariableAccessData* variableAccessData = newVariableAccessData(operand);
241 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
242 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
243
244 bool shouldFlush = m_graph.localIsCaptured(operand);
245
246 if (!shouldFlush) {
247 // If this is in argument position, then it should be flushed.
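                // Walk the inline stack: if the operand lies in the argument slots of some inline
                // call frame, tie it to that frame's ArgumentPosition and force a flush.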
248 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
249 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
250 if (!inlineCallFrame)
251 break;
252 if (static_cast<int>(operand) >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
253 continue;
254 if (static_cast<int>(operand) == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
255 continue;
256 if (operand < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
257 continue;
258 int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
259 stack->m_argumentPositions[argument]->addVariable(variableAccessData);
260 shouldFlush = true;
261 break;
262 }
263 }
264
265 if (shouldFlush)
266 addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
267 }
268
269 // Used in implementing get/set, above, where the operand is an argument.
270 NodeIndex getArgument(unsigned operand)
271 {
272 unsigned argument = operandToArgument(operand);
273 ASSERT(argument < m_numArguments);
274
275 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
276
277 if (nodeIndex != NoNode) {
278 Node* nodePtr = &m_graph[nodeIndex];
279 if (nodePtr->op() == Flush) {
280 // Two possibilities: either the block wants the local to be live
281 // but has not loaded its value, or it has loaded its value, in
282 // which case we're done.
283 nodeIndex = nodePtr->child1().index();
284 Node& flushChild = m_graph[nodeIndex];
285 if (flushChild.op() == Phi) {
286 VariableAccessData* variableAccessData = flushChild.variableAccessData();
287 nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
288                         m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
289 return nodeIndex;
290 }
291 nodePtr = &flushChild;
292 }
293
294 ASSERT(&m_graph[nodeIndex] == nodePtr);
295 ASSERT(nodePtr->op() != Flush);
296
297 if (nodePtr->op() == SetArgument) {
298 // We're getting an argument in the first basic block; link
299 // the GetLocal to the SetArgument.
300 ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
301 nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
302 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
303 return nodeIndex;
304 }
305
306 if (m_graph.argumentIsCaptured(argument)) {
307 if (nodePtr->op() == GetLocal)
308 nodeIndex = nodePtr->child1().index();
309 return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
310 }
311
312 if (nodePtr->op() == GetLocal)
313 return nodeIndex;
314
315 ASSERT(nodePtr->op() == SetLocal);
316 return nodePtr->child1().index();
317 }
318
319 VariableAccessData* variableAccessData = newVariableAccessData(operand);
320
321 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
322 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
323 nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
324 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
325
326 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
327
328 return nodeIndex;
329 }
330 void setArgument(int operand, NodeIndex value)
331 {
332 unsigned argument = operandToArgument(operand);
333 ASSERT(argument < m_numArguments);
334
335 VariableAccessData* variableAccessData = newVariableAccessData(operand);
336 InlineStackEntry* stack = m_inlineStackTop;
337 while (stack->m_inlineCallFrame) // find the machine stack entry.
338 stack = stack->m_caller;
339 stack->m_argumentPositions[argument]->addVariable(variableAccessData);
340 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
341 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
342 // Always flush arguments.
343 addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
344 }
345
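        // Emit a Flush for the given operand and return its VariableAccessData; used when setting
        // up an inlined call to make sure the arguments are flushed to the register file.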
346 VariableAccessData* flushArgument(int operand)
347 {
348 // FIXME: This should check if the same operand had already been flushed to
349 // some other local variable.
350
351 operand = m_inlineStackTop->remapOperand(operand);
352
353 ASSERT(operand < FirstConstantRegisterIndex);
354
355 NodeIndex nodeIndex;
356 int index;
357 if (operandIsArgument(operand)) {
358 index = operandToArgument(operand);
359 nodeIndex = m_currentBlock->variablesAtTail.argument(index);
360 } else {
361 index = operand;
362 nodeIndex = m_currentBlock->variablesAtTail.local(index);
363 m_preservedVars.set(operand);
364 }
365
366 if (nodeIndex != NoNode) {
367 Node& node = m_graph[nodeIndex];
368 switch (node.op()) {
369 case Flush:
370 nodeIndex = node.child1().index();
371 break;
372 case GetLocal:
373 nodeIndex = node.child1().index();
374 break;
375 default:
376 break;
377 }
378
379 ASSERT(m_graph[nodeIndex].op() != Flush
380 && m_graph[nodeIndex].op() != GetLocal);
381
382 // Emit a Flush regardless of whether we already flushed it.
383                 // This tells later phases that the variable also needs to be flushed for
384                 // use as an argument, even if it already had to be flushed for other reasons.
385 VariableAccessData* variableAccessData = node.variableAccessData();
386 addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
387 return variableAccessData;
388 }
389
390 VariableAccessData* variableAccessData = newVariableAccessData(operand);
391 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
392 nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
393 if (operandIsArgument(operand)) {
394 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
395 m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
396 m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
397 } else {
398 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
399 m_currentBlock->variablesAtTail.local(index) = nodeIndex;
400 m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
401 }
402 return variableAccessData;
403 }
404
405     // Get an operand, and perform a ToInt32 conversion on it.
406 NodeIndex getToInt32(int operand)
407 {
408 return toInt32(get(operand));
409 }
410
411 // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
412 NodeIndex toInt32(NodeIndex index)
413 {
414 Node& node = m_graph[index];
415
416 if (node.hasInt32Result())
417 return index;
418
419 if (node.op() == UInt32ToNumber)
420 return node.child1().index();
421
422 // Check for numeric constants boxed as JSValues.
423 if (node.op() == JSConstant) {
424 JSValue v = valueOfJSConstant(index);
425 if (v.isInt32())
426 return getJSConstant(node.constantNumber());
427 if (v.isNumber())
428 return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
429 }
430
431 return addToGraph(ValueToInt32, index);
432 }
433
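        // Return a JSConstant node for an arbitrary JSValue, adding the value to the CodeBlock's
        // constant pool (and a matching slot to m_constants) if it is not already there.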
434 NodeIndex getJSConstantForValue(JSValue constantValue)
435 {
436 unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
437 if (constantIndex >= m_constants.size())
438 m_constants.append(ConstantRecord());
439
440 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
441
442 return getJSConstant(constantIndex);
443 }
444
445 NodeIndex getJSConstant(unsigned constant)
446 {
447 NodeIndex index = m_constants[constant].asJSValue;
448 if (index != NoNode)
449 return index;
450
451 NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
452 m_constants[constant].asJSValue = resultIndex;
453 return resultIndex;
454 }
455
456 // Helper functions to get/set the this value.
457 NodeIndex getThis()
458 {
459 return get(m_inlineStackTop->m_codeBlock->thisRegister());
460 }
461 void setThis(NodeIndex value)
462 {
463 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
464 }
465
466 // Convenience methods for checking nodes for constants.
467 bool isJSConstant(NodeIndex index)
468 {
469 return m_graph[index].op() == JSConstant;
470 }
471 bool isInt32Constant(NodeIndex nodeIndex)
472 {
473 return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
474 }
475 // Convenience methods for getting constant values.
476 JSValue valueOfJSConstant(NodeIndex index)
477 {
478 ASSERT(isJSConstant(index));
479 return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
480 }
481 int32_t valueOfInt32Constant(NodeIndex nodeIndex)
482 {
483 ASSERT(isInt32Constant(nodeIndex));
484 return valueOfJSConstant(nodeIndex).asInt32();
485 }
486
487 // This method returns a JSConstant with the value 'undefined'.
488 NodeIndex constantUndefined()
489 {
490 // Has m_constantUndefined been set up yet?
491 if (m_constantUndefined == UINT_MAX) {
492             // Search the constant pool for undefined; if we find it, we can just reuse it!
493 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
494 for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
495 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
496 if (testMe.isUndefined())
497 return getJSConstant(m_constantUndefined);
498 }
499
500 // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
501 ASSERT(m_constants.size() == numberOfConstants);
502 m_codeBlock->addConstant(jsUndefined());
503 m_constants.append(ConstantRecord());
504 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
505 }
506
507 // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
508 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
509 return getJSConstant(m_constantUndefined);
510 }
511
512 // This method returns a JSConstant with the value 'null'.
513 NodeIndex constantNull()
514 {
515 // Has m_constantNull been set up yet?
516 if (m_constantNull == UINT_MAX) {
517             // Search the constant pool for null; if we find it, we can just reuse it!
518 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
519 for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
520 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
521 if (testMe.isNull())
522 return getJSConstant(m_constantNull);
523 }
524
525 // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
526 ASSERT(m_constants.size() == numberOfConstants);
527 m_codeBlock->addConstant(jsNull());
528 m_constants.append(ConstantRecord());
529 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
530 }
531
532 // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
533 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
534 return getJSConstant(m_constantNull);
535 }
536
537     // This method returns a JSConstant with the integer value 1.
538 NodeIndex one()
539 {
540 // Has m_constant1 been set up yet?
541 if (m_constant1 == UINT_MAX) {
542             // Search the constant pool for the value 1; if we find it, we can just reuse it!
543 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
544 for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
545 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
546 if (testMe.isInt32() && testMe.asInt32() == 1)
547 return getJSConstant(m_constant1);
548 }
549
550 // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
551 ASSERT(m_constants.size() == numberOfConstants);
552 m_codeBlock->addConstant(jsNumber(1));
553 m_constants.append(ConstantRecord());
554 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
555 }
556
557 // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
558 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
559 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
560 return getJSConstant(m_constant1);
561 }
562
563     // This method returns a JSConstant with the value NaN.
564 NodeIndex constantNaN()
565 {
566 JSValue nan = jsNaN();
567
568 // Has m_constantNaN been set up yet?
569 if (m_constantNaN == UINT_MAX) {
570             // Search the constant pool for the value NaN; if we find it, we can just reuse it!
571 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
572 for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
573 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
574 if (JSValue::encode(testMe) == JSValue::encode(nan))
575 return getJSConstant(m_constantNaN);
576 }
577
578 // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
579 ASSERT(m_constants.size() == numberOfConstants);
580 m_codeBlock->addConstant(nan);
581 m_constants.append(ConstantRecord());
582 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
583 }
584
585 // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
586 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
587 ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
588 return getJSConstant(m_constantNaN);
589 }
590
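        // Return a WeakJSConstant node for the given cell, creating at most one node per cell.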
591 NodeIndex cellConstant(JSCell* cell)
592 {
593 HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
594 if (result.isNewEntry)
595 result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
596
597 return result.iterator->second;
598 }
599
600 CodeOrigin currentCodeOrigin()
601 {
602 return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
603 }
604
605 // These methods create a node and add it to the graph. If nodes of this type are
606 // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
607 NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
608 {
609 NodeIndex resultIndex = (NodeIndex)m_graph.size();
610 m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
611 ASSERT(op != Phi);
612 m_currentBlock->append(resultIndex);
613
614 if (defaultFlags(op) & NodeMustGenerate)
615 m_graph.ref(resultIndex);
616 return resultIndex;
617 }
618 NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
619 {
620 NodeIndex resultIndex = (NodeIndex)m_graph.size();
621 m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
622 if (op == Phi)
623 m_currentBlock->phis.append(resultIndex);
624 else
625 m_currentBlock->append(resultIndex);
626
627 if (defaultFlags(op) & NodeMustGenerate)
628 m_graph.ref(resultIndex);
629 return resultIndex;
630 }
631 NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
632 {
633 NodeIndex resultIndex = (NodeIndex)m_graph.size();
634 m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
635 ASSERT(op != Phi);
636 m_currentBlock->append(resultIndex);
637
638 if (defaultFlags(op) & NodeMustGenerate)
639 m_graph.ref(resultIndex);
640 return resultIndex;
641 }
642
643 NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
644 {
645 NodeIndex resultIndex = (NodeIndex)m_graph.size();
646 m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
647 ASSERT(op != Phi);
648 m_currentBlock->append(resultIndex);
649
650 m_numPassedVarArgs = 0;
651
652 if (defaultFlags(op) & NodeMustGenerate)
653 m_graph.ref(resultIndex);
654 return resultIndex;
655 }
656
657 NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
658 {
659 NodeIndex resultIndex = (NodeIndex)m_graph.size();
660 m_graph.append(Node(Phi, currentCodeOrigin(), info));
661 block->phis.append(resultIndex);
662
663 return resultIndex;
664 }
665
666 void addVarArgChild(NodeIndex child)
667 {
668 m_graph.m_varArgChildren.append(Edge(child));
669 m_numPassedVarArgs++;
670 }
671
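        // Build a varargs Call/Construct node for an op_call-style instruction: the callee and the
        // arguments become var-arg children, and if op_call_put_result follows, its operand is set
        // to the call node using that instruction's value-profile prediction.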
672 NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
673 {
674 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
675
676 PredictedType prediction = PredictNone;
677 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
678 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
679 prediction = getPrediction();
680 }
681
682 addVarArgChild(get(currentInstruction[1].u.operand));
683 int argCount = currentInstruction[2].u.operand;
684 if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
685 m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
686
687 int registerOffset = currentInstruction[3].u.operand;
688 int dummyThisArgument = op == Call ? 0 : 1;
689 for (int i = 0 + dummyThisArgument; i < argCount; ++i)
690 addVarArgChild(get(registerOffset + argumentToOperand(i)));
691
692 NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
693 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
694 set(putInstruction[1].u.operand, call);
695 return call;
696 }
697
698 PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
699 {
700 UNUSED_PARAM(nodeIndex);
701
702 PredictedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
703 #if DFG_ENABLE(DEBUG_VERBOSE)
704 dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
705 #endif
706
707 return prediction;
708 }
709
710 PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
711 {
712 PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
713
714 if (prediction == PredictNone) {
715 // We have no information about what values this node generates. Give up
716 // on executing this code, since we're likely to do more damage than good.
717 addToGraph(ForceOSRExit);
718 }
719
720 return prediction;
721 }
722
723 PredictedType getPredictionWithoutOSRExit()
724 {
725 return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
726 }
727
728 PredictedType getPrediction()
729 {
730 return getPrediction(m_graph.size(), m_currentProfilingIndex);
731 }
732
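        // Mark an arithmetic node as possibly overflowing or producing negative zero when value
        // profiling or prior OSR exits indicate that the speculative integer fast path has failed.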
733 NodeIndex makeSafe(NodeIndex nodeIndex)
734 {
735 Node& node = m_graph[nodeIndex];
736
737 bool likelyToTakeSlowCase;
738 if (!isX86() && node.op() == ArithMod)
739 likelyToTakeSlowCase = false;
740 else
741 likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
742
743 if (!likelyToTakeSlowCase
744 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
745 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
746 return nodeIndex;
747
748 switch (m_graph[nodeIndex].op()) {
749 case UInt32ToNumber:
750 case ArithAdd:
751 case ArithSub:
752 case ArithNegate:
753 case ValueAdd:
754 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
755 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
756 break;
757
758 case ArithMul:
759 if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
760 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
761 #if DFG_ENABLE(DEBUG_VERBOSE)
762 dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
763 #endif
764 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
765 } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
766 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
767 #if DFG_ENABLE(DEBUG_VERBOSE)
768 dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
769 #endif
770 m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
771 }
772 break;
773
774 default:
775 ASSERT_NOT_REACHED();
776 break;
777 }
778
779 return nodeIndex;
780 }
781
782 NodeIndex makeDivSafe(NodeIndex nodeIndex)
783 {
784 ASSERT(m_graph[nodeIndex].op() == ArithDiv);
785
786 // The main slow case counter for op_div in the old JIT counts only when
787 // the operands are not numbers. We don't care about that since we already
788 // have speculations in place that take care of that separately. We only
789 // care about when the outcome of the division is not an integer, which
790 // is what the special fast case counter tells us.
791
792 if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
793 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
794 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
795 return nodeIndex;
796
797 #if DFG_ENABLE(DEBUG_VERBOSE)
798 dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
799 #endif
800
801 // FIXME: It might be possible to make this more granular. The DFG certainly can
802 // distinguish between negative zero and overflow in its exit profiles.
803 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
804
805 return nodeIndex;
806 }
807
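        // Returns true if any access in the stub's polymorphic access list is not direct.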
808 bool willNeedFlush(StructureStubInfo& stubInfo)
809 {
810 PolymorphicAccessStructureList* list;
811 int listSize;
812 switch (stubInfo.accessType) {
813 case access_get_by_id_self_list:
814 list = stubInfo.u.getByIdSelfList.structureList;
815 listSize = stubInfo.u.getByIdSelfList.listSize;
816 break;
817 case access_get_by_id_proto_list:
818 list = stubInfo.u.getByIdProtoList.structureList;
819 listSize = stubInfo.u.getByIdProtoList.listSize;
820 break;
821 default:
822 return false;
823 }
824 for (int i = 0; i < listSize; ++i) {
825 if (!list->list[i].isDirect)
826 return true;
827 }
828 return false;
829 }
830
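        // For a cached put that is not direct, check that the structures along the stored prototype
        // chain still match the recorded StructureChain.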
831 bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
832 {
833 if (direct)
834 return true;
835
836 if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
837 return false;
838
839 for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
840 if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
841 return false;
842 }
843
844 return true;
845 }
846
847 void buildOperandMapsIfNecessary();
848
849 JSGlobalData* m_globalData;
850 CodeBlock* m_codeBlock;
851 CodeBlock* m_profiledBlock;
852 Graph& m_graph;
853
854 // The current block being generated.
855 BasicBlock* m_currentBlock;
856 // The bytecode index of the current instruction being generated.
857 unsigned m_currentIndex;
858 // The bytecode index of the value profile of the current instruction being generated.
859 unsigned m_currentProfilingIndex;
860
861 // We use these values during code generation, and to avoid the need for
862 // special handling we make sure they are available as constants in the
863 // CodeBlock's constant pool. These variables are initialized to
864 // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
865 // constant pool, as necessary.
866 unsigned m_constantUndefined;
867 unsigned m_constantNull;
868 unsigned m_constantNaN;
869 unsigned m_constant1;
870 HashMap<JSCell*, unsigned> m_cellConstants;
871 HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
872
873 // A constant in the constant pool may be represented by more than one
874 // node in the graph, depending on the context in which it is being used.
875 struct ConstantRecord {
876 ConstantRecord()
877 : asInt32(NoNode)
878 , asNumeric(NoNode)
879 , asJSValue(NoNode)
880 {
881 }
882
883 NodeIndex asInt32;
884 NodeIndex asNumeric;
885 NodeIndex asJSValue;
886 };
887
888     // Track the nodes that have already been created for each constant register in the
889     // CodeBlock's constant pool, so that a constant is not re-emitted within a block.
890 Vector<ConstantRecord, 16> m_constants;
891
892 // The number of arguments passed to the function.
893 unsigned m_numArguments;
894 // The number of locals (vars + temporaries) used in the function.
895 unsigned m_numLocals;
896 // The set of registers we need to preserve across BasicBlock boundaries;
897 // typically equal to the set of vars, but we expand this to cover all
898     // temporaries that persist across blocks (due to ?:, &&, ||, etc).
899 BitVector m_preservedVars;
900 // The number of slots (in units of sizeof(Register)) that we need to
901 // preallocate for calls emanating from this frame. This includes the
902 // size of the CallFrame, only if this is not a leaf function. (I.e.
903 // this is 0 if and only if this function is a leaf.)
904 unsigned m_parameterSlots;
905 // The number of var args passed to the next var arg node.
906 unsigned m_numPassedVarArgs;
907 // The index in the global resolve info.
908 unsigned m_globalResolveNumber;
909
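        // Work-list entry for processPhiStack(): records a Phi node awaiting linking, along with its
        // block and local/argument number.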
910 struct PhiStackEntry {
911 PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
912 : m_block(block)
913 , m_phi(phi)
914 , m_varNo(varNo)
915 {
916 }
917
918 BasicBlock* m_block;
919 NodeIndex m_phi;
920 unsigned m_varNo;
921 };
922 Vector<PhiStackEntry, 16> m_argumentPhiStack;
923 Vector<PhiStackEntry, 16> m_localPhiStack;
924
925 struct InlineStackEntry {
926 ByteCodeParser* m_byteCodeParser;
927
928 CodeBlock* m_codeBlock;
929 CodeBlock* m_profiledBlock;
930 InlineCallFrame* m_inlineCallFrame;
931 VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
932
933 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
934
935 QueryableExitProfile m_exitProfile;
936
937 // Remapping of identifier and constant numbers from the code block being
938 // inlined (inline callee) to the code block that we're inlining into
939 // (the machine code block, which is the transitive, though not necessarily
940 // direct, caller).
941 Vector<unsigned> m_identifierRemap;
942 Vector<unsigned> m_constantRemap;
943
944 // Blocks introduced by this code block, which need successor linking.
945 // May include up to one basic block that includes the continuation after
946 // the callsite in the caller. These must be appended in the order that they
947 // are created, but their bytecodeBegin values need not be in order as they
948 // are ignored.
949 Vector<UnlinkedBlock> m_unlinkedBlocks;
950
951 // Potential block linking targets. Must be sorted by bytecodeBegin, and
952 // cannot have two blocks that have the same bytecodeBegin. For this very
953         // reason, this is not equivalent to m_unlinkedBlocks.
954 Vector<BlockIndex> m_blockLinkingTargets;
955
956 // If the callsite's basic block was split into two, then this will be
957 // the head of the callsite block. It needs its successors linked to the
958 // m_unlinkedBlocks, but not the other way around: there's no way for
959 // any blocks in m_unlinkedBlocks to jump back into this block.
960 BlockIndex m_callsiteBlockHead;
961
962 // Does the callsite block head need linking? This is typically true
963 // but will be false for the machine code block's inline stack entry
964 // (since that one is not inlined) and for cases where an inline callee
965 // did the linking for us.
966 bool m_callsiteBlockHeadNeedsLinking;
967
968 VirtualRegister m_returnValue;
969
970 // Predictions about variable types collected from the profiled code block,
971         // which are based on OSR exit profiles that past DFG compilations of this
972 // code block had gathered.
973 LazyOperandValueProfileParser m_lazyOperands;
974
975 // Did we see any returns? We need to handle the (uncommon but necessary)
976 // case where a procedure that does not return was inlined.
977 bool m_didReturn;
978
979 // Did we have any early returns?
980 bool m_didEarlyReturn;
981
982 // Pointers to the argument position trackers for this slice of code.
983 Vector<ArgumentPosition*> m_argumentPositions;
984
985 InlineStackEntry* m_caller;
986
987 InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
988
989 ~InlineStackEntry()
990 {
991 m_byteCodeParser->m_inlineStackTop = m_caller;
992 }
993
994 int remapOperand(int operand) const
995 {
996 if (!m_inlineCallFrame)
997 return operand;
998
999 if (operand >= FirstConstantRegisterIndex) {
1000 int result = m_constantRemap[operand - FirstConstantRegisterIndex];
1001 ASSERT(result >= FirstConstantRegisterIndex);
1002 return result;
1003 }
1004
1005 return operand + m_inlineCallFrame->stackOffset;
1006 }
1007 };
1008
1009 InlineStackEntry* m_inlineStackTop;
1010
1011 // Have we built operand maps? We initialize them lazily, and only when doing
1012 // inlining.
1013 bool m_haveBuiltOperandMaps;
1014 // Mapping between identifier names and numbers.
1015 IdentifierMap m_identifierMap;
1016 // Mapping between values and constant numbers.
1017 JSValueMap m_jsValueMap;
1018 // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
1019 // work-around for the fact that JSValueMap can't handle "empty" values.
1020 unsigned m_emptyJSValueIndex;
1021
1022 // Cache of code blocks that we've generated bytecode for.
1023 ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
1024 };
1025
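// NEXT_OPCODE advances past the current instruction and continues parseBlock()'s dispatch loop;
// LAST_OPCODE advances and then returns from parseBlock(), reporting whether parsing should continue.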
1026 #define NEXT_OPCODE(name) \
1027 m_currentIndex += OPCODE_LENGTH(name); \
1028 continue
1029
1030 #define LAST_OPCODE(name) \
1031 m_currentIndex += OPCODE_LENGTH(name); \
1032 return shouldContinueParsing
1033
1034
1035 void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
1036 {
1037 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1038
1039 NodeIndex callTarget = get(currentInstruction[1].u.operand);
1040 enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
1041
1042 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1043 m_inlineStackTop->m_profiledBlock, m_currentIndex);
1044
1045 #if DFG_ENABLE(DEBUG_VERBOSE)
1046 dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
1047 if (callLinkStatus.isSet()) {
1048 if (callLinkStatus.couldTakeSlowPath())
1049 dataLog("could take slow path, ");
1050 dataLog("target = %p\n", callLinkStatus.callTarget());
1051 } else
1052 dataLog("not set.\n");
1053 #endif
1054
1055 if (m_graph.isFunctionConstant(callTarget))
1056 callType = ConstantFunction;
1057 else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
1058 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
1059 callType = LinkedFunction;
1060 else
1061 callType = UnknownFunction;
1062 if (callType != UnknownFunction) {
1063 int argumentCountIncludingThis = currentInstruction[2].u.operand;
1064 int registerOffset = currentInstruction[3].u.operand;
1065
1066 // Do we have a result?
1067 bool usesResult = false;
1068 int resultOperand = 0; // make compiler happy
1069 unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
1070 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
1071 PredictedType prediction = PredictNone;
1072 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
1073 resultOperand = putInstruction[1].u.operand;
1074 usesResult = true;
1075 m_currentProfilingIndex = nextOffset;
1076 prediction = getPrediction();
1077 nextOffset += OPCODE_LENGTH(op_call_put_result);
1078 }
1079 JSFunction* expectedFunction;
1080 Intrinsic intrinsic;
1081 bool certainAboutExpectedFunction;
1082 if (callType == ConstantFunction) {
1083 expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
1084 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1085 certainAboutExpectedFunction = true;
1086 } else {
1087 ASSERT(callType == LinkedFunction);
1088 expectedFunction = callLinkStatus.callTarget();
1089 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1090 certainAboutExpectedFunction = false;
1091 }
1092
1093 if (intrinsic != NoIntrinsic) {
1094 if (!certainAboutExpectedFunction)
1095 emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
1096
1097 if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1098 if (!certainAboutExpectedFunction) {
1099 // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
1100 // to, since at this point we know that the call target is a constant. It's just that OSR isn't
1101 // smart enough to figure that out, since it doesn't understand CheckFunction.
1102 addToGraph(Phantom, callTarget);
1103 }
1104
1105 return;
1106 }
1107 } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
1108 return;
1109 }
1110
1111 addCall(interpreter, currentInstruction, op);
1112 }
1113
1114 void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
1115 {
1116 NodeIndex thisArgument;
1117 if (kind == CodeForCall)
1118 thisArgument = get(registerOffset + argumentToOperand(0));
1119 else
1120 thisArgument = NoNode;
1121 addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
1122 }
1123
1124 bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
1125 {
1126 // First, the really simple checks: do we have an actual JS function?
1127 if (!expectedFunction)
1128 return false;
1129 if (expectedFunction->isHostFunction())
1130 return false;
1131
1132 FunctionExecutable* executable = expectedFunction->jsExecutable();
1133
1134 // Does the number of arguments we're passing match the arity of the target? We could
1135 // inline arity check failures, but for simplicity we currently don't.
1136 if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
1137 return false;
1138
1139 // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
1140 // If either of these are detected, then don't inline.
1141 unsigned depth = 0;
1142 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1143 ++depth;
1144 if (depth >= Options::maximumInliningDepth)
1145 return false; // Depth exceeded.
1146
1147 if (entry->executable() == executable)
1148 return false; // Recursion detected.
1149 }
1150
1151 // Does the code block's size match the heuristics/requirements for being
1152 // an inline candidate?
1153 CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
1154 if (!mightInlineFunctionFor(profiledBlock, kind))
1155 return false;
1156
1157 // If we get here then it looks like we should definitely inline this code. Proceed
1158     // with obtaining the callee's bytecode (generating it if necessary), so that we can then parse it.
1159 // Note that if LLInt is enabled, the bytecode will always be available. Also note
1160 // that if LLInt is enabled, we may inline a code block that has never been JITted
1161 // before!
1162 CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
1163 if (!codeBlock)
1164 return false;
1165
1166 ASSERT(canInlineFunctionFor(codeBlock, kind));
1167
1168 #if DFG_ENABLE(DEBUG_VERBOSE)
1169 dataLog("Inlining executable %p.\n", executable);
1170 #endif
1171
1172 // Now we know without a doubt that we are committed to inlining. So begin the process
1173 // by checking the callee (if necessary) and making sure that arguments and the callee
1174 // are flushed.
1175 if (!certainAboutExpectedFunction)
1176 emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
1177
1178 // FIXME: Don't flush constants!
1179
1180 Vector<VariableAccessData*, 8> arguments;
1181 for (int i = 1; i < argumentCountIncludingThis; ++i)
1182 arguments.append(flushArgument(registerOffset + argumentToOperand(i)));
1183
1184 int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
1185
1186 // Make sure that the area used by the call frame is reserved.
1187 for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1188 m_preservedVars.set(arg);
1189
1190 // Make sure that we have enough locals.
1191 unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1192 if (newNumLocals > m_numLocals) {
1193 m_numLocals = newNumLocals;
1194 for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
1195 m_graph.m_blocks[i]->ensureLocals(newNumLocals);
1196 }
1197
1198 InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
1199
1200 // Link up the argument variable access datas to their argument positions.
1201 for (int i = 1; i < argumentCountIncludingThis; ++i)
1202 inlineStackEntry.m_argumentPositions[i]->addVariable(arguments[i - 1]);
1203
1204 // This is where the actual inlining really happens.
1205 unsigned oldIndex = m_currentIndex;
1206 unsigned oldProfilingIndex = m_currentProfilingIndex;
1207 m_currentIndex = 0;
1208 m_currentProfilingIndex = 0;
1209
1210 addToGraph(InlineStart);
1211
1212 parseCodeBlock();
1213
1214 m_currentIndex = oldIndex;
1215 m_currentProfilingIndex = oldProfilingIndex;
1216
1217 // If the inlined code created some new basic blocks, then we have linking to do.
1218 if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
1219
1220 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1221 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1222 linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
1223 else
1224 ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
1225
1226 // It's possible that the callsite block head is not owned by the caller.
1227 if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1228 // It's definitely owned by the caller, because the caller created new blocks.
1229 // Assert that this all adds up.
1230 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
1231 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1232 inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1233 } else {
1234 // It's definitely not owned by the caller. Tell the caller that he does not
1235 // need to link his callsite block head, because we did it for him.
1236 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1237 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1238 inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1239 }
1240
1241 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1242 } else
1243 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1244
1245 // If there was a return, but no early returns, then we're done. We allow parsing of
1246 // the caller to continue in whatever basic block we're in right now.
1247 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1248 BasicBlock* lastBlock = m_graph.m_blocks.last().get();
1249 ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
1250
1251 // If we created new blocks then the last block needs linking, but in the
1252 // caller. It doesn't need to be linked to, but it needs outgoing links.
1253 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1254 #if DFG_ENABLE(DEBUG_VERBOSE)
1255 dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
1256 #endif
1257 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1258 // for release builds because this block will never serve as a potential target
1259 // in the linker's binary search.
1260 lastBlock->bytecodeBegin = m_currentIndex;
1261 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
1262 }
1263
1264 m_currentBlock = m_graph.m_blocks.last().get();
1265
1266 #if DFG_ENABLE(DEBUG_VERBOSE)
1267 dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
1268 #endif
1269 return true;
1270 }
1271
1272     // If we get to this point then all blocks must end in some sort of terminal.
1273 ASSERT(m_graph.last().isTerminal());
1274
1275 // Link the early returns to the basic block we're about to create.
1276 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1277 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1278 continue;
1279 BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
1280 ASSERT(!block->isLinked);
1281 Node& node = m_graph[block->last()];
1282 ASSERT(node.op() == Jump);
1283 ASSERT(node.takenBlockIndex() == NoBlock);
1284 node.setTakenBlockIndex(m_graph.m_blocks.size());
1285 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1286 #if !ASSERT_DISABLED
1287 block->isLinked = true;
1288 #endif
1289 }
1290
1291 // Need to create a new basic block for the continuation at the caller.
1292 OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
1293 #if DFG_ENABLE(DEBUG_VERBOSE)
1294 dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
1295 #endif
1296 m_currentBlock = block.get();
1297 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
1298 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
1299 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
1300 m_graph.m_blocks.append(block.release());
1301 prepareToParseBlock();
1302
1303 // At this point we return and continue to generate code for the caller, but
1304 // in the new basic block.
1305 #if DFG_ENABLE(DEBUG_VERBOSE)
1306 dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
1307 #endif
1308 return true;
1309 }
1310
1311 void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
1312 {
1313 if (!usesResult)
1314 return;
1315 set(resultOperand, nodeIndex);
1316 }
1317
1318 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1319 {
1320 if (argumentCountIncludingThis == 1) { // Math.min()
1321 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1322 return true;
1323 }
1324
1325 if (argumentCountIncludingThis == 2) { // Math.min(x)
1326 // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
1327 NodeIndex result = get(registerOffset + argumentToOperand(1));
1328 addToGraph(CheckNumber, result);
1329 setIntrinsicResult(usesResult, resultOperand, result);
1330 return true;
1331 }
1332
1333 if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1334 setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
1335 return true;
1336 }
1337
1338 // Don't handle >=3 arguments for now.
1339 return false;
1340 }
1341
1342 // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
1343 // they need to perform the ToNumber conversion, which can have side-effects.
1344 bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
1345 {
1346 switch (intrinsic) {
1347 case AbsIntrinsic: {
1348 if (argumentCountIncludingThis == 1) { // Math.abs()
1349 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1350 return true;
1351 }
1352
1353 if (!MacroAssembler::supportsFloatingPointAbs())
1354 return false;
1355
1356 NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
1357 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1358 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
1359 setIntrinsicResult(usesResult, resultOperand, nodeIndex);
1360 return true;
1361 }
1362
1363 case MinIntrinsic:
1364 return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1365
1366 case MaxIntrinsic:
1367 return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1368
1369 case SqrtIntrinsic: {
1370 if (argumentCountIncludingThis == 1) { // Math.sqrt()
1371 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1372 return true;
1373 }
1374
1375 if (!MacroAssembler::supportsFloatingPointSqrt())
1376 return false;
1377
1378 setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
1379 return true;
1380 }
1381
1382 case ArrayPushIntrinsic: {
1383 if (argumentCountIncludingThis != 2)
1384 return false;
1385
1386 NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1387 if (usesResult)
1388 set(resultOperand, arrayPush);
1389
1390 return true;
1391 }
1392
1393 case ArrayPopIntrinsic: {
1394 if (argumentCountIncludingThis != 1)
1395 return false;
1396
1397 NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
1398 if (usesResult)
1399 set(resultOperand, arrayPop);
1400 return true;
1401 }
1402
1403 case CharCodeAtIntrinsic: {
1404 if (argumentCountIncludingThis != 2)
1405 return false;
1406
1407 int thisOperand = registerOffset + argumentToOperand(0);
1408 if (!(m_graph[get(thisOperand)].prediction() & PredictString))
1409 return false;
1410
1411 int indexOperand = registerOffset + argumentToOperand(1);
1412 NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1413 NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
1414
1415 if (usesResult)
1416 set(resultOperand, charCode);
1417 return true;
1418 }
1419
1420 case CharAtIntrinsic: {
1421 if (argumentCountIncludingThis != 2)
1422 return false;
1423
1424 int thisOperand = registerOffset + argumentToOperand(0);
1425 if (!(m_graph[get(thisOperand)].prediction() & PredictString))
1426 return false;
1427
1428 int indexOperand = registerOffset + argumentToOperand(1);
1429 NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
1430 NodeIndex charAt = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);
1431
1432 if (usesResult)
1433 set(resultOperand, charAt);
1434 return true;
1435 }
1436
1437 case RegExpExecIntrinsic: {
1438 if (argumentCountIncludingThis != 2)
1439 return false;
1440
1441 NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1442 if (usesResult)
1443 set(resultOperand, regExpExec);
1444
1445 return true;
1446 }
1447
1448 case RegExpTestIntrinsic: {
1449 if (argumentCountIncludingThis != 2)
1450 return false;
1451
1452 NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1453 if (usesResult)
1454 set(resultOperand, regExpExec);
1455
1456 return true;
1457 }
1458
1459 default:
1460 return false;
1461 }
1462 }
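// A minimal sketch of the CharCodeAtIntrinsic path above, for a call of the form
// s.charCodeAt(i) where the value profile predicts that s is a string (operand names
// are illustrative):
//
//     int thisOperand  = registerOffset + argumentToOperand(0); // s
//     int indexOperand = registerOffset + argumentToOperand(1); // i
//     NodeIndex storage  = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
//     NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);
//
// If the prediction does not include PredictString, handleIntrinsic() returns false and
// the call is handled by the general call path instead.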
1463
1464 void ByteCodeParser::prepareToParseBlock()
1465 {
1466 for (unsigned i = 0; i < m_constants.size(); ++i)
1467 m_constants[i] = ConstantRecord();
1468 m_cellConstantNodes.clear();
1469 }
1470
1471 bool ByteCodeParser::parseBlock(unsigned limit)
1472 {
1473 bool shouldContinueParsing = true;
1474
1475 Interpreter* interpreter = m_globalData->interpreter;
1476 Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
1477 unsigned blockBegin = m_currentIndex;
1478
1479 // If we are the first basic block, introduce markers for arguments. This allows
1480 // us to track if a use of an argument may use the actual argument passed, as
1481 // opposed to using a value we set explicitly.
1482 if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
1483 m_graph.m_arguments.resize(m_numArguments);
1484 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
1485 NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
1486 m_graph.m_arguments[argument] = setArgument;
1487 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
1488 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
1489 }
1490 }
1491
1492 while (true) {
1493 m_currentProfilingIndex = m_currentIndex;
1494
1495 // Don't extend over jump destinations.
1496 if (m_currentIndex == limit) {
1497 // Ordinarily we want to plant a jump. But refuse to do this if the block is
1498 // empty. This is a special case for inlining, which might otherwise create
1499 // empty blocks. When parseBlock() returns with an empty block, that block gets
1500 // repurposed instead of a new one being created. Note that this logic relies on
1501 // every bytecode resulting in one or more nodes; that holds for every opcode
1502 // except op_loop_hint, which emits a Phantom precisely to preserve this
1503 // invariant.
1504 if (!m_currentBlock->isEmpty())
1505 addToGraph(Jump, OpInfo(m_currentIndex));
1506 else {
1507 #if DFG_ENABLE(DEBUG_VERBOSE)
1508 dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
1509 #endif
1510 }
1511 return shouldContinueParsing;
1512 }
1513
1514 // Switch on the current bytecode opcode.
1515 Instruction* currentInstruction = instructionsBegin + m_currentIndex;
1516 OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
1517 switch (opcodeID) {
1518
1519 // === Function entry opcodes ===
1520
1521 case op_enter:
1522 // Initialize all locals to undefined.
1523 for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
1524 set(i, constantUndefined());
1525 NEXT_OPCODE(op_enter);
1526
1527 case op_convert_this: {
1528 NodeIndex op1 = getThis();
1529 if (m_graph[op1].op() == ConvertThis)
1530 setThis(op1);
1531 else
1532 setThis(addToGraph(ConvertThis, op1));
1533 NEXT_OPCODE(op_convert_this);
1534 }
1535
1536 case op_create_this: {
1537 NodeIndex op1 = get(currentInstruction[2].u.operand);
1538 set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
1539 NEXT_OPCODE(op_create_this);
1540 }
1541
1542 case op_new_object: {
1543 set(currentInstruction[1].u.operand, addToGraph(NewObject));
1544 NEXT_OPCODE(op_new_object);
1545 }
1546
1547 case op_new_array: {
1548 int startOperand = currentInstruction[2].u.operand;
1549 int numOperands = currentInstruction[3].u.operand;
1550 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1551 addVarArgChild(get(operandIdx));
1552 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
1553 NEXT_OPCODE(op_new_array);
1554 }
1555
1556 case op_new_array_buffer: {
1557 int startConstant = currentInstruction[2].u.operand;
1558 int numConstants = currentInstruction[3].u.operand;
1559 set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
1560 NEXT_OPCODE(op_new_array_buffer);
1561 }
1562
1563 case op_new_regexp: {
1564 set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
1565 NEXT_OPCODE(op_new_regexp);
1566 }
1567
1568 case op_get_callee: {
1569 if (m_inlineStackTop->m_inlineCallFrame)
1570 set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
1571 else
1572 set(currentInstruction[1].u.operand, addToGraph(GetCallee));
1573 NEXT_OPCODE(op_get_callee);
1574 }
1575
1576 // === Bitwise operations ===
1577
1578 case op_bitand: {
1579 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1580 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1581 set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
1582 NEXT_OPCODE(op_bitand);
1583 }
1584
1585 case op_bitor: {
1586 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1587 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1588 set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
1589 NEXT_OPCODE(op_bitor);
1590 }
1591
1592 case op_bitxor: {
1593 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1594 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1595 set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
1596 NEXT_OPCODE(op_bitxor);
1597 }
1598
1599 case op_rshift: {
1600 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1601 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1602 NodeIndex result;
1603 // Optimize out shifts by zero.
1604 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1605 result = op1;
1606 else
1607 result = addToGraph(BitRShift, op1, op2);
1608 set(currentInstruction[1].u.operand, result);
1609 NEXT_OPCODE(op_rshift);
1610 }
1611
1612 case op_lshift: {
1613 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1614 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1615 NodeIndex result;
1616 // Optimize out shifts by zero.
1617 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
1618 result = op1;
1619 else
1620 result = addToGraph(BitLShift, op1, op2);
1621 set(currentInstruction[1].u.operand, result);
1622 NEXT_OPCODE(op_lshift);
1623 }
1624
1625 case op_urshift: {
1626 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
1627 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
1628 NodeIndex result;
1629 // The result of a zero-extending right shift is treated as an unsigned value.
1630 // This means that if the top bit is set, the result is not in the int32 range,
1631 // and as such must be stored as a double. If the shift amount is a constant,
1632 // we may be able to optimize.
1633 if (isInt32Constant(op2)) {
1634 // If we know we are shifting by a non-zero amount, then since the operation
1635 // zero fills we know the top bit of the result must be zero, and as such the
1636 // result must be within the int32 range. Conversely, if this is a shift by
1637 // zero, then the result may be changed by the conversion to unsigned, but it
1638 // is not necessary to perform the shift!
1639 if (valueOfInt32Constant(op2) & 0x1f)
1640 result = addToGraph(BitURShift, op1, op2);
1641 else
1642 result = makeSafe(addToGraph(UInt32ToNumber, op1));
1643 } else {
1644 // Cannot optimize at this stage; shift & potentially rebox as a double.
1645 result = addToGraph(BitURShift, op1, op2);
1646 result = makeSafe(addToGraph(UInt32ToNumber, result));
1647 }
1648 set(currentInstruction[1].u.operand, result);
1649 NEXT_OPCODE(op_urshift);
1650 }
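// Sketch of the two constant-shift cases above (shift amounts are illustrative):
//
//     x >>> 3   // non-zero constant shift: BitURShift(x, 3); the top bit of the result
//               // is known to be zero, so the value stays in the int32 range.
//     x >>> 0   // zero shift: no BitURShift is needed, but the value must still be
//               // reinterpreted as unsigned, hence makeSafe(UInt32ToNumber(x)).
//
// With a non-constant shift amount, both BitURShift and UInt32ToNumber are emitted.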
1651
1652 // === Increment/Decrement opcodes ===
1653
1654 case op_pre_inc: {
1655 unsigned srcDst = currentInstruction[1].u.operand;
1656 NodeIndex op = get(srcDst);
1657 set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1658 NEXT_OPCODE(op_pre_inc);
1659 }
1660
1661 case op_post_inc: {
1662 unsigned result = currentInstruction[1].u.operand;
1663 unsigned srcDst = currentInstruction[2].u.operand;
1664 ASSERT(result != srcDst); // Required for assumptions we make during OSR.
1665 NodeIndex op = get(srcDst);
1666 set(result, op);
1667 set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
1668 NEXT_OPCODE(op_post_inc);
1669 }
1670
1671 case op_pre_dec: {
1672 unsigned srcDst = currentInstruction[1].u.operand;
1673 NodeIndex op = get(srcDst);
1674 set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1675 NEXT_OPCODE(op_pre_dec);
1676 }
1677
1678 case op_post_dec: {
1679 unsigned result = currentInstruction[1].u.operand;
1680 unsigned srcDst = currentInstruction[2].u.operand;
1681 NodeIndex op = get(srcDst);
1682 set(result, op);
1683 set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
1684 NEXT_OPCODE(op_post_dec);
1685 }
1686
1687 // === Arithmetic operations ===
1688
1689 case op_add: {
1690 NodeIndex op1 = get(currentInstruction[2].u.operand);
1691 NodeIndex op2 = get(currentInstruction[3].u.operand);
1692 if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
1693 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
1694 else
1695 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
1696 NEXT_OPCODE(op_add);
1697 }
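// A minimal sketch of the distinction above:
//
//     a + b   // both operands predicted numeric (e.g. results of prior arithmetic) -> ArithAdd
//     s + t   // either side may be a string or object -> ValueAdd, the generic form
//             // that also covers concatenation
//
// makeSafe() (defined earlier in this file) presumably annotates the node with overflow
// information gathered from profiling; the choice of opcode here depends only on whether
// both inputs are known to produce numbers.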
1698
1699 case op_sub: {
1700 NodeIndex op1 = get(currentInstruction[2].u.operand);
1701 NodeIndex op2 = get(currentInstruction[3].u.operand);
1702 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
1703 NEXT_OPCODE(op_sub);
1704 }
1705
1706 case op_negate: {
1707 NodeIndex op1 = get(currentInstruction[2].u.operand);
1708 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
1709 NEXT_OPCODE(op_negate);
1710 }
1711
1712 case op_mul: {
1713 // Multiply requires that the inputs are not truncated, unfortunately.
1714 NodeIndex op1 = get(currentInstruction[2].u.operand);
1715 NodeIndex op2 = get(currentInstruction[3].u.operand);
1716 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
1717 NEXT_OPCODE(op_mul);
1718 }
1719
1720 case op_mod: {
1721 NodeIndex op1 = get(currentInstruction[2].u.operand);
1722 NodeIndex op2 = get(currentInstruction[3].u.operand);
1723 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
1724 NEXT_OPCODE(op_mod);
1725 }
1726
1727 case op_div: {
1728 NodeIndex op1 = get(currentInstruction[2].u.operand);
1729 NodeIndex op2 = get(currentInstruction[3].u.operand);
1730 set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
1731 NEXT_OPCODE(op_div);
1732 }
1733
1734 // === Misc operations ===
1735
1736 #if ENABLE(DEBUG_WITH_BREAKPOINT)
1737 case op_debug:
1738 addToGraph(Breakpoint);
1739 NEXT_OPCODE(op_debug);
1740 #endif
1741 case op_mov: {
1742 NodeIndex op = get(currentInstruction[2].u.operand);
1743 set(currentInstruction[1].u.operand, op);
1744 NEXT_OPCODE(op_mov);
1745 }
1746
1747 case op_check_has_instance:
1748 addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
1749 NEXT_OPCODE(op_check_has_instance);
1750
1751 case op_instanceof: {
1752 NodeIndex value = get(currentInstruction[2].u.operand);
1753 NodeIndex baseValue = get(currentInstruction[3].u.operand);
1754 NodeIndex prototype = get(currentInstruction[4].u.operand);
1755 set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
1756 NEXT_OPCODE(op_instanceof);
1757 }
1758
1759 case op_is_undefined: {
1760 NodeIndex value = get(currentInstruction[2].u.operand);
1761 set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
1762 NEXT_OPCODE(op_is_undefined);
1763 }
1764
1765 case op_is_boolean: {
1766 NodeIndex value = get(currentInstruction[2].u.operand);
1767 set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
1768 NEXT_OPCODE(op_is_boolean);
1769 }
1770
1771 case op_is_number: {
1772 NodeIndex value = get(currentInstruction[2].u.operand);
1773 set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
1774 NEXT_OPCODE(op_is_number);
1775 }
1776
1777 case op_is_string: {
1778 NodeIndex value = get(currentInstruction[2].u.operand);
1779 set(currentInstruction[1].u.operand, addToGraph(IsString, value));
1780 NEXT_OPCODE(op_is_string);
1781 }
1782
1783 case op_is_object: {
1784 NodeIndex value = get(currentInstruction[2].u.operand);
1785 set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
1786 NEXT_OPCODE(op_is_object);
1787 }
1788
1789 case op_is_function: {
1790 NodeIndex value = get(currentInstruction[2].u.operand);
1791 set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
1792 NEXT_OPCODE(op_is_function);
1793 }
1794
1795 case op_not: {
1796 NodeIndex value = get(currentInstruction[2].u.operand);
1797 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
1798 NEXT_OPCODE(op_not);
1799 }
1800
1801 case op_to_primitive: {
1802 NodeIndex value = get(currentInstruction[2].u.operand);
1803 set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
1804 NEXT_OPCODE(op_to_primitive);
1805 }
1806
1807 case op_strcat: {
1808 int startOperand = currentInstruction[2].u.operand;
1809 int numOperands = currentInstruction[3].u.operand;
1810 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
1811 addVarArgChild(get(operandIdx));
1812 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
1813 NEXT_OPCODE(op_strcat);
1814 }
1815
1816 case op_less: {
1817 NodeIndex op1 = get(currentInstruction[2].u.operand);
1818 NodeIndex op2 = get(currentInstruction[3].u.operand);
1819 set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1820 NEXT_OPCODE(op_less);
1821 }
1822
1823 case op_lesseq: {
1824 NodeIndex op1 = get(currentInstruction[2].u.operand);
1825 NodeIndex op2 = get(currentInstruction[3].u.operand);
1826 set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1827 NEXT_OPCODE(op_lesseq);
1828 }
1829
1830 case op_greater: {
1831 NodeIndex op1 = get(currentInstruction[2].u.operand);
1832 NodeIndex op2 = get(currentInstruction[3].u.operand);
1833 set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
1834 NEXT_OPCODE(op_greater);
1835 }
1836
1837 case op_greatereq: {
1838 NodeIndex op1 = get(currentInstruction[2].u.operand);
1839 NodeIndex op2 = get(currentInstruction[3].u.operand);
1840 set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
1841 NEXT_OPCODE(op_greatereq);
1842 }
1843
1844 case op_eq: {
1845 NodeIndex op1 = get(currentInstruction[2].u.operand);
1846 NodeIndex op2 = get(currentInstruction[3].u.operand);
1847 set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
1848 NEXT_OPCODE(op_eq);
1849 }
1850
1851 case op_eq_null: {
1852 NodeIndex value = get(currentInstruction[2].u.operand);
1853 set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
1854 NEXT_OPCODE(op_eq_null);
1855 }
1856
1857 case op_stricteq: {
1858 NodeIndex op1 = get(currentInstruction[2].u.operand);
1859 NodeIndex op2 = get(currentInstruction[3].u.operand);
1860 set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
1861 NEXT_OPCODE(op_stricteq);
1862 }
1863
1864 case op_neq: {
1865 NodeIndex op1 = get(currentInstruction[2].u.operand);
1866 NodeIndex op2 = get(currentInstruction[3].u.operand);
1867 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
1868 NEXT_OPCODE(op_neq);
1869 }
1870
1871 case op_neq_null: {
1872 NodeIndex value = get(currentInstruction[2].u.operand);
1873 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
1874 NEXT_OPCODE(op_neq_null);
1875 }
1876
1877 case op_nstricteq: {
1878 NodeIndex op1 = get(currentInstruction[2].u.operand);
1879 NodeIndex op2 = get(currentInstruction[3].u.operand);
1880 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
1881 NEXT_OPCODE(op_nstricteq);
1882 }
1883
1884 // === Property access operations ===
1885
1886 case op_get_by_val: {
1887 PredictedType prediction = getPrediction();
1888
1889 NodeIndex base = get(currentInstruction[2].u.operand);
1890 NodeIndex property = get(currentInstruction[3].u.operand);
1891 NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
1892 NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
1893 set(currentInstruction[1].u.operand, getByVal);
1894
1895 NEXT_OPCODE(op_get_by_val);
1896 }
1897
1898 case op_put_by_val: {
1899 NodeIndex base = get(currentInstruction[1].u.operand);
1900 NodeIndex property = get(currentInstruction[2].u.operand);
1901 NodeIndex value = get(currentInstruction[3].u.operand);
1902
1903 addToGraph(PutByVal, base, property, value);
1904
1905 NEXT_OPCODE(op_put_by_val);
1906 }
1907
1908 case op_method_check: {
1909 m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
1910 Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
1911
1912 PredictedType prediction = getPrediction();
1913
1914 ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
1915
1916 NodeIndex base = get(getInstruction[2].u.operand);
1917 unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
1918
1919 // Check if the method_check was monomorphic. If so, emit structure checks and
1920 // fold the callee to a constant, which is a lot more efficient.
1921 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
1922 m_inlineStackTop->m_profiledBlock,
1923 m_currentIndex,
1924 m_codeBlock->identifier(identifier));
1925 MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
1926 m_inlineStackTop->m_profiledBlock, m_currentIndex);
1927
1928 if (methodCallStatus.isSet()
1929 && !getByIdStatus.wasSeenInJIT()
1930 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1931 // It's monomorphic as far as we can tell, since the method_check was linked
1932 // but the slow path (i.e. the normal get_by_id) never fired.
1933
1934 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
1935 if (methodCallStatus.needsPrototypeCheck())
1936 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));
1937
1938 // Keep the base of the access alive past the speculations.
1939 addToGraph(Phantom, base);
1940
1941 set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
1942 } else
1943 set(getInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifier), OpInfo(prediction), base));
1944
1945 m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
1946 continue;
1947 }
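// Rough sketch of the monomorphic fast path above, for a call site like o.f(...) where
// the method_check stayed linked, the slow-path get_by_id never fired, and there is no
// BadCache exit on record:
//
//     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
//     // plus a CheckStructure on cellConstant(prototype), but only if the function
//     // actually lives on the prototype (needsPrototypeCheck());
//     addToGraph(Phantom, base);                       // keep base alive past the speculations
//     set(dst, cellConstant(methodCallStatus.function())); // the callee becomes a constant
//
// Anything polymorphic falls back to a plain GetById, or GetByIdFlush if the access may
// make calls.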
1948 case op_get_scoped_var: {
1949 PredictedType prediction = getPrediction();
1950 int dst = currentInstruction[1].u.operand;
1951 int slot = currentInstruction[2].u.operand;
1952 int depth = currentInstruction[3].u.operand;
1953 NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1954 NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
1955 set(dst, getScopedVar);
1956 NEXT_OPCODE(op_get_scoped_var);
1957 }
1958 case op_put_scoped_var: {
1959 int slot = currentInstruction[1].u.operand;
1960 int depth = currentInstruction[2].u.operand;
1961 int source = currentInstruction[3].u.operand;
1962 NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1963 addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
1964 NEXT_OPCODE(op_put_scoped_var);
1965 }
1966 case op_get_by_id: {
1967 PredictedType prediction = getPredictionWithoutOSRExit();
1968
1969 NodeIndex base = get(currentInstruction[2].u.operand);
1970 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
1971
1972 Identifier identifier = m_codeBlock->identifier(identifierNumber);
1973 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
1974 m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
1975
1976 if (getByIdStatus.isSimpleDirect()
1977 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1978 ASSERT(getByIdStatus.structureSet().size());
1979
1980 // The implementation of GetByOffset does not know how to terminate speculative
1981 // execution if it doesn't have a prediction, so we do it manually.
1982 if (prediction == PredictNone)
1983 addToGraph(ForceOSRExit);
1984
1985 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
1986 NodeIndex propertyStorage;
1987 size_t offsetOffset;
1988 if (getByIdStatus.structureSet().allAreUsingInlinePropertyStorage()) {
1989 propertyStorage = base;
1990 ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
1991 offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
1992 } else {
1993 propertyStorage = addToGraph(GetPropertyStorage, base);
1994 offsetOffset = 0;
1995 }
1996 set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage));
1997
1998 StorageAccessData storageAccessData;
1999 storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
2000 storageAccessData.identifierNumber = identifierNumber;
2001 m_graph.m_storageAccessData.append(storageAccessData);
2002 } else
2003 set(currentInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
2004
2005 NEXT_OPCODE(op_get_by_id);
2006 }
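// Sketch of the simple-direct case above: for a monomorphic o.x where the structure set
// is known and there has been no BadCache exit, the access becomes
//
//     CheckStructure(base)          // guard against a structure change
//     GetByOffset(propertyStorage)  // direct load at getByIdStatus.offset() + offsetOffset
//
// where propertyStorage is the base itself when every structure in the set uses inline
// property storage (the offset is then biased by sizeof(JSObject) / sizeof(EncodedJSValue)),
// and a GetPropertyStorage node otherwise. Everything else falls back to GetById, or
// GetByIdFlush when the access may make calls.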
2007 case op_put_by_id:
2008 case op_put_by_id_transition_direct:
2009 case op_put_by_id_transition_normal: {
2010 NodeIndex value = get(currentInstruction[3].u.operand);
2011 NodeIndex base = get(currentInstruction[1].u.operand);
2012 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2013 bool direct = currentInstruction[8].u.operand;
2014
2015 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2016 m_inlineStackTop->m_profiledBlock,
2017 m_currentIndex,
2018 m_codeBlock->identifier(identifierNumber));
2019 if (!putByIdStatus.isSet())
2020 addToGraph(ForceOSRExit);
2021
2022 bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
2023
2024 if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
2025 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2026 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
2027
2028 StorageAccessData storageAccessData;
2029 storageAccessData.offset = putByIdStatus.offset();
2030 storageAccessData.identifierNumber = identifierNumber;
2031 m_graph.m_storageAccessData.append(storageAccessData);
2032 } else if (!hasExitSite
2033 && putByIdStatus.isSimpleTransition()
2034 && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
2035 && structureChainIsStillValid(
2036 direct,
2037 putByIdStatus.oldStructure(),
2038 putByIdStatus.structureChain())) {
2039
2040 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2041 if (!direct) {
2042 if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
2043 addToGraph(
2044 CheckStructure,
2045 OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
2046 cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));
2047
2048 for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
2049 JSValue prototype = (*it)->storedPrototype();
2050 if (prototype.isNull())
2051 continue;
2052 ASSERT(prototype.isCell());
2053 addToGraph(
2054 CheckStructure,
2055 OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
2056 cellConstant(prototype.asCell()));
2057 }
2058 }
2059 addToGraph(
2060 PutStructure,
2061 OpInfo(
2062 m_graph.addStructureTransitionData(
2063 StructureTransitionData(
2064 putByIdStatus.oldStructure(),
2065 putByIdStatus.newStructure()))),
2066 base);
2067
2068 addToGraph(
2069 PutByOffset,
2070 OpInfo(m_graph.m_storageAccessData.size()),
2071 base,
2072 addToGraph(GetPropertyStorage, base),
2073 value);
2074
2075 StorageAccessData storageAccessData;
2076 storageAccessData.offset = putByIdStatus.offset();
2077 storageAccessData.identifierNumber = identifierNumber;
2078 m_graph.m_storageAccessData.append(storageAccessData);
2079 } else {
2080 if (direct)
2081 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2082 else
2083 addToGraph(PutById, OpInfo(identifierNumber), base, value);
2084 }
2085
2086 NEXT_OPCODE(op_put_by_id);
2087 }
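// Sketch of the three outcomes above for o.x = v:
//
//     // 1. Simple replace: x already exists on o's structure.
//     CheckStructure(base); PutByOffset(base, GetPropertyStorage(base), value);
//
//     // 2. Simple transition: x is being added, the property storage capacity is
//     //    unchanged, and the structure chain is still valid. The prototype chain is
//     //    structure-checked (unless this is a direct put), then PutStructure records
//     //    the transition and PutByOffset stores the value.
//
//     // 3. Anything else: a generic PutById, or PutByIdDirect for direct puts.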
2088
2089 case op_get_global_var: {
2090 PredictedType prediction = getPrediction();
2091
2092 NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand), OpInfo(prediction));
2093 set(currentInstruction[1].u.operand, getGlobalVar);
2094 NEXT_OPCODE(op_get_global_var);
2095 }
2096
2097 case op_put_global_var: {
2098 NodeIndex value = get(currentInstruction[2].u.operand);
2099 addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
2100 NEXT_OPCODE(op_put_global_var);
2101 }
2102
2103 // === Block terminators. ===
2104
2105 case op_jmp: {
2106 unsigned relativeOffset = currentInstruction[1].u.operand;
2107 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2108 LAST_OPCODE(op_jmp);
2109 }
2110
2111 case op_loop: {
2112 unsigned relativeOffset = currentInstruction[1].u.operand;
2113 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2114 LAST_OPCODE(op_loop);
2115 }
2116
2117 case op_jtrue: {
2118 unsigned relativeOffset = currentInstruction[2].u.operand;
2119 NodeIndex condition = get(currentInstruction[1].u.operand);
2120 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2121 LAST_OPCODE(op_jtrue);
2122 }
2123
2124 case op_jfalse: {
2125 unsigned relativeOffset = currentInstruction[2].u.operand;
2126 NodeIndex condition = get(currentInstruction[1].u.operand);
2127 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2128 LAST_OPCODE(op_jfalse);
2129 }
2130
2131 case op_loop_if_true: {
2132 unsigned relativeOffset = currentInstruction[2].u.operand;
2133 NodeIndex condition = get(currentInstruction[1].u.operand);
2134 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
2135 LAST_OPCODE(op_loop_if_true);
2136 }
2137
2138 case op_loop_if_false: {
2139 unsigned relativeOffset = currentInstruction[2].u.operand;
2140 NodeIndex condition = get(currentInstruction[1].u.operand);
2141 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
2142 LAST_OPCODE(op_loop_if_false);
2143 }
2144
2145 case op_jeq_null: {
2146 unsigned relativeOffset = currentInstruction[2].u.operand;
2147 NodeIndex value = get(currentInstruction[1].u.operand);
2148 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2149 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2150 LAST_OPCODE(op_jeq_null);
2151 }
2152
2153 case op_jneq_null: {
2154 unsigned relativeOffset = currentInstruction[2].u.operand;
2155 NodeIndex value = get(currentInstruction[1].u.operand);
2156 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2157 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2158 LAST_OPCODE(op_jneq_null);
2159 }
2160
2161 case op_jless: {
2162 unsigned relativeOffset = currentInstruction[3].u.operand;
2163 NodeIndex op1 = get(currentInstruction[1].u.operand);
2164 NodeIndex op2 = get(currentInstruction[2].u.operand);
2165 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2166 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2167 LAST_OPCODE(op_jless);
2168 }
2169
2170 case op_jlesseq: {
2171 unsigned relativeOffset = currentInstruction[3].u.operand;
2172 NodeIndex op1 = get(currentInstruction[1].u.operand);
2173 NodeIndex op2 = get(currentInstruction[2].u.operand);
2174 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2175 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2176 LAST_OPCODE(op_jlesseq);
2177 }
2178
2179 case op_jgreater: {
2180 unsigned relativeOffset = currentInstruction[3].u.operand;
2181 NodeIndex op1 = get(currentInstruction[1].u.operand);
2182 NodeIndex op2 = get(currentInstruction[2].u.operand);
2183 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2184 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2185 LAST_OPCODE(op_jgreater);
2186 }
2187
2188 case op_jgreatereq: {
2189 unsigned relativeOffset = currentInstruction[3].u.operand;
2190 NodeIndex op1 = get(currentInstruction[1].u.operand);
2191 NodeIndex op2 = get(currentInstruction[2].u.operand);
2192 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2193 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2194 LAST_OPCODE(op_jgreatereq);
2195 }
2196
2197 case op_jnless: {
2198 unsigned relativeOffset = currentInstruction[3].u.operand;
2199 NodeIndex op1 = get(currentInstruction[1].u.operand);
2200 NodeIndex op2 = get(currentInstruction[2].u.operand);
2201 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2202 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2203 LAST_OPCODE(op_jnless);
2204 }
2205
2206 case op_jnlesseq: {
2207 unsigned relativeOffset = currentInstruction[3].u.operand;
2208 NodeIndex op1 = get(currentInstruction[1].u.operand);
2209 NodeIndex op2 = get(currentInstruction[2].u.operand);
2210 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2211 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2212 LAST_OPCODE(op_jnlesseq);
2213 }
2214
2215 case op_jngreater: {
2216 unsigned relativeOffset = currentInstruction[3].u.operand;
2217 NodeIndex op1 = get(currentInstruction[1].u.operand);
2218 NodeIndex op2 = get(currentInstruction[2].u.operand);
2219 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2220 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2221 LAST_OPCODE(op_jngreater);
2222 }
2223
2224 case op_jngreatereq: {
2225 unsigned relativeOffset = currentInstruction[3].u.operand;
2226 NodeIndex op1 = get(currentInstruction[1].u.operand);
2227 NodeIndex op2 = get(currentInstruction[2].u.operand);
2228 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2229 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2230 LAST_OPCODE(op_jngreatereq);
2231 }
2232
2233 case op_loop_if_less: {
2234 unsigned relativeOffset = currentInstruction[3].u.operand;
2235 NodeIndex op1 = get(currentInstruction[1].u.operand);
2236 NodeIndex op2 = get(currentInstruction[2].u.operand);
2237 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2238 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2239 LAST_OPCODE(op_loop_if_less);
2240 }
2241
2242 case op_loop_if_lesseq: {
2243 unsigned relativeOffset = currentInstruction[3].u.operand;
2244 NodeIndex op1 = get(currentInstruction[1].u.operand);
2245 NodeIndex op2 = get(currentInstruction[2].u.operand);
2246 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2247 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2248 LAST_OPCODE(op_loop_if_lesseq);
2249 }
2250
2251 case op_loop_if_greater: {
2252 unsigned relativeOffset = currentInstruction[3].u.operand;
2253 NodeIndex op1 = get(currentInstruction[1].u.operand);
2254 NodeIndex op2 = get(currentInstruction[2].u.operand);
2255 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2256 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2257 LAST_OPCODE(op_loop_if_greater);
2258 }
2259
2260 case op_loop_if_greatereq: {
2261 unsigned relativeOffset = currentInstruction[3].u.operand;
2262 NodeIndex op1 = get(currentInstruction[1].u.operand);
2263 NodeIndex op2 = get(currentInstruction[2].u.operand);
2264 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2265 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2266 LAST_OPCODE(op_loop_if_greatereq);
2267 }
2268
2269 case op_ret:
2270 if (m_inlineStackTop->m_inlineCallFrame) {
2271 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2272 setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2273 m_inlineStackTop->m_didReturn = true;
2274 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2275 // If we're returning from the first block, then we're done parsing.
2276 ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2277 shouldContinueParsing = false;
2278 LAST_OPCODE(op_ret);
2279 } else {
2280 // If inlining created blocks, and we're doing a return, then we need some
2281 // special linking.
2282 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2283 m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2284 }
2285 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2286 ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2287 addToGraph(Jump, OpInfo(NoBlock));
2288 m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2289 m_inlineStackTop->m_didEarlyReturn = true;
2290 }
2291 LAST_OPCODE(op_ret);
2292 }
2293 addToGraph(Return, get(currentInstruction[1].u.operand));
2294 LAST_OPCODE(op_ret);
2295
2296 case op_end:
2297 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2298 addToGraph(Return, get(currentInstruction[1].u.operand));
2299 LAST_OPCODE(op_end);
2300
2301 case op_throw:
2302 addToGraph(Throw, get(currentInstruction[1].u.operand));
2303 LAST_OPCODE(op_throw);
2304
2305 case op_throw_reference_error:
2306 addToGraph(ThrowReferenceError);
2307 LAST_OPCODE(op_throw_reference_error);
2308
2309 case op_call:
2310 handleCall(interpreter, currentInstruction, Call, CodeForCall);
2311 NEXT_OPCODE(op_call);
2312
2313 case op_construct:
2314 handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2315 NEXT_OPCODE(op_construct);
2316
2317 case op_call_put_result:
2318 NEXT_OPCODE(op_call_put_result);
2319
2320 case op_resolve: {
2321 PredictedType prediction = getPrediction();
2322
2323 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2324
2325 NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2326 set(currentInstruction[1].u.operand, resolve);
2327
2328 NEXT_OPCODE(op_resolve);
2329 }
2330
2331 case op_resolve_base: {
2332 PredictedType prediction = getPrediction();
2333
2334 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2335
2336 NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2337 set(currentInstruction[1].u.operand, resolve);
2338
2339 NEXT_OPCODE(op_resolve_base);
2340 }
2341
2342 case op_resolve_global: {
2343 PredictedType prediction = getPrediction();
2344
2345 NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2346 m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2347 ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2348 data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2349 data.resolveInfoIndex = m_globalResolveNumber++;
2350 set(currentInstruction[1].u.operand, resolve);
2351
2352 NEXT_OPCODE(op_resolve_global);
2353 }
2354
2355 case op_loop_hint: {
2356 // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2357 // OSR can only happen at basic block boundaries. Assert that these two statements
2358 // are compatible.
2359 ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2360
2361 // We never do OSR into an inlined code block. That could not happen, since OSR
2362 // looks up the code block that is the replacement for the baseline JIT code
2363 // block. Hence, machine code block = true code block = not inline code block.
2364 if (!m_inlineStackTop->m_caller)
2365 m_currentBlock->isOSRTarget = true;
2366
2367 // Emit a phantom node to ensure that there is a placeholder node for this bytecode
2368 // op.
2369 addToGraph(Phantom);
2370
2371 NEXT_OPCODE(op_loop_hint);
2372 }
2373
2374 case op_init_lazy_reg: {
2375 set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
2376 NEXT_OPCODE(op_init_lazy_reg);
2377 }
2378
2379 case op_create_activation: {
2380 set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
2381 NEXT_OPCODE(op_create_activation);
2382 }
2383
2384 case op_tear_off_activation: {
2385 // This currently ignores arguments because we don't support them yet.
2386 addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
2387 NEXT_OPCODE(op_tear_off_activation);
2388 }
2389
2390 case op_new_func: {
2391 if (!currentInstruction[3].u.operand) {
2392 set(currentInstruction[1].u.operand,
2393 addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
2394 } else {
2395 set(currentInstruction[1].u.operand,
2396 addToGraph(
2397 NewFunction,
2398 OpInfo(currentInstruction[2].u.operand),
2399 get(currentInstruction[1].u.operand)));
2400 }
2401 NEXT_OPCODE(op_new_func);
2402 }
2403
2404 case op_new_func_exp: {
2405 set(currentInstruction[1].u.operand,
2406 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
2407 NEXT_OPCODE(op_new_func_exp);
2408 }
2409
2410 default:
2411 // Parse failed! This should not happen because the capabilities checker
2412 // should have caught it.
2413 ASSERT_NOT_REACHED();
2414 return false;
2415 }
2416
2417 ASSERT(canCompileOpcode(opcodeID));
2418 }
2419 }
2420
2421 template<ByteCodeParser::PhiStackType stackType>
2422 void ByteCodeParser::processPhiStack()
2423 {
2424 Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
2425
2426 while (!phiStack.isEmpty()) {
2427 PhiStackEntry entry = phiStack.last();
2428 phiStack.removeLast();
2429
2430 PredecessorList& predecessors = entry.m_block->m_predecessors;
2431 unsigned varNo = entry.m_varNo;
2432 VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
2433
2434 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2435 dataLog(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
2436 #endif
2437
2438 for (size_t i = 0; i < predecessors.size(); ++i) {
2439 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2440 dataLog(" Dealing with predecessor block %u.\n", predecessors[i]);
2441 #endif
2442
2443 BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
2444
2445 NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
2446
2447 NodeIndex valueInPredecessor = var;
2448 if (valueInPredecessor == NoNode) {
2449 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2450 dataLog(" Did not find node, adding phi.\n");
2451 #endif
2452
2453 valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))), predecessorBlock);
2454 var = valueInPredecessor;
2455 if (stackType == ArgumentPhiStack)
2456 predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
2457 else
2458 predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
2459 phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
2460 } else if (m_graph[valueInPredecessor].op() == GetLocal) {
2461 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2462 dataLog(" Found GetLocal @%u.\n", valueInPredecessor);
2463 #endif
2464
2465 // We want to ensure that the VariableAccessDatas are identical between the
2466 // GetLocal and its block-local Phi. Strictly speaking we only need the two
2467 // to be unified. But for efficiency, we want the code that creates GetLocals
2468 // and Phis to try to reuse VariableAccessDatas as much as possible.
2469 ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());
2470
2471 valueInPredecessor = m_graph[valueInPredecessor].child1().index();
2472 } else {
2473 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2474 dataLog(" Found @%u.\n", valueInPredecessor);
2475 #endif
2476 }
2477 ASSERT(m_graph[valueInPredecessor].op() == SetLocal
2478 || m_graph[valueInPredecessor].op() == Phi
2479 || m_graph[valueInPredecessor].op() == Flush
2480 || (m_graph[valueInPredecessor].op() == SetArgument
2481 && stackType == ArgumentPhiStack));
2482
2483 VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
2484
2485 dataForPredecessor->unify(dataForPhi);
2486
2487 Node* phiNode = &m_graph[entry.m_phi];
2488 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2489 dataLog(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
2490 #endif
2491 if (phiNode->refCount()) {
2492 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2493 dataLog(" Reffing @%u.\n", valueInPredecessor);
2494 #endif
2495 m_graph.ref(valueInPredecessor);
2496 }
2497
2498 if (!phiNode->child1()) {
2499 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2500 dataLog(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
2501 #endif
2502 phiNode->children.setChild1(Edge(valueInPredecessor));
2503 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2504 dataLog(" Children of @%u: ", entry.m_phi);
2505 phiNode->dumpChildren(WTF::dataFile());
2506 dataLog(".\n");
2507 #endif
2508 continue;
2509 }
2510 if (!phiNode->child2()) {
2511 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2512 dataLog(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
2513 #endif
2514 phiNode->children.setChild2(Edge(valueInPredecessor));
2515 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2516 dataLog(" Children of @%u: ", entry.m_phi);
2517 phiNode->dumpChildren(WTF::dataFile());
2518 dataLog(".\n");
2519 #endif
2520 continue;
2521 }
2522 if (!phiNode->child3()) {
2523 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2524 dataLog(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
2525 #endif
2526 phiNode->children.setChild3(Edge(valueInPredecessor));
2527 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2528 dataLog(" Children of @%u: ", entry.m_phi);
2529 phiNode->dumpChildren(WTF::dataFile());
2530 dataLog(".\n");
2531 #endif
2532 continue;
2533 }
2534
2535 NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block);
2536
2537 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2538 dataLog(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
2539 #endif
2540
2541 phiNode = &m_graph[entry.m_phi]; // reload after vector resize
2542 Node& newPhiNode = m_graph[newPhi];
2543 if (phiNode->refCount())
2544 m_graph.ref(newPhi);
2545
2546 newPhiNode.children = phiNode->children;
2547
2548 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2549 dataLog(" Children of @%u: ", newPhi);
2550 newPhiNode.dumpChildren(WTF::dataFile());
2551 dataLog(".\n");
2552 #endif
2553
2554 phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
2555
2556 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2557 dataLog(" Children of @%u: ", entry.m_phi);
2558 phiNode->dumpChildren(WTF::dataFile());
2559 dataLog(".\n");
2560 #endif
2561 }
2562 }
2563 }
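// Rough sketch of the phi-splitting step above: a Phi node only has room for three
// children, so once child1..child3 are occupied the code inserts a fresh Phi that adopts
// the existing children and re-links the original:
//
//     before:  Phi(a, b, c)        and a fourth predecessor value d arrives
//     after:   Phi(newPhi, d, -)   where newPhi = Phi(a, b, c)
//
// Reference counts are propagated so the new Phi is live whenever the original is.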
2564
2565 void ByteCodeParser::fixVariableAccessPredictions()
2566 {
2567 for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
2568 VariableAccessData* data = &m_graph.m_variableAccessData[i];
2569 data->find()->predict(data->nonUnifiedPrediction());
2570 }
2571 }
2572
2573 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
2574 {
2575 ASSERT(!block->isLinked);
2576 ASSERT(!block->isEmpty());
2577 Node& node = m_graph[block->last()];
2578 ASSERT(node.isTerminal());
2579
2580 switch (node.op()) {
2581 case Jump:
2582 node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2583 #if DFG_ENABLE(DEBUG_VERBOSE)
2584 dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
2585 #endif
2586 break;
2587
2588 case Branch:
2589 node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2590 node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
2591 #if DFG_ENABLE(DEBUG_VERBOSE)
2592 dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
2593 #endif
2594 break;
2595
2596 default:
2597 #if DFG_ENABLE(DEBUG_VERBOSE)
2598 dataLog("Marking basic block %p as linked.\n", block);
2599 #endif
2600 break;
2601 }
2602
2603 #if !ASSERT_DISABLED
2604 block->isLinked = true;
2605 #endif
2606 }
2607
2608 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
2609 {
2610 for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
2611 if (unlinkedBlocks[i].m_needsNormalLinking) {
2612 linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
2613 unlinkedBlocks[i].m_needsNormalLinking = false;
2614 }
2615 }
2616 }
2617
2618 void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
2619 {
2620 BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
2621 if (!successor->isReachable) {
2622 successor->isReachable = true;
2623 worklist.append(successorIndex);
2624 }
2625
2626 successor->m_predecessors.append(blockIndex);
2627 }
2628
2629 void ByteCodeParser::determineReachability()
2630 {
2631 Vector<BlockIndex, 16> worklist;
2632 worklist.append(0);
2633 m_graph.m_blocks[0]->isReachable = true;
2634 while (!worklist.isEmpty()) {
2635 BlockIndex index = worklist.last();
2636 worklist.removeLast();
2637
2638 BasicBlock* block = m_graph.m_blocks[index].get();
2639 ASSERT(block->isLinked);
2640
2641 Node& node = m_graph[block->last()];
2642 ASSERT(node.isTerminal());
2643
2644 if (node.isJump())
2645 handleSuccessor(worklist, index, node.takenBlockIndex());
2646 else if (node.isBranch()) {
2647 handleSuccessor(worklist, index, node.takenBlockIndex());
2648 handleSuccessor(worklist, index, node.notTakenBlockIndex());
2649 }
2650 }
2651 }
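// Reachability is a simple worklist walk from block 0: each block's terminal is either a
// Jump (one successor) or a Branch (two successors), and handleSuccessor() both records
// the predecessor edge and enqueues a block the first time it is seen. For example, a
// graph 0 -> {1, 2}, 1 -> 2 marks all three blocks reachable and leaves block 2 with the
// predecessor list [0, 1] (block indices are illustrative).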
2652
2653 void ByteCodeParser::buildOperandMapsIfNecessary()
2654 {
2655 if (m_haveBuiltOperandMaps)
2656 return;
2657
2658 for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
2659 m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
2660 for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
2661 JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
2662 if (!value)
2663 m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
2664 else
2665 m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
2666 }
2667
2668 m_haveBuiltOperandMaps = true;
2669 }
2670
2671 ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
2672 : m_byteCodeParser(byteCodeParser)
2673 , m_codeBlock(codeBlock)
2674 , m_profiledBlock(profiledBlock)
2675 , m_calleeVR(calleeVR)
2676 , m_exitProfile(profiledBlock->exitProfile())
2677 , m_callsiteBlockHead(callsiteBlockHead)
2678 , m_returnValue(returnValueVR)
2679 , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
2680 , m_didReturn(false)
2681 , m_didEarlyReturn(false)
2682 , m_caller(byteCodeParser->m_inlineStackTop)
2683 {
2684 m_argumentPositions.resize(codeBlock->numParameters());
2685 for (unsigned i = codeBlock->numParameters(); i--;) {
2686 byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
2687 ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
2688 m_argumentPositions[i] = argumentPosition;
2689 }
2690
2691 if (m_caller) {
2692 // Inline case.
2693 ASSERT(codeBlock != byteCodeParser->m_codeBlock);
2694 ASSERT(callee);
2695 ASSERT(calleeVR != InvalidVirtualRegister);
2696 ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
2697 ASSERT(callsiteBlockHead != NoBlock);
2698
2699 InlineCallFrame inlineCallFrame;
2700 inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
2701 inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
2702 inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
2703 inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
2704 inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries, yet.
2705 inlineCallFrame.isCall = isCall(kind);
2706 byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
2707 m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
2708
2709 byteCodeParser->buildOperandMapsIfNecessary();
2710
2711 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
2712 m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
2713
2714 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
2715 StringImpl* rep = codeBlock->identifier(i).impl();
2716 IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
2717 if (result.isNewEntry)
2718 byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
2719 m_identifierRemap[i] = result.iterator->second;
2720 }
2721 for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
2722 JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
2723 if (!value) {
2724 if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
2725 byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
2726 byteCodeParser->m_codeBlock->addConstant(JSValue());
2727 byteCodeParser->m_constants.append(ConstantRecord());
2728 }
2729 m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
2730 continue;
2731 }
2732 JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
2733 if (result.isNewEntry) {
2734 byteCodeParser->m_codeBlock->addConstant(value);
2735 byteCodeParser->m_constants.append(ConstantRecord());
2736 }
2737 m_constantRemap[i] = result.iterator->second;
2738 }
2739
2740 m_callsiteBlockHeadNeedsLinking = true;
2741 } else {
2742 // Machine code block case.
2743 ASSERT(codeBlock == byteCodeParser->m_codeBlock);
2744 ASSERT(!callee);
2745 ASSERT(calleeVR == InvalidVirtualRegister);
2746 ASSERT(returnValueVR == InvalidVirtualRegister);
2747 ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
2748 ASSERT(callsiteBlockHead == NoBlock);
2749
2750 m_inlineCallFrame = 0;
2751
2752 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
2753 m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
2754
2755 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
2756 m_identifierRemap[i] = i;
2757 for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
2758 m_constantRemap[i] = i + FirstConstantRegisterIndex;
2759
2760 m_callsiteBlockHeadNeedsLinking = false;
2761 }
2762
2763 for (size_t i = 0; i < m_constantRemap.size(); ++i)
2764 ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
2765
2766 byteCodeParser->m_inlineStackTop = this;
2767 }
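// Sketch of the remapping above when inlining: the inlinee's identifier and constant
// indices are rewritten into the machine code block's tables so the rest of the parser
// can index them uniformly. For example, if the inlinee's identifier 0 were "length"
// (an illustrative name) and the outer code block did not yet contain it, a new entry is
// appended and m_identifierRemap[0] points at it; if it already exists, the existing
// index is reused. Constants work the same way, with the empty JSValue sharing a single
// dedicated slot (m_emptyJSValueIndex). In the non-inlined case both remaps are the
// identity, with constants offset by FirstConstantRegisterIndex.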
2768
2769 void ByteCodeParser::parseCodeBlock()
2770 {
2771 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2772
2773 #if DFG_ENABLE(DEBUG_VERBOSE)
2774 dataLog("Parsing code block %p. codeType = %s, numCapturedVars = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n",
2775 codeBlock,
2776 codeTypeToString(codeBlock->codeType()),
2777 codeBlock->m_numCapturedVars,
2778 codeBlock->needsFullScopeChain()?"true":"false",
2779 codeBlock->ownerExecutable()->needsActivation()?"true":"false",
2780 codeBlock->ownerExecutable()->isStrictMode()?"true":"false");
2781 #endif
2782
2783 for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
2784 // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
2785 unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
2786 #if DFG_ENABLE(DEBUG_VERBOSE)
2787 dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2788 #endif
2789 ASSERT(m_currentIndex < limit);
2790
2791 // Loop until we reach the current limit (i.e. next jump target).
2792 do {
2793 if (!m_currentBlock) {
2794 // Check if we can use the last block.
2795 if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
2796 // This must be a block belonging to us.
2797 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2798 // Either the block is linkable or it isn't. If it's linkable then it's the last
2799 // block in the blockLinkingTargets list. If it's not then the last block will
2800 // have a lower bytecode index than the one we're about to give to this block.
2801 if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
2802 // Make the block linkable.
2803 ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
2804 m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
2805 }
2806 // Change its bytecode begin and continue.
2807 m_currentBlock = m_graph.m_blocks.last().get();
2808 #if DFG_ENABLE(DEBUG_VERBOSE)
2809 dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
2810 #endif
2811 m_currentBlock->bytecodeBegin = m_currentIndex;
2812 } else {
2813 OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
2814 #if DFG_ENABLE(DEBUG_VERBOSE)
2815 dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2816 #endif
2817 m_currentBlock = block.get();
2818 ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
2819 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
2820 m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
2821 m_graph.m_blocks.append(block.release());
2822 prepareToParseBlock();
2823 }
2824 }
2825
2826 bool shouldContinueParsing = parseBlock(limit);
2827
2828 // We should not have gone beyond the limit.
2829 ASSERT(m_currentIndex <= limit);
2830
2831 // We should have planted a terminal, or we just gave up because
2832 // we realized that the jump target information is imprecise, or we
2833 // are at the end of an inline function, or we realized that we
2834 // should stop parsing because there was a return in the first
2835 // basic block.
2836 ASSERT(m_currentBlock->isEmpty() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
2837
2838 if (!shouldContinueParsing)
2839 return;
2840
2841 m_currentBlock = 0;
2842 } while (m_currentIndex < limit);
2843 }
2844
2845 // Should have reached the end of the instructions.
2846 ASSERT(m_currentIndex == codeBlock->instructions().size());
2847 }
2848
2849 bool ByteCodeParser::parse()
2850 {
2851 // Set during construction.
2852 ASSERT(!m_currentIndex);
2853
2854 #if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
2855 // We should be pretending that the code has an activation.
2856 ASSERT(m_graph.needsActivation());
2857 #endif
2858
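// The machine code block gets the root InlineStackEntry; inlined callees push
// their own entries on top of it while their bytecode is being parsed.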
2859 InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
2860
2861 parseCodeBlock();
2862
2863 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
2864 determineReachability();
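// Resolve the Phi nodes that were queued up during parsing: first for local
// variables, then for arguments.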
2865 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2866 dataLog("Processing local variable phis.\n");
2867 #endif
2868
2869 m_currentProfilingIndex = m_currentIndex;
2870
2871 processPhiStack<LocalPhiStack>();
2872 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2873 dataLog("Processing argument phis.\n");
2874 #endif
2875 processPhiStack<ArgumentPhiStack>();
2876
2877 fixVariableAccessPredictions();
2878
2879 m_graph.m_preservedVars = m_preservedVars;
2880 m_graph.m_localVars = m_numLocals;
2881 m_graph.m_parameterSlots = m_parameterSlots;
2882
2883 return true;
2884 }
2885
2886 bool parse(Graph& graph)
2887 {
2888 #if DFG_DEBUG_LOCAL_DISABLE
2889 UNUSED_PARAM(graph);
2890 return false;
2891 #else
2892 return ByteCodeParser(graph).parse();
2893 #endif
2894 }
2895
2896 } } // namespace JSC::DFG
2897
2898 #endif