1 | /* | |
2 | * Copyright (C) 2011-2015 Apple Inc. All rights reserved. | |
3 | * | |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #include "config.h" | |
27 | #include "DFGByteCodeParser.h" | |
28 | ||
29 | #if ENABLE(DFG_JIT) | |
30 | ||
31 | #include "ArrayConstructor.h" | |
32 | #include "BasicBlockLocation.h" | |
33 | #include "CallLinkStatus.h" | |
34 | #include "CodeBlock.h" | |
35 | #include "CodeBlockWithJITType.h" | |
36 | #include "DFGArrayMode.h" | |
37 | #include "DFGCapabilities.h" | |
38 | #include "DFGGraph.h" | |
39 | #include "DFGJITCode.h" | |
40 | #include "GetByIdStatus.h" | |
41 | #include "Heap.h" | |
42 | #include "JSLexicalEnvironment.h" | |
43 | #include "JSCInlines.h" | |
44 | #include "PreciseJumpTargets.h" | |
45 | #include "PutByIdStatus.h" | |
46 | #include "StackAlignment.h" | |
47 | #include "StringConstructor.h" | |
48 | #include <wtf/CommaPrinter.h> | |
49 | #include <wtf/HashMap.h> | |
50 | #include <wtf/MathExtras.h> | |
51 | #include <wtf/StdLibExtras.h> | |
52 | ||
53 | namespace JSC { namespace DFG { | |
54 | ||
55 | static const bool verbose = false; | |
56 | ||
57 | class ConstantBufferKey { | |
58 | public: | |
59 | ConstantBufferKey() | |
60 | : m_codeBlock(0) | |
61 | , m_index(0) | |
62 | { | |
63 | } | |
64 | ||
65 | ConstantBufferKey(WTF::HashTableDeletedValueType) | |
66 | : m_codeBlock(0) | |
67 | , m_index(1) | |
68 | { | |
69 | } | |
70 | ||
71 | ConstantBufferKey(CodeBlock* codeBlock, unsigned index) | |
72 | : m_codeBlock(codeBlock) | |
73 | , m_index(index) | |
74 | { | |
75 | } | |
76 | ||
77 | bool operator==(const ConstantBufferKey& other) const | |
78 | { | |
79 | return m_codeBlock == other.m_codeBlock | |
80 | && m_index == other.m_index; | |
81 | } | |
82 | ||
83 | unsigned hash() const | |
84 | { | |
85 | return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index; | |
86 | } | |
87 | ||
88 | bool isHashTableDeletedValue() const | |
89 | { | |
90 | return !m_codeBlock && m_index; | |
91 | } | |
92 | ||
93 | CodeBlock* codeBlock() const { return m_codeBlock; } | |
94 | unsigned index() const { return m_index; } | |
95 | ||
96 | private: | |
97 | CodeBlock* m_codeBlock; | |
98 | unsigned m_index; | |
99 | }; | |
100 | ||
101 | struct ConstantBufferKeyHash { | |
102 | static unsigned hash(const ConstantBufferKey& key) { return key.hash(); } | |
103 | static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b) | |
104 | { | |
105 | return a == b; | |
106 | } | |
107 | ||
108 | static const bool safeToCompareToEmptyOrDeleted = true; | |
109 | }; | |
110 | ||
111 | } } // namespace JSC::DFG | |
112 | ||
113 | namespace WTF { | |
114 | ||
115 | template<typename T> struct DefaultHash; | |
116 | template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> { | |
117 | typedef JSC::DFG::ConstantBufferKeyHash Hash; | |
118 | }; | |
119 | ||
120 | template<typename T> struct HashTraits; | |
121 | template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { }; | |
122 | ||
123 | } // namespace WTF | |
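// With the DefaultHash and HashTraits specializations above, ConstantBufferKey can be used
// directly as a WTF::HashMap key (the parser does exactly that for m_constantBufferCache
// below). A minimal usage sketch; codeBlock, bufferIndex and remappedIndex are placeholders:
//
//     HashMap<ConstantBufferKey, unsigned> cache;
//     cache.add(ConstantBufferKey(codeBlock, bufferIndex), remappedIndex);
//     unsigned remapped = cache.get(ConstantBufferKey(codeBlock, bufferIndex));
//
// The empty value is (null, 0) and the deleted value is (null, 1), which is why
// isHashTableDeletedValue() checks for a null code block with a non-zero index.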
124 | ||
125 | namespace JSC { namespace DFG { | |
126 | ||
127 | // === ByteCodeParser === | |
128 | // | |
129 | // This class is used to compile the dataflow graph from a CodeBlock. | |
130 | class ByteCodeParser { | |
131 | public: | |
132 | ByteCodeParser(Graph& graph) | |
133 | : m_vm(&graph.m_vm) | |
134 | , m_codeBlock(graph.m_codeBlock) | |
135 | , m_profiledBlock(graph.m_profiledBlock) | |
136 | , m_graph(graph) | |
137 | , m_currentBlock(0) | |
138 | , m_currentIndex(0) | |
139 | , m_constantUndefined(graph.freeze(jsUndefined())) | |
140 | , m_constantNull(graph.freeze(jsNull())) | |
141 | , m_constantNaN(graph.freeze(jsNumber(PNaN))) | |
142 | , m_constantOne(graph.freeze(jsNumber(1))) | |
143 | , m_numArguments(m_codeBlock->numParameters()) | |
144 | , m_numLocals(m_codeBlock->m_numCalleeRegisters) | |
145 | , m_parameterSlots(0) | |
146 | , m_numPassedVarArgs(0) | |
147 | , m_inlineStackTop(0) | |
148 | , m_haveBuiltOperandMaps(false) | |
149 | , m_currentInstruction(0) | |
150 | , m_hasDebuggerEnabled(graph.hasDebuggerEnabled()) | |
151 | { | |
152 | ASSERT(m_profiledBlock); | |
153 | } | |
154 | ||
155 | // Parse a full CodeBlock of bytecode. | |
156 | bool parse(); | |
157 | ||
158 | private: | |
159 | struct InlineStackEntry; | |
160 | ||
161 | // Just parse from m_currentIndex to the end of the current CodeBlock. | |
162 | void parseCodeBlock(); | |
163 | ||
164 | void ensureLocals(unsigned newNumLocals) | |
165 | { | |
166 | if (newNumLocals <= m_numLocals) | |
167 | return; | |
168 | m_numLocals = newNumLocals; | |
169 | for (size_t i = 0; i < m_graph.numBlocks(); ++i) | |
170 | m_graph.block(i)->ensureLocals(newNumLocals); | |
171 | } | |
172 | ||
173 | // Helper for min and max. | |
174 | template<typename ChecksFunctor> | |
175 | bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); | |
176 | ||
177 | // Handle calls. This resolves issues surrounding inlining and intrinsics. | |
178 | void handleCall( | |
179 | int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, | |
180 | Node* callTarget, int argCount, int registerOffset, CallLinkStatus, | |
181 | SpeculatedType prediction); | |
182 | void handleCall( | |
183 | int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, | |
184 | Node* callTarget, int argCount, int registerOffset, CallLinkStatus); | |
185 | void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset); | |
186 | void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind); | |
187 | void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind); | |
188 | void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument); | |
189 | void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis); | |
190 | unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. | |
191 | // Handle inlining. Return true if it succeeded, false if we need to plant a call. | |
192 | bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); | |
193 | enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually }; | |
194 | template<typename ChecksFunctor> | |
195 | bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks); | |
196 | template<typename ChecksFunctor> | |
197 | void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks); | |
198 | void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry. | |
199 | // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. | |
200 | template<typename ChecksFunctor> | |
201 | bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); | |
202 | template<typename ChecksFunctor> | |
203 | bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); | |
204 | template<typename ChecksFunctor> | |
205 | bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks); | |
206 | Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value); | |
207 | Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset); | |
208 | void handleGetById( | |
209 | int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, | |
210 | const GetByIdStatus&); | |
211 | void emitPutById( | |
212 | Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); | |
213 | void handlePutById( | |
214 | Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, | |
215 | bool isDirect); | |
216 | void emitChecks(const ConstantStructureCheckVector&); | |
217 | ||
218 | void prepareToParseBlock(); | |
219 | void clearCaches(); | |
220 | ||
221 | // Parse a single basic block of bytecode instructions. | |
222 | bool parseBlock(unsigned limit); | |
223 | // Link block successors. | |
224 | void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets); | |
225 | void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets); | |
226 | ||
227 | VariableAccessData* newVariableAccessData(VirtualRegister operand) | |
228 | { | |
229 | ASSERT(!operand.isConstant()); | |
230 | ||
231 | m_graph.m_variableAccessData.append(VariableAccessData(operand)); | |
232 | return &m_graph.m_variableAccessData.last(); | |
233 | } | |
234 | ||
235 | // Get/Set the operands/result of a bytecode instruction. | |
236 | Node* getDirect(VirtualRegister operand) | |
237 | { | |
238 | ASSERT(!operand.isConstant()); | |
239 | ||
240 | // Is this an argument? | |
241 | if (operand.isArgument()) | |
242 | return getArgument(operand); | |
243 | ||
244 | // Must be a local. | |
245 | return getLocal(operand); | |
246 | } | |
247 | ||
248 | Node* get(VirtualRegister operand) | |
249 | { | |
250 | if (operand.isConstant()) { | |
251 | unsigned constantIndex = operand.toConstantIndex(); | |
252 | unsigned oldSize = m_constants.size(); | |
253 | if (constantIndex >= oldSize || !m_constants[constantIndex]) { | |
254 | const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock; | |
255 | JSValue value = codeBlock.getConstant(operand.offset()); | |
256 | SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset()); | |
257 | if (constantIndex >= oldSize) { | |
258 | m_constants.grow(constantIndex + 1); | |
259 | for (unsigned i = oldSize; i < m_constants.size(); ++i) | |
260 | m_constants[i] = nullptr; | |
261 | } | |
262 | ||
263 | Node* constantNode = nullptr; | |
264 | if (sourceCodeRepresentation == SourceCodeRepresentation::Double) | |
265 | constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber())))); | |
266 | else | |
267 | constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value))); | |
268 | m_constants[constantIndex] = constantNode; | |
269 | } | |
270 | ASSERT(m_constants[constantIndex]); | |
271 | return m_constants[constantIndex]; | |
272 | } | |
273 | ||
274 | if (inlineCallFrame()) { | |
275 | if (!inlineCallFrame()->isClosureCall) { | |
276 | JSFunction* callee = inlineCallFrame()->calleeConstant(); | |
277 | if (operand.offset() == JSStack::Callee) | |
278 | return weakJSConstant(callee); | |
279 | } | |
280 | } else if (operand.offset() == JSStack::Callee) { | |
281 | // We have to do some constant-folding here because this enables CreateThis folding. Note | |
282 | // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that | |
283 | // case if the function is a singleton then we already know it. | |
284 | if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) { | |
285 | InferredValue* singleton = executable->singletonFunction(); | |
286 | if (JSValue value = singleton->inferredValue()) { | |
287 | m_graph.watchpoints().addLazily(singleton); | |
288 | JSFunction* function = jsCast<JSFunction*>(value); | |
289 | return weakJSConstant(function); | |
290 | } | |
291 | } | |
292 | return addToGraph(GetCallee); | |
293 | } | |
294 | ||
295 | return getDirect(m_inlineStackTop->remapOperand(operand)); | |
296 | } | |
297 | ||
298 | enum SetMode { | |
299 | // A normal set which follows a two-phase commit that spans code origins. During | |
300 | // the current code origin it issues a MovHint, and at the start of the next | |
301 | // code origin there will be a SetLocal. If the local needs flushing, the second | |
302 | // SetLocal will be preceded with a Flush. | |
303 | NormalSet, | |
304 | ||
305 | // A set where the SetLocal happens immediately and there is still a Flush. This | |
306 | // is relevant when assigning to a local in tricky situations for the delayed | |
307 | // SetLocal logic but where we know that we have not performed any side effects | |
308 | // within this code origin. This is a safe replacement for NormalSet anytime we | |
309 | // know that we have not yet performed side effects in this code origin. | |
310 | ImmediateSetWithFlush, | |
311 | ||
312 | // A set where the SetLocal happens immediately and we do not Flush it even if | |
313 | // this is a local that is marked as needing it. This is relevant when | |
314 | // initializing locals at the top of a function. | |
315 | ImmediateNakedSet | |
316 | }; | |
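// Illustrative sketch (not exhaustive) of what the three modes produce for a set of local
// "loc" to "value", assuming the local is one that needs flushing:
//
//     NormalSet:             emit MovHint(loc, value) now; the Flush(loc) and
//                            SetLocal(loc, value) are queued and emitted at the start of
//                            the next code origin via m_setLocalQueue.
//     ImmediateSetWithFlush: emit MovHint(loc, value), Flush(loc), SetLocal(loc, value) now.
//     ImmediateNakedSet:     emit MovHint(loc, value) and SetLocal(loc, value) now, no Flush.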
317 | Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) | |
318 | { | |
319 | addToGraph(MovHint, OpInfo(operand.offset()), value); | |
320 | ||
321 | DelayedSetLocal delayed(currentCodeOrigin(), operand, value); | |
322 | ||
323 | if (setMode == NormalSet) { | |
324 | m_setLocalQueue.append(delayed); | |
325 | return 0; | |
326 | } | |
327 | ||
328 | return delayed.execute(this, setMode); | |
329 | } | |
330 | ||
331 | void processSetLocalQueue() | |
332 | { | |
333 | for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) | |
334 | m_setLocalQueue[i].execute(this); | |
335 | m_setLocalQueue.resize(0); | |
336 | } | |
337 | ||
338 | Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) | |
339 | { | |
340 | return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode); | |
341 | } | |
342 | ||
343 | Node* injectLazyOperandSpeculation(Node* node) | |
344 | { | |
345 | ASSERT(node->op() == GetLocal); | |
346 | ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex); | |
347 | ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); | |
348 | LazyOperandValueProfileKey key(m_currentIndex, node->local()); | |
349 | SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key); | |
350 | node->variableAccessData()->predict(prediction); | |
351 | return node; | |
352 | } | |
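// injectLazyOperandSpeculation reads the baseline code block's lazy operand value profile
// for (current bytecode index, operand) and merges the resulting SpeculatedType into the
// GetLocal's VariableAccessData, so prediction propagation starts from profiled types.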
353 | ||
354 | // Used in implementing get/set, above, where the operand is a local variable. | |
355 | Node* getLocal(VirtualRegister operand) | |
356 | { | |
357 | unsigned local = operand.toLocal(); | |
358 | ||
359 | Node* node = m_currentBlock->variablesAtTail.local(local); | |
360 | ||
361 | // This has two goals: 1) link together variable access datas, and 2) | |
362 | // try to avoid creating redundant GetLocals. (1) is required for | |
363 | // correctness - no other phase will ensure that block-local variable | |
364 | // access data unification is done correctly. (2) is purely opportunistic | |
365 | // and is meant as a compile-time optimization only. | |
366 | ||
367 | VariableAccessData* variable; | |
368 | ||
369 | if (node) { | |
370 | variable = node->variableAccessData(); | |
371 | ||
372 | switch (node->op()) { | |
373 | case GetLocal: | |
374 | return node; | |
375 | case SetLocal: | |
376 | return node->child1().node(); | |
377 | default: | |
378 | break; | |
379 | } | |
380 | } else | |
381 | variable = newVariableAccessData(operand); | |
382 | ||
383 | node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); | |
384 | m_currentBlock->variablesAtTail.local(local) = node; | |
385 | return node; | |
386 | } | |
387 | ||
388 | Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet) | |
389 | { | |
390 | CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin; | |
391 | m_currentSemanticOrigin = semanticOrigin; | |
392 | ||
393 | unsigned local = operand.toLocal(); | |
394 | ||
395 | if (setMode != ImmediateNakedSet) { | |
396 | ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand); | |
397 | if (argumentPosition) | |
398 | flushDirect(operand, argumentPosition); | |
399 | else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister()) | |
400 | flush(operand); | |
401 | } | |
402 | ||
403 | VariableAccessData* variableAccessData = newVariableAccessData(operand); | |
404 | variableAccessData->mergeStructureCheckHoistingFailed( | |
405 | m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache)); | |
406 | variableAccessData->mergeCheckArrayHoistingFailed( | |
407 | m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType)); | |
408 | Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); | |
409 | m_currentBlock->variablesAtTail.local(local) = node; | |
410 | ||
411 | m_currentSemanticOrigin = oldSemanticOrigin; | |
412 | return node; | |
413 | } | |
414 | ||
415 | // Used in implementing get/set, above, where the operand is an argument. | |
416 | Node* getArgument(VirtualRegister operand) | |
417 | { | |
418 | unsigned argument = operand.toArgument(); | |
419 | ASSERT(argument < m_numArguments); | |
420 | ||
421 | Node* node = m_currentBlock->variablesAtTail.argument(argument); | |
422 | ||
423 | VariableAccessData* variable; | |
424 | ||
425 | if (node) { | |
426 | variable = node->variableAccessData(); | |
427 | ||
428 | switch (node->op()) { | |
429 | case GetLocal: | |
430 | return node; | |
431 | case SetLocal: | |
432 | return node->child1().node(); | |
433 | default: | |
434 | break; | |
435 | } | |
436 | } else | |
437 | variable = newVariableAccessData(operand); | |
438 | ||
439 | node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); | |
440 | m_currentBlock->variablesAtTail.argument(argument) = node; | |
441 | return node; | |
442 | } | |
443 | Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet) | |
444 | { | |
445 | CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin; | |
446 | m_currentSemanticOrigin = semanticOrigin; | |
447 | ||
448 | unsigned argument = operand.toArgument(); | |
449 | ASSERT(argument < m_numArguments); | |
450 | ||
451 | VariableAccessData* variableAccessData = newVariableAccessData(operand); | |
452 | ||
453 | // Always flush arguments, except for 'this'. If 'this' is created by us, | |
454 | // then make sure that it's never unboxed. | |
455 | if (argument) { | |
456 | if (setMode != ImmediateNakedSet) | |
457 | flushDirect(operand); | |
458 | } else if (m_codeBlock->specializationKind() == CodeForConstruct) | |
459 | variableAccessData->mergeShouldNeverUnbox(true); | |
460 | ||
461 | variableAccessData->mergeStructureCheckHoistingFailed( | |
462 | m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache)); | |
463 | variableAccessData->mergeCheckArrayHoistingFailed( | |
464 | m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType)); | |
465 | Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); | |
466 | m_currentBlock->variablesAtTail.argument(argument) = node; | |
467 | ||
468 | m_currentSemanticOrigin = oldSemanticOrigin; | |
469 | return node; | |
470 | } | |
471 | ||
472 | ArgumentPosition* findArgumentPositionForArgument(int argument) | |
473 | { | |
474 | InlineStackEntry* stack = m_inlineStackTop; | |
475 | while (stack->m_inlineCallFrame) | |
476 | stack = stack->m_caller; | |
477 | return stack->m_argumentPositions[argument]; | |
478 | } | |
479 | ||
480 | ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand) | |
481 | { | |
482 | for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) { | |
483 | InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame; | |
484 | if (!inlineCallFrame) | |
485 | break; | |
486 | if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize)) | |
487 | continue; | |
488 | if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()) | |
489 | continue; | |
490 | if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size())) | |
491 | continue; | |
492 | int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument(); | |
493 | return stack->m_argumentPositions[argument]; | |
494 | } | |
495 | return 0; | |
496 | } | |
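// The walk above looks for the innermost inline call frame whose argument slots (excluding
// 'this') contain the operand; if one is found, that stack entry's ArgumentPosition for the
// argument is returned, otherwise null.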
497 | ||
498 | ArgumentPosition* findArgumentPosition(VirtualRegister operand) | |
499 | { | |
500 | if (operand.isArgument()) | |
501 | return findArgumentPositionForArgument(operand.toArgument()); | |
502 | return findArgumentPositionForLocal(operand); | |
503 | } | |
504 | ||
505 | void flush(VirtualRegister operand) | |
506 | { | |
507 | flushDirect(m_inlineStackTop->remapOperand(operand)); | |
508 | } | |
509 | ||
510 | void flushDirect(VirtualRegister operand) | |
511 | { | |
512 | flushDirect(operand, findArgumentPosition(operand)); | |
513 | } | |
514 | ||
515 | void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition) | |
516 | { | |
517 | ASSERT(!operand.isConstant()); | |
518 | ||
519 | Node* node = m_currentBlock->variablesAtTail.operand(operand); | |
520 | ||
521 | VariableAccessData* variable; | |
522 | ||
523 | if (node) | |
524 | variable = node->variableAccessData(); | |
525 | else | |
526 | variable = newVariableAccessData(operand); | |
527 | ||
528 | node = addToGraph(Flush, OpInfo(variable)); | |
529 | m_currentBlock->variablesAtTail.operand(operand) = node; | |
530 | if (argumentPosition) | |
531 | argumentPosition->addVariable(variable); | |
532 | } | |
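// A Flush keeps the variable's current value available in its stack slot (for example for
// the debugger, for code outside the inlined callee, or at terminals), and like
// GetLocal/SetLocal it is recorded in variablesAtTail so later accesses in this block link
// to the same VariableAccessData.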
533 | ||
534 | void flush(InlineStackEntry* inlineStackEntry) | |
535 | { | |
536 | int numArguments; | |
537 | if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) { | |
538 | ASSERT(!m_hasDebuggerEnabled); | |
539 | numArguments = inlineCallFrame->arguments.size(); | |
540 | if (inlineCallFrame->isClosureCall) | |
541 | flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee))); | |
542 | if (inlineCallFrame->isVarargs()) | |
543 | flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount))); | |
544 | } else | |
545 | numArguments = inlineStackEntry->m_codeBlock->numParameters(); | |
546 | for (unsigned argument = numArguments; argument-- > 1;) | |
547 | flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument))); | |
548 | if (m_hasDebuggerEnabled) | |
549 | flush(m_codeBlock->scopeRegister()); | |
550 | } | |
551 | ||
552 | void flushForTerminal() | |
553 | { | |
554 | for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) | |
555 | flush(inlineStackEntry); | |
556 | } | |
557 | ||
558 | void flushForReturn() | |
559 | { | |
560 | flush(m_inlineStackTop); | |
561 | } | |
562 | ||
563 | void flushIfTerminal(SwitchData& data) | |
564 | { | |
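// If the fall-through and every case target jump backward (i.e. not past the current
// bytecode index), this switch behaves like a terminal for liveness purposes, so flush
// everything as we would for an explicit terminal.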
565 | if (data.fallThrough.bytecodeIndex() > m_currentIndex) | |
566 | return; | |
567 | ||
568 | for (unsigned i = data.cases.size(); i--;) { | |
569 | if (data.cases[i].target.bytecodeIndex() > m_currentIndex) | |
570 | return; | |
571 | } | |
572 | ||
573 | flushForTerminal(); | |
574 | } | |
575 | ||
576 | // Assumes that the constant should be strongly marked. | |
577 | Node* jsConstant(JSValue constantValue) | |
578 | { | |
579 | return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue))); | |
580 | } | |
581 | ||
582 | Node* weakJSConstant(JSValue constantValue) | |
583 | { | |
584 | return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue))); | |
585 | } | |
586 | ||
587 | // Helper functions to get/set the this value. | |
588 | Node* getThis() | |
589 | { | |
590 | return get(m_inlineStackTop->m_codeBlock->thisRegister()); | |
591 | } | |
592 | ||
593 | void setThis(Node* value) | |
594 | { | |
595 | set(m_inlineStackTop->m_codeBlock->thisRegister(), value); | |
596 | } | |
597 | ||
598 | InlineCallFrame* inlineCallFrame() | |
599 | { | |
600 | return m_inlineStackTop->m_inlineCallFrame; | |
601 | } | |
602 | ||
603 | CodeOrigin currentCodeOrigin() | |
604 | { | |
605 | return CodeOrigin(m_currentIndex, inlineCallFrame()); | |
606 | } | |
607 | ||
608 | NodeOrigin currentNodeOrigin() | |
609 | { | |
610 | // FIXME: We should set the forExit origin only on those nodes that can exit. | |
611 | // https://bugs.webkit.org/show_bug.cgi?id=145204 | |
612 | if (m_currentSemanticOrigin.isSet()) | |
613 | return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin()); | |
614 | return NodeOrigin(currentCodeOrigin()); | |
615 | } | |
616 | ||
617 | BranchData* branchData(unsigned taken, unsigned notTaken) | |
618 | { | |
619 | // We assume that branches originating from bytecode always have a fall-through. We | |
620 | // use this assumption to avoid checking for the creation of terminal blocks. | |
621 | ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex)); | |
622 | BranchData* data = m_graph.m_branchData.add(); | |
623 | *data = BranchData::withBytecodeIndices(taken, notTaken); | |
624 | return data; | |
625 | } | |
626 | ||
627 | Node* addToGraph(Node* node) | |
628 | { | |
629 | if (Options::verboseDFGByteCodeParsing()) | |
630 | dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n"); | |
631 | m_currentBlock->append(node); | |
632 | return node; | |
633 | } | |
634 | ||
635 | Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) | |
636 | { | |
637 | Node* result = m_graph.addNode( | |
638 | SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2), | |
639 | Edge(child3)); | |
640 | return addToGraph(result); | |
641 | } | |
642 | Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge()) | |
643 | { | |
644 | Node* result = m_graph.addNode( | |
645 | SpecNone, op, currentNodeOrigin(), child1, child2, child3); | |
646 | return addToGraph(result); | |
647 | } | |
648 | Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) | |
649 | { | |
650 | Node* result = m_graph.addNode( | |
651 | SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2), | |
652 | Edge(child3)); | |
653 | return addToGraph(result); | |
654 | } | |
655 | Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) | |
656 | { | |
657 | Node* result = m_graph.addNode( | |
658 | SpecNone, op, currentNodeOrigin(), info1, info2, | |
659 | Edge(child1), Edge(child2), Edge(child3)); | |
660 | return addToGraph(result); | |
661 | } | |
662 | ||
663 | Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2) | |
664 | { | |
665 | Node* result = m_graph.addNode( | |
666 | SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2, | |
667 | m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs); | |
668 | addToGraph(result); | |
669 | ||
670 | m_numPassedVarArgs = 0; | |
671 | ||
672 | return result; | |
673 | } | |
674 | ||
675 | void addVarArgChild(Node* child) | |
676 | { | |
677 | m_graph.m_varArgChildren.append(Edge(child)); | |
678 | m_numPassedVarArgs++; | |
679 | } | |
680 | ||
681 | Node* addCallWithoutSettingResult( | |
682 | NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, | |
683 | SpeculatedType prediction) | |
684 | { | |
685 | addVarArgChild(callee); | |
686 | size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount; | |
687 | if (parameterSlots > m_parameterSlots) | |
688 | m_parameterSlots = parameterSlots; | |
689 | ||
690 | for (int i = 0; i < argCount; ++i) | |
691 | addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); | |
692 | ||
693 | return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction)); | |
694 | } | |
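// Note: parameterSlots counts the callee's frame header (minus the CallerFrameAndPC words,
// which the callee itself writes) plus the argument slots; m_parameterSlots tracks the
// maximum over all call sites so the caller reserves enough outgoing-call space.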
695 | ||
696 | Node* addCall( | |
697 | int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, | |
698 | SpeculatedType prediction) | |
699 | { | |
700 | Node* call = addCallWithoutSettingResult( | |
701 | op, opInfo, callee, argCount, registerOffset, prediction); | |
702 | VirtualRegister resultReg(result); | |
703 | if (resultReg.isValid()) | |
704 | set(resultReg, call); | |
705 | return call; | |
706 | } | |
707 | ||
708 | Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure) | |
709 | { | |
710 | Node* objectNode = weakJSConstant(object); | |
711 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode); | |
712 | return objectNode; | |
713 | } | |
714 | ||
715 | SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex) | |
716 | { | |
717 | ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); | |
718 | return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); | |
719 | } | |
720 | ||
721 | SpeculatedType getPrediction(unsigned bytecodeIndex) | |
722 | { | |
723 | SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex); | |
724 | ||
725 | if (prediction == SpecNone) { | |
726 | // We have no information about what values this node generates. Give up | |
727 | // on executing this code, since we're likely to do more damage than good. | |
728 | addToGraph(ForceOSRExit); | |
729 | } | |
730 | ||
731 | return prediction; | |
732 | } | |
733 | ||
734 | SpeculatedType getPredictionWithoutOSRExit() | |
735 | { | |
736 | return getPredictionWithoutOSRExit(m_currentIndex); | |
737 | } | |
738 | ||
739 | SpeculatedType getPrediction() | |
740 | { | |
741 | return getPrediction(m_currentIndex); | |
742 | } | |
743 | ||
744 | ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action) | |
745 | { | |
746 | ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); | |
747 | profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); | |
748 | bool makeSafe = profile->outOfBounds(locker); | |
749 | return ArrayMode::fromObserved(locker, profile, action, makeSafe); | |
750 | } | |
751 | ||
752 | ArrayMode getArrayMode(ArrayProfile* profile) | |
753 | { | |
754 | return getArrayMode(profile, Array::Read); | |
755 | } | |
756 | ||
757 | Node* makeSafe(Node* node) | |
758 | { | |
759 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) | |
760 | node->mergeFlags(NodeMayOverflowInDFG); | |
761 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) | |
762 | node->mergeFlags(NodeMayNegZeroInDFG); | |
763 | ||
764 | if (!isX86() && node->op() == ArithMod) | |
765 | return node; | |
766 | ||
767 | if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) | |
768 | return node; | |
769 | ||
770 | switch (node->op()) { | |
771 | case UInt32ToNumber: | |
772 | case ArithAdd: | |
773 | case ArithSub: | |
774 | case ValueAdd: | |
775 | case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double. | |
776 | node->mergeFlags(NodeMayOverflowInBaseline); | |
777 | break; | |
778 | ||
779 | case ArithNegate: | |
780 | // Currently we can't tell the difference between a negation overflowing | |
781 | // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow | |
782 | // path then we assume that it did both of those things. | |
783 | node->mergeFlags(NodeMayOverflowInBaseline); | |
784 | node->mergeFlags(NodeMayNegZeroInBaseline); | |
785 | break; | |
786 | ||
787 | case ArithMul: | |
788 | // FIXME: We should detect cases where we only overflowed but never created | |
789 | // negative zero. | |
790 | // https://bugs.webkit.org/show_bug.cgi?id=132470 | |
791 | if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex) | |
792 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) | |
793 | node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline); | |
794 | else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) | |
795 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) | |
796 | node->mergeFlags(NodeMayNegZeroInBaseline); | |
797 | break; | |
798 | ||
799 | default: | |
800 | RELEASE_ASSERT_NOT_REACHED(); | |
801 | break; | |
802 | } | |
803 | ||
804 | return node; | |
805 | } | |
806 | ||
807 | Node* makeDivSafe(Node* node) | |
808 | { | |
809 | ASSERT(node->op() == ArithDiv); | |
810 | ||
811 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) | |
812 | node->mergeFlags(NodeMayOverflowInDFG); | |
813 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) | |
814 | node->mergeFlags(NodeMayNegZeroInDFG); | |
815 | ||
816 | // The main slow case counter for op_div in the old JIT counts only when | |
817 | // the operands are not numbers. We don't care about that since we already | |
818 | // have speculations in place that take care of that separately. We only | |
819 | // care about when the outcome of the division is not an integer, which | |
820 | // is what the special fast case counter tells us. | |
821 | ||
822 | if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)) | |
823 | return node; | |
824 | ||
825 | // FIXME: It might be possible to make this more granular. | |
826 | node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline); | |
827 | ||
828 | return node; | |
829 | } | |
830 | ||
831 | void noticeArgumentsUse() | |
832 | { | |
833 | // All of the arguments in this function need to be formatted as JSValues because we will | |
834 | // load from them in a random-access fashion and we don't want to have to switch on | |
835 | // format. | |
836 | ||
837 | for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions) | |
838 | argument->mergeShouldNeverUnbox(true); | |
839 | } | |
840 | ||
841 | void buildOperandMapsIfNecessary(); | |
842 | ||
843 | VM* m_vm; | |
844 | CodeBlock* m_codeBlock; | |
845 | CodeBlock* m_profiledBlock; | |
846 | Graph& m_graph; | |
847 | ||
848 | // The current block being generated. | |
849 | BasicBlock* m_currentBlock; | |
850 | // The bytecode index of the current instruction being generated. | |
851 | unsigned m_currentIndex; | |
852 | // The semantic origin of the current node, if different from the origin implied by the current bytecode index. | |
853 | CodeOrigin m_currentSemanticOrigin; | |
854 | ||
855 | FrozenValue* m_constantUndefined; | |
856 | FrozenValue* m_constantNull; | |
857 | FrozenValue* m_constantNaN; | |
858 | FrozenValue* m_constantOne; | |
859 | Vector<Node*, 16> m_constants; | |
860 | ||
861 | // The number of arguments passed to the function. | |
862 | unsigned m_numArguments; | |
863 | // The number of locals (vars + temporaries) used in the function. | |
864 | unsigned m_numLocals; | |
865 | // The number of slots (in units of sizeof(Register)) that we need to | |
866 | // preallocate for arguments to outgoing calls from this frame. This | |
867 | // number includes the CallFrame slots that we initialize for the callee | |
868 | // (but not the callee-initialized CallerFrame and ReturnPC slots). | |
869 | // This number is 0 if and only if this function is a leaf. | |
870 | unsigned m_parameterSlots; | |
871 | // The number of var args passed to the next var arg node. | |
872 | unsigned m_numPassedVarArgs; | |
873 | ||
874 | HashMap<ConstantBufferKey, unsigned> m_constantBufferCache; | |
875 | ||
876 | struct InlineStackEntry { | |
877 | ByteCodeParser* m_byteCodeParser; | |
878 | ||
879 | CodeBlock* m_codeBlock; | |
880 | CodeBlock* m_profiledBlock; | |
881 | InlineCallFrame* m_inlineCallFrame; | |
882 | ||
883 | ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); } | |
884 | ||
885 | QueryableExitProfile m_exitProfile; | |
886 | ||
887 | // Remapping of identifier and constant numbers from the code block being | |
888 | // inlined (inline callee) to the code block that we're inlining into | |
889 | // (the machine code block, which is the transitive, though not necessarily | |
890 | // direct, caller). | |
891 | Vector<unsigned> m_identifierRemap; | |
892 | Vector<unsigned> m_constantBufferRemap; | |
893 | Vector<unsigned> m_switchRemap; | |
894 | ||
895 | // Blocks introduced by this code block, which need successor linking. | |
896 | // May include up to one basic block that includes the continuation after | |
897 | // the callsite in the caller. These must be appended in the order that they | |
898 | // are created, but their bytecodeBegin values need not be in order as they | |
899 | // are ignored. | |
900 | Vector<UnlinkedBlock> m_unlinkedBlocks; | |
901 | ||
902 | // Potential block linking targets. Must be sorted by bytecodeBegin, and | |
903 | // cannot have two blocks that have the same bytecodeBegin. | |
904 | Vector<BasicBlock*> m_blockLinkingTargets; | |
905 | ||
906 | // If the callsite's basic block was split into two, then this will be | |
907 | // the head of the callsite block. It needs its successors linked to the | |
908 | // m_unlinkedBlocks, but not the other way around: there's no way for | |
909 | // any blocks in m_unlinkedBlocks to jump back into this block. | |
910 | BasicBlock* m_callsiteBlockHead; | |
911 | ||
912 | // Does the callsite block head need linking? This is typically true | |
913 | // but will be false for the machine code block's inline stack entry | |
914 | // (since that one is not inlined) and for cases where an inline callee | |
915 | // did the linking for us. | |
916 | bool m_callsiteBlockHeadNeedsLinking; | |
917 | ||
918 | VirtualRegister m_returnValue; | |
919 | ||
920 | // Speculations about variable types collected from the profiled code block, | |
921 | // which are based on OSR exit profiles that past DFG compilations of this | |
922 | // code block had gathered. | |
923 | LazyOperandValueProfileParser m_lazyOperands; | |
924 | ||
925 | CallLinkInfoMap m_callLinkInfos; | |
926 | StubInfoMap m_stubInfos; | |
927 | ||
928 | // Did we see any returns? We need to handle the (uncommon but necessary) | |
929 | // case where a procedure that does not return was inlined. | |
930 | bool m_didReturn; | |
931 | ||
932 | // Did we have any early returns? | |
933 | bool m_didEarlyReturn; | |
934 | ||
935 | // Pointers to the argument position trackers for this slice of code. | |
936 | Vector<ArgumentPosition*> m_argumentPositions; | |
937 | ||
938 | InlineStackEntry* m_caller; | |
939 | ||
940 | InlineStackEntry( | |
941 | ByteCodeParser*, | |
942 | CodeBlock*, | |
943 | CodeBlock* profiledBlock, | |
944 | BasicBlock* callsiteBlockHead, | |
945 | JSFunction* callee, // Null if this is a closure call. | |
946 | VirtualRegister returnValueVR, | |
947 | VirtualRegister inlineCallFrameStart, | |
948 | int argumentCountIncludingThis, | |
949 | InlineCallFrame::Kind); | |
950 | ||
951 | ~InlineStackEntry() | |
952 | { | |
953 | m_byteCodeParser->m_inlineStackTop = m_caller; | |
954 | } | |
955 | ||
956 | VirtualRegister remapOperand(VirtualRegister operand) const | |
957 | { | |
958 | if (!m_inlineCallFrame) | |
959 | return operand; | |
960 | ||
961 | ASSERT(!operand.isConstant()); | |
962 | ||
963 | return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset); | |
964 | } | |
965 | }; | |
966 | ||
967 | InlineStackEntry* m_inlineStackTop; | |
968 | ||
969 | struct DelayedSetLocal { | |
970 | CodeOrigin m_origin; | |
971 | VirtualRegister m_operand; | |
972 | Node* m_value; | |
973 | ||
974 | DelayedSetLocal() { } | |
975 | DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value) | |
976 | : m_origin(origin) | |
977 | , m_operand(operand) | |
978 | , m_value(value) | |
979 | { | |
980 | } | |
981 | ||
982 | Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet) | |
983 | { | |
984 | if (m_operand.isArgument()) | |
985 | return parser->setArgument(m_origin, m_operand, m_value, setMode); | |
986 | return parser->setLocal(m_origin, m_operand, m_value, setMode); | |
987 | } | |
988 | }; | |
989 | ||
990 | Vector<DelayedSetLocal, 2> m_setLocalQueue; | |
991 | ||
992 | // Have we built operand maps? We initialize them lazily, and only when doing | |
993 | // inlining. | |
994 | bool m_haveBuiltOperandMaps; | |
995 | // Mapping between identifier names and numbers. | |
996 | BorrowedIdentifierMap m_identifierMap; | |
997 | ||
998 | CodeBlock* m_dfgCodeBlock; | |
999 | CallLinkStatus::ContextMap m_callContextMap; | |
1000 | StubInfoMap m_dfgStubInfos; | |
1001 | ||
1002 | Instruction* m_currentInstruction; | |
1003 | bool m_hasDebuggerEnabled; | |
1004 | }; | |
1005 | ||
1006 | #define NEXT_OPCODE(name) \ | |
1007 | m_currentIndex += OPCODE_LENGTH(name); \ | |
1008 | continue | |
1009 | ||
1010 | #define LAST_OPCODE(name) \ | |
1011 | m_currentIndex += OPCODE_LENGTH(name); \ | |
1012 | return shouldContinueParsing | |
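// NEXT_OPCODE advances m_currentIndex past the current instruction and continues the
// opcode dispatch loop in parseBlock(); LAST_OPCODE does the same advance but returns
// shouldContinueParsing, ending the block. A simplified, hypothetical sketch of how a case
// in the dispatch switch uses them (not the exact code from this file):
//
//     case op_mov: {
//         Node* src = get(VirtualRegister(currentInstruction[2].u.operand));
//         set(VirtualRegister(currentInstruction[1].u.operand), src);
//         NEXT_OPCODE(op_mov);
//     }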
1013 | ||
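// The operand layout assumed by the call below: pc[1] is the result register, pc[2] the
// callee, pc[3] the argument count including 'this', and pc[4] the (negated) register
// offset of the call frame.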
1014 | void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind) | |
1015 | { | |
1016 | ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct)); | |
1017 | handleCall( | |
1018 | pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call), | |
1019 | pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand); | |
1020 | } | |
1021 | ||
1022 | void ByteCodeParser::handleCall( | |
1023 | int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize, | |
1024 | int callee, int argumentCountIncludingThis, int registerOffset) | |
1025 | { | |
1026 | Node* callTarget = get(VirtualRegister(callee)); | |
1027 | ||
1028 | CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( | |
1029 | m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), | |
1030 | m_inlineStackTop->m_callLinkInfos, m_callContextMap); | |
1031 | ||
1032 | handleCall( | |
1033 | result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget, | |
1034 | argumentCountIncludingThis, registerOffset, callLinkStatus); | |
1035 | } | |
1036 | ||
1037 | void ByteCodeParser::handleCall( | |
1038 | int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, | |
1039 | Node* callTarget, int argumentCountIncludingThis, int registerOffset, | |
1040 | CallLinkStatus callLinkStatus) | |
1041 | { | |
1042 | handleCall( | |
1043 | result, op, kind, instructionSize, callTarget, argumentCountIncludingThis, | |
1044 | registerOffset, callLinkStatus, getPrediction()); | |
1045 | } | |
1046 | ||
1047 | void ByteCodeParser::handleCall( | |
1048 | int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, | |
1049 | Node* callTarget, int argumentCountIncludingThis, int registerOffset, | |
1050 | CallLinkStatus callLinkStatus, SpeculatedType prediction) | |
1051 | { | |
1052 | ASSERT(registerOffset <= 0); | |
1053 | ||
1054 | if (callTarget->isCellConstant()) | |
1055 | callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell())); | |
1056 | ||
1057 | if (Options::verboseDFGByteCodeParsing()) | |
1058 | dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n"); | |
1059 | ||
1060 | if (!callLinkStatus.canOptimize()) { | |
1061 | // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically | |
1062 | // that we cannot optimize them. | |
1063 | ||
1064 | addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction); | |
1065 | return; | |
1066 | } | |
1067 | ||
1068 | unsigned nextOffset = m_currentIndex + instructionSize; | |
1069 | ||
1070 | OpInfo callOpInfo; | |
1071 | ||
1072 | if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) { | |
1073 | if (m_graph.compilation()) | |
1074 | m_graph.compilation()->noticeInlinedCall(); | |
1075 | return; | |
1076 | } | |
1077 | ||
1078 | #if ENABLE(FTL_NATIVE_CALL_INLINING) | |
1079 | if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) { | |
1080 | CallVariant callee = callLinkStatus[0]; | |
1081 | JSFunction* function = callee.function(); | |
1082 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); | |
1083 | if (function && function->isHostFunction()) { | |
1084 | emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset)); | |
1085 | callOpInfo = OpInfo(m_graph.freeze(function)); | |
1086 | ||
1087 | if (op == Call) | |
1088 | op = NativeCall; | |
1089 | else { | |
1090 | ASSERT(op == Construct); | |
1091 | op = NativeConstruct; | |
1092 | } | |
1093 | } | |
1094 | } | |
1095 | #endif | |
1096 | ||
1097 | addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction); | |
1098 | } | |
1099 | ||
1100 | void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind) | |
1101 | { | |
1102 | ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs)); | |
1103 | ||
1104 | int result = pc[1].u.operand; | |
1105 | int callee = pc[2].u.operand; | |
1106 | int thisReg = pc[3].u.operand; | |
1107 | int arguments = pc[4].u.operand; | |
1108 | int firstFreeReg = pc[5].u.operand; | |
1109 | int firstVarArgOffset = pc[6].u.operand; | |
1110 | ||
1111 | SpeculatedType prediction = getPrediction(); | |
1112 | ||
1113 | Node* callTarget = get(VirtualRegister(callee)); | |
1114 | ||
1115 | CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( | |
1116 | m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), | |
1117 | m_inlineStackTop->m_callLinkInfos, m_callContextMap); | |
1118 | if (callTarget->isCellConstant()) | |
1119 | callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell())); | |
1120 | ||
1121 | if (Options::verboseDFGByteCodeParsing()) | |
1122 | dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n"); | |
1123 | ||
1124 | if (callLinkStatus.canOptimize() | |
1125 | && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) { | |
1126 | if (m_graph.compilation()) | |
1127 | m_graph.compilation()->noticeInlinedCall(); | |
1128 | return; | |
1129 | } | |
1130 | ||
1131 | CallVarargsData* data = m_graph.m_callVarargsData.add(); | |
1132 | data->firstVarArgOffset = firstVarArgOffset; | |
1133 | ||
1134 | Node* thisChild = get(VirtualRegister(thisReg)); | |
1135 | ||
1136 | Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild); | |
1137 | VirtualRegister resultReg(result); | |
1138 | if (resultReg.isValid()) | |
1139 | set(resultReg, call); | |
1140 | } | |
1141 | ||
1142 | void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg) | |
1143 | { | |
1144 | Node* thisArgument; | |
1145 | if (thisArgumentReg.isValid()) | |
1146 | thisArgument = get(thisArgumentReg); | |
1147 | else | |
1148 | thisArgument = 0; | |
1149 | ||
1150 | JSCell* calleeCell; | |
1151 | Node* callTargetForCheck; | |
1152 | if (callee.isClosureCall()) { | |
1153 | calleeCell = callee.executable(); | |
1154 | callTargetForCheck = addToGraph(GetExecutable, callTarget); | |
1155 | } else { | |
1156 | calleeCell = callee.nonExecutableCallee(); | |
1157 | callTargetForCheck = callTarget; | |
1158 | } | |
1159 | ||
1160 | ASSERT(calleeCell); | |
1161 | addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument); | |
1162 | } | |
1163 | ||
1164 | void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis) | |
1165 | { | |
1166 | for (int i = 0; i < argumentCountIncludingThis; ++i) | |
1167 | addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset))); | |
1168 | } | |
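// The Phantom nodes keep the argument values live for OSR exit even when no call node ends
// up consuming them, e.g. when an intrinsic or a constant internal function replaces the
// call site.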
1169 | ||
1170 | unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind) | |
1171 | { | |
1172 | if (verbose) | |
1173 | dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n"); | |
1174 | ||
1175 | if (m_hasDebuggerEnabled) { | |
1176 | if (verbose) | |
1177 | dataLog(" Failing because the debugger is in use.\n"); | |
1178 | return UINT_MAX; | |
1179 | } | |
1180 | ||
1181 | FunctionExecutable* executable = callee.functionExecutable(); | |
1182 | if (!executable) { | |
1183 | if (verbose) | |
1184 | dataLog(" Failing because there is no function executable.\n"); | |
1185 | return UINT_MAX; | |
1186 | } | |
1187 | ||
1188 | // Does the number of arguments we're passing match the arity of the target? We currently | |
1189 | // inline only if the number of arguments passed is greater than or equal to the number | |
1190 | // of arguments expected. | |
1191 | if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) { | |
1192 | if (verbose) | |
1193 | dataLog(" Failing because of arity mismatch.\n"); | |
1194 | return UINT_MAX; | |
1195 | } | |
1196 | ||
1197 | // Do we have a code block, and does the code block's size match the heuristics/requirements for | |
1198 | // being an inline candidate? We might not have a code block (1) if code was thrown away, | |
1199 | // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and | |
1200 | // the specialization kind is construct. In the first two cases, we could still theoretically attempt | |
1201 | // to inline it if we had a static proof of what was being called; this might happen for example | |
1202 | // if you call a global function, where watchpointing gives us static information. Overall, | |
1203 | // it's a rare case because we expect that any hot callees would have already been compiled. | |
1204 | CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind); | |
1205 | if (!codeBlock) { | |
1206 | if (verbose) | |
1207 | dataLog(" Failing because no code block available.\n"); | |
1208 | return UINT_MAX; | |
1209 | } | |
1210 | CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel( | |
1211 | codeBlock, kind, callee.isClosureCall()); | |
1212 | if (verbose) { | |
1213 | dataLog(" Kind: ", kind, "\n"); | |
1214 | dataLog(" Is closure call: ", callee.isClosureCall(), "\n"); | |
1215 | dataLog(" Capability level: ", capabilityLevel, "\n"); | |
1216 | dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n"); | |
1217 | dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n"); | |
1218 | dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n"); | |
1219 | dataLog(" Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n"); | |
1220 | dataLog(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n"); | |
1221 | } | |
1222 | if (!canInline(capabilityLevel)) { | |
1223 | if (verbose) | |
1224 | dataLog(" Failing because the function is not inlineable.\n"); | |
1225 | return UINT_MAX; | |
1226 | } | |
1227 | ||
1228 | // Check if the caller is already too large. We do this check here because that's just | |
1229 | // where we happen to also have the callee's code block, and we want that for the | |
1230 | // purpose of unsetting SABI (shouldAlwaysBeInlined). | |
1231 | if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) { | |
1232 | codeBlock->m_shouldAlwaysBeInlined = false; | |
1233 | if (verbose) | |
1234 | dataLog(" Failing because the caller is too large.\n"); | |
1235 | return UINT_MAX; | |
1236 | } | |
1237 | ||
1238 | // FIXME: this should be better at predicting how much bloat we will introduce by inlining | |
1239 | // this function. | |
1240 | // https://bugs.webkit.org/show_bug.cgi?id=127627 | |
1241 | ||
1242 | // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These | |
1243 | // functions have very low fidelity profiling, and presumably they weren't very hot if they | |
1244 | // haven't gotten to Baseline yet. Consider not inlining these functions. | |
1245 | // https://bugs.webkit.org/show_bug.cgi?id=145503 | |
1246 | ||
1247 | // Have we exceeded inline stack depth, or are we trying to inline a recursive call to | |
1248 | // too many levels? If either of these are detected, then don't inline. We adjust our | |
1249 | // heuristics if we are dealing with a function that cannot otherwise be compiled. | |
1250 | ||
1251 | unsigned depth = 0; | |
1252 | unsigned recursion = 0; | |
1253 | ||
1254 | for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) { | |
1255 | ++depth; | |
1256 | if (depth >= Options::maximumInliningDepth()) { | |
1257 | if (verbose) | |
1258 | dataLog(" Failing because depth exceeded.\n"); | |
1259 | return UINT_MAX; | |
1260 | } | |
1261 | ||
1262 | if (entry->executable() == executable) { | |
1263 | ++recursion; | |
1264 | if (recursion >= Options::maximumInliningRecursion()) { | |
1265 | if (verbose) | |
1266 | dataLog(" Failing because recursion detected.\n"); | |
1267 | return UINT_MAX; | |
1268 | } | |
1269 | } | |
1270 | } | |
1271 | ||
1272 | if (verbose) | |
1273 | dataLog(" Inlining should be possible.\n"); | |
1274 | ||
1275 | // It might be possible to inline. | |
1276 | return codeBlock->instructionCount(); | |
1277 | } | |
1278 | ||
1279 | template<typename ChecksFunctor> | |
1280 | void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks) | |
1281 | { | |
1282 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); | |
1283 | ||
1284 | ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX); | |
1285 | ||
1286 | CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind); | |
1287 | insertChecks(codeBlock); | |
1288 | ||
1289 | // FIXME: Don't flush constants! | |
1290 | ||
1291 | int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize; | |
1292 | ||
1293 | ensureLocals( | |
1294 | VirtualRegister(inlineCallFrameStart).toLocal() + 1 + | |
1295 | JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters); | |
1296 | ||
1297 | size_t argumentPositionStart = m_graph.m_argumentPositions.size(); | |
1298 | ||
1299 | VirtualRegister resultReg(resultOperand); | |
1300 | if (resultReg.isValid()) | |
1301 | resultReg = m_inlineStackTop->remapOperand(resultReg); | |
1302 | ||
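| // Constructing this entry pushes a new frame onto the parser's inline stack, so the | |
| // callee's bytecode below is parsed in the context of the new InlineCallFrame. | |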
1303 | InlineStackEntry inlineStackEntry( | |
1304 | this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg, | |
1305 | (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind); | |
1306 | ||
1307 | // This is where the actual inlining really happens. | |
1308 | unsigned oldIndex = m_currentIndex; | |
1309 | m_currentIndex = 0; | |
1310 | ||
1311 | InlineVariableData inlineVariableData; | |
1312 | inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame; | |
1313 | inlineVariableData.argumentPositionStart = argumentPositionStart; | |
1314 | inlineVariableData.calleeVariable = 0; | |
1315 | ||
1316 | RELEASE_ASSERT( | |
1317 | m_inlineStackTop->m_inlineCallFrame->isClosureCall | |
1318 | == callee.isClosureCall()); | |
1319 | if (callee.isClosureCall()) { | |
1320 | VariableAccessData* calleeVariable = | |
1321 | set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData(); | |
1322 | ||
1323 | calleeVariable->mergeShouldNeverUnbox(true); | |
1324 | ||
1325 | inlineVariableData.calleeVariable = calleeVariable; | |
1326 | } | |
1327 | ||
1328 | m_graph.m_inlineVariableData.append(inlineVariableData); | |
1329 | ||
1330 | parseCodeBlock(); | |
1331 | clearCaches(); // Reset our state now that we're back to the outer code. | |
1332 | ||
1333 | m_currentIndex = oldIndex; | |
1334 | ||
1335 | // If the inlined code created some new basic blocks, then we have linking to do. | |
1336 | if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) { | |
1337 | ||
1338 | ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty()); | |
1339 | if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking) | |
1340 | linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets); | |
1341 | else | |
1342 | ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked); | |
1343 | ||
1344 | if (callerLinkability == CallerDoesNormalLinking) | |
1345 | cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead); | |
1346 | ||
1347 | linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); | |
1348 | } else | |
1349 | ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty()); | |
1350 | ||
1351 | BasicBlock* lastBlock = m_graph.lastBlock(); | |
1352 | // If there was a return, but no early returns, then we're done. We allow parsing of | |
1353 | // the caller to continue in whatever basic block we're in right now. | |
1354 | if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) { | |
1355 | if (Options::verboseDFGByteCodeParsing()) | |
1356 | dataLog(" Allowing parsing to continue in last inlined block.\n"); | |
1357 | ||
1358 | ASSERT(lastBlock->isEmpty() || !lastBlock->terminal()); | |
1359 | ||
1360 | // If we created new blocks then the last block needs linking, but in the | |
1361 | // caller. It doesn't need to be linked to, but it needs outgoing links. | |
1362 | if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) { | |
1363 | // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter | |
1364 | // for release builds because this block will never serve as a potential target | |
1365 | // in the linker's binary search. | |
1366 | if (Options::verboseDFGByteCodeParsing()) | |
1367 | dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n"); | |
1368 | lastBlock->bytecodeBegin = m_currentIndex; | |
1369 | if (callerLinkability == CallerDoesNormalLinking) { | |
1370 | if (verbose) | |
1371 | dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n"); | |
1372 | m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); | |
1373 | } | |
1374 | } | |
1375 | ||
1376 | m_currentBlock = m_graph.lastBlock(); | |
1377 | return; | |
1378 | } | |
1379 | ||
1380 | if (Options::verboseDFGByteCodeParsing()) | |
1381 | dataLog(" Creating new block after inlining.\n"); | |
1382 | ||
1383 | // If we get to this point then all blocks must end in some sort of terminal. | |
1384 | ASSERT(lastBlock->terminal()); | |
1385 | ||
1386 | // Need to create a new basic block for the continuation at the caller. | |
1387 | RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN)); | |
1388 | ||
1389 | // Link the early returns to the basic block we're about to create. | |
1390 | for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) { | |
1391 | if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking) | |
1392 | continue; | |
1393 | BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block; | |
1394 | ASSERT(!blockToLink->isLinked); | |
1395 | Node* node = blockToLink->terminal(); | |
1396 | ASSERT(node->op() == Jump); | |
1397 | ASSERT(!node->targetBlock()); | |
1398 | node->targetBlock() = block.get(); | |
1399 | inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false; | |
1400 | if (verbose) | |
1401 | dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n"); | |
1402 | blockToLink->didLink(); | |
1403 | } | |
1404 | ||
1405 | m_currentBlock = block.get(); | |
1406 | ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset); | |
1407 | if (verbose) | |
1408 | dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n"); | |
1409 | if (callerLinkability == CallerDoesNormalLinking) { | |
1410 | m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); | |
1411 | m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); | |
1412 | } | |
1413 | m_graph.appendBlock(block); | |
1414 | prepareToParseBlock(); | |
1415 | } | |
1416 | ||
1417 | void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block) | |
1418 | { | |
1419 | // It's possible that the callsite block head is not owned by the caller. | |
1420 | if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) { | |
1421 | // It's definitely owned by the caller, because the caller created new blocks. | |
1422 | // Assert that this all adds up. | |
1423 | ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block); | |
1424 | ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking); | |
1425 | inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false; | |
1426 | } else { | |
1427 | // It's definitely not owned by the caller. Tell the caller that it does not | |
1428 | // need to link its callsite block head, because we did it already. | |
1429 | ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking); | |
1430 | ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block); | |
1431 | inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false; | |
1432 | } | |
1433 | } | |
1434 | ||
1435 | template<typename ChecksFunctor> | |
1436 | bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks) | |
1437 | { | |
1438 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); | |
1439 | ||
1440 | if (!inliningBalance) | |
1441 | return false; | |
1442 | ||
1443 | bool didInsertChecks = false; | |
1444 | auto insertChecksWithAccounting = [&] () { | |
1445 | insertChecks(nullptr); | |
1446 | didInsertChecks = true; | |
1447 | }; | |
1448 | ||
1449 | if (verbose) | |
1450 | dataLog(" Considering callee ", callee, "\n"); | |
1451 | ||
1452 | // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because | |
1453 | // we currently don't have any way of getting profiling information for arguments to non-JS varargs | |
1454 | // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow, | |
1455 | // there are no callsite value profiles, and native functions won't have callee value profiles for | |
1456 | // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to | |
1457 | // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without | |
1458 | // calling LoadVarargs twice. | |
1459 | if (!InlineCallFrame::isVarargs(kind)) { | |
1460 | if (InternalFunction* function = callee.internalFunction()) { | |
1461 | if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) { | |
1462 | RELEASE_ASSERT(didInsertChecks); | |
1463 | addToGraph(Phantom, callTargetNode); | |
1464 | emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); | |
1465 | inliningBalance--; | |
1466 | return true; | |
1467 | } | |
1468 | RELEASE_ASSERT(!didInsertChecks); | |
1469 | return false; | |
1470 | } | |
1471 | ||
1472 | Intrinsic intrinsic = callee.intrinsicFor(specializationKind); | |
1473 | if (intrinsic != NoIntrinsic) { | |
1474 | if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { | |
1475 | RELEASE_ASSERT(didInsertChecks); | |
1476 | addToGraph(Phantom, callTargetNode); | |
1477 | emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); | |
1478 | inliningBalance--; | |
1479 | return true; | |
1480 | } | |
1481 | RELEASE_ASSERT(!didInsertChecks); | |
1482 | return false; | |
1483 | } | |
1484 | } | |
1485 | ||
1486 | unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind); | |
1487 | if (myInliningCost > inliningBalance) | |
1488 | return false; | |
1489 | ||
1490 | Instruction* savedCurrentInstruction = m_currentInstruction; | |
1491 | inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks); | |
1492 | inliningBalance -= myInliningCost; | |
1493 | m_currentInstruction = savedCurrentInstruction; | |
1494 | return true; | |
1495 | } | |
1496 | ||
1497 | bool ByteCodeParser::handleInlining( | |
1498 | Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, | |
1499 | int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument, | |
1500 | VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, | |
1501 | unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) | |
1502 | { | |
1503 | if (verbose) { | |
1504 | dataLog("Handling inlining...\n"); | |
1505 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1506 | } | |
1507 | CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); | |
1508 | ||
1509 | if (!callLinkStatus.size()) { | |
1510 | if (verbose) | |
1511 | dataLog("Bailing inlining.\n"); | |
1512 | return false; | |
1513 | } | |
1514 | ||
1515 | if (InlineCallFrame::isVarargs(kind) | |
1516 | && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) { | |
1517 | if (verbose) | |
1518 | dataLog("Bailing inlining because of varargs.\n"); | |
1519 | return false; | |
1520 | } | |
1521 | ||
1522 | unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount(); | |
1523 | if (specializationKind == CodeForConstruct) | |
1524 | inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount()); | |
1525 | if (callLinkStatus.isClosureCall()) | |
1526 | inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount()); | |
1527 | ||
1528 | // First check if we can avoid creating control flow. Our inliner does some CFG | |
1529 | // simplification on the fly and this helps reduce compile times, but we can only leverage | |
1530 | // this in cases where we don't need control flow diamonds to check the callee. | |
1531 | if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { | |
1532 | int registerOffset; | |
1533 | ||
1534 | // Only used for varargs calls. | |
1535 | unsigned mandatoryMinimum = 0; | |
1536 | unsigned maxNumArguments = 0; | |
1537 | ||
1538 | if (InlineCallFrame::isVarargs(kind)) { | |
1539 | if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable()) | |
1540 | mandatoryMinimum = functionExecutable->parameterCount(); | |
1541 | else | |
1542 | mandatoryMinimum = 0; | |
1543 | ||
1544 | // includes "this" | |
1545 | maxNumArguments = std::max( | |
1546 | callLinkStatus.maxNumArguments(), | |
1547 | mandatoryMinimum + 1); | |
1548 | ||
1549 | // We sort of pretend that this *is* the number of arguments that were passed. | |
1550 | argumentCountIncludingThis = maxNumArguments; | |
1551 | ||
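| // Reserve stack space for the worst-case argument count plus the call frame header, | |
| // rounding the (negative) offset to the required stack alignment. | |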
1552 | registerOffset = registerOffsetOrFirstFreeReg + 1; | |
1553 | registerOffset -= maxNumArguments; // includes "this" | |
1554 | registerOffset -= JSStack::CallFrameHeaderSize; | |
1555 | registerOffset = -WTF::roundUpToMultipleOf( | |
1556 | stackAlignmentRegisters(), | |
1557 | -registerOffset); | |
1558 | } else | |
1559 | registerOffset = registerOffsetOrFirstFreeReg; | |
1560 | ||
1561 | bool result = attemptToInlineCall( | |
1562 | callTargetNode, resultOperand, callLinkStatus[0], registerOffset, | |
1563 | argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction, | |
1564 | inliningBalance, [&] (CodeBlock* codeBlock) { | |
1565 | emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument); | |
1566 | ||
1567 | // If we have a varargs call, we want to extract the arguments right now. | |
1568 | if (InlineCallFrame::isVarargs(kind)) { | |
1569 | int remappedRegisterOffset = | |
1570 | m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset(); | |
1571 | ||
1572 | ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal()); | |
1573 | ||
1574 | int argumentStart = registerOffset + JSStack::CallFrameHeaderSize; | |
1575 | int remappedArgumentStart = | |
1576 | m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset(); | |
1577 | ||
1578 | LoadVarargsData* data = m_graph.m_loadVarargsData.add(); | |
1579 | data->start = VirtualRegister(remappedArgumentStart + 1); | |
1580 | data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount); | |
1581 | data->offset = argumentsOffset; | |
1582 | data->limit = maxNumArguments; | |
1583 | data->mandatoryMinimum = mandatoryMinimum; | |
1584 | ||
1585 | addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument)); | |
1586 | ||
1587 | // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument | |
1588 | // and argumentsArgument for the baseline JIT. However, we only need a Phantom for | |
1589 | // callTargetNode because the other 2 are still in use and alive at this point. | |
1590 | addToGraph(Phantom, callTargetNode); | |
1591 | ||
1592 | // In DFG IR before SSA, we cannot insert control flow between the | |
1593 | // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG | |
1594 | // SSA. Fortunately, we also have other reasons for not inserting control flow | |
1595 | // before SSA. | |
1596 | ||
1597 | VariableAccessData* countVariable = newVariableAccessData( | |
1598 | VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount)); | |
1599 | // This is pretty lame, but it will force the count to be flushed as an int. This doesn't | |
1600 | // matter very much, since our use of a SetArgument and Flushes for this local slot is | |
1601 | // mostly just a formality. | |
1602 | countVariable->predict(SpecInt32); | |
1603 | countVariable->mergeIsProfitableToUnbox(true); | |
1604 | Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable)); | |
1605 | m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount); | |
1606 | ||
1607 | set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet); | |
1608 | for (unsigned argument = 1; argument < maxNumArguments; ++argument) { | |
1609 | VariableAccessData* variable = newVariableAccessData( | |
1610 | VirtualRegister(remappedArgumentStart + argument)); | |
1611 | variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit. | |
1612 | ||
1613 | // For a while it had been my intention to do things like this inside the | |
1614 | // prediction injection phase. But in this case it's really best to do it here, | |
1615 | // because it's here that we have access to the variable access datas for the | |
1616 | // inlining we're about to do. | |
1617 | // | |
1618 | // Something else that's interesting here is that we'd really love to get | |
1619 | // predictions from the arguments loaded at the callsite, rather than the | |
1620 | // arguments received inside the callee. But that probably won't matter for most | |
1621 | // calls. | |
1622 | if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) { | |
1623 | ConcurrentJITLocker locker(codeBlock->m_lock); | |
1624 | if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument)) | |
1625 | variable->predict(profile->computeUpdatedPrediction(locker)); | |
1626 | } | |
1627 | ||
1628 | Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); | |
1629 | m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument); | |
1630 | } | |
1631 | } | |
1632 | }); | |
1633 | if (verbose) { | |
1634 | dataLog("Done inlining (simple).\n"); | |
1635 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1636 | dataLog("Result: ", result, "\n"); | |
1637 | } | |
1638 | return result; | |
1639 | } | |
1640 | ||
1641 | // We need to create some kind of switch over callee. For now we only do this if we believe that | |
1642 | // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to | |
1643 | // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in | |
1644 | // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that | |
1645 | // we could improve on that front by doing polymorphic inlining while still gathering the | |
1646 | // profiling. | |
1647 | if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining() | |
1648 | || InlineCallFrame::isVarargs(kind)) { | |
1649 | if (verbose) { | |
1650 | dataLog("Bailing inlining (hard).\n"); | |
1651 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1652 | } | |
1653 | return false; | |
1654 | } | |
1655 | ||
1656 | unsigned oldOffset = m_currentIndex; | |
1657 | ||
1658 | bool allAreClosureCalls = true; | |
1659 | bool allAreDirectCalls = true; | |
1660 | for (unsigned i = callLinkStatus.size(); i--;) { | |
1661 | if (callLinkStatus[i].isClosureCall()) | |
1662 | allAreDirectCalls = false; | |
1663 | else | |
1664 | allAreClosureCalls = false; | |
1665 | } | |
1666 | ||
1667 | Node* thingToSwitchOn; | |
1668 | if (allAreDirectCalls) | |
1669 | thingToSwitchOn = callTargetNode; | |
1670 | else if (allAreClosureCalls) | |
1671 | thingToSwitchOn = addToGraph(GetExecutable, callTargetNode); | |
1672 | else { | |
1673 | // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases | |
1674 | // where it would be beneficial. It might be best to handle these cases as if all calls were | |
1675 | // closure calls. | |
1676 | // https://bugs.webkit.org/show_bug.cgi?id=136020 | |
1677 | if (verbose) { | |
1678 | dataLog("Bailing inlining (mix).\n"); | |
1679 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1680 | } | |
1681 | return false; | |
1682 | } | |
1683 | ||
1684 | if (verbose) { | |
1685 | dataLog("Doing hard inlining...\n"); | |
1686 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1687 | } | |
1688 | ||
1689 | int registerOffset = registerOffsetOrFirstFreeReg; | |
1690 | ||
1691 | // This makes me wish that we were in SSA all the time. We need to pick a variable into which to | |
1692 | // store the callee so that it will be accessible to all of the blocks we're about to create. We | |
1693 | // get away with doing an immediate-set here because we wouldn't have performed any side effects | |
1694 | // yet. | |
1695 | if (verbose) | |
1696 | dataLog("Register offset: ", registerOffset); | |
1697 | VirtualRegister calleeReg(registerOffset + JSStack::Callee); | |
1698 | calleeReg = m_inlineStackTop->remapOperand(calleeReg); | |
1699 | if (verbose) | |
1700 | dataLog("Callee is going to be ", calleeReg, "\n"); | |
1701 | setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush); | |
1702 | ||
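| // Emit a Switch over the callee (or its executable, for closure calls) so that each | |
| // observed callee gets its own inlined block; the fall-through becomes the slow path. | |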
1703 | SwitchData& data = *m_graph.m_switchData.add(); | |
1704 | data.kind = SwitchCell; | |
1705 | addToGraph(Switch, OpInfo(&data), thingToSwitchOn); | |
1706 | ||
1707 | BasicBlock* originBlock = m_currentBlock; | |
1708 | if (verbose) | |
1709 | dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n"); | |
1710 | originBlock->didLink(); | |
1711 | cancelLinkingForBlock(m_inlineStackTop, originBlock); | |
1712 | ||
1713 | // Each inlined callee will have a landing block that it returns at. They should all have jumps | |
1714 | // to the continuation block, which we create last. | |
1715 | Vector<BasicBlock*> landingBlocks; | |
1716 | ||
1717 | // We may force this true if we give up on inlining any of the edges. | |
1718 | bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath(); | |
1719 | ||
1720 | if (verbose) | |
1721 | dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n"); | |
1722 | ||
1723 | for (unsigned i = 0; i < callLinkStatus.size(); ++i) { | |
1724 | m_currentIndex = oldOffset; | |
1725 | RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); | |
1726 | m_currentBlock = block.get(); | |
1727 | m_graph.appendBlock(block); | |
1728 | prepareToParseBlock(); | |
1729 | ||
1730 | Node* myCallTargetNode = getDirect(calleeReg); | |
1731 | ||
1732 | bool inliningResult = attemptToInlineCall( | |
1733 | myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset, | |
1734 | argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction, | |
1735 | inliningBalance, [&] (CodeBlock*) { }); | |
1736 | ||
1737 | if (!inliningResult) { | |
1738 | // That failed so we let the block die. Nothing interesting should have been added to | |
1739 | // the block. We also give up on inlining any of the (less frequent) callees. | |
1740 | ASSERT(m_currentBlock == block.get()); | |
1741 | ASSERT(m_graph.m_blocks.last() == block); | |
1742 | m_graph.killBlockAndItsContents(block.get()); | |
1743 | m_graph.m_blocks.removeLast(); | |
1744 | ||
1745 | // The fact that inlining failed means we need a slow path. | |
1746 | couldTakeSlowPath = true; | |
1747 | break; | |
1748 | } | |
1749 | ||
1750 | JSCell* thingToCaseOn; | |
1751 | if (allAreDirectCalls) | |
1752 | thingToCaseOn = callLinkStatus[i].nonExecutableCallee(); | |
1753 | else { | |
1754 | ASSERT(allAreClosureCalls); | |
1755 | thingToCaseOn = callLinkStatus[i].executable(); | |
1756 | } | |
1757 | data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get())); | |
1758 | m_currentIndex = nextOffset; | |
1759 | processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue. | |
1760 | addToGraph(Jump); | |
1761 | if (verbose) | |
1762 | dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n"); | |
1763 | m_currentBlock->didLink(); | |
1764 | landingBlocks.append(m_currentBlock); | |
1765 | ||
1766 | if (verbose) | |
1767 | dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n"); | |
1768 | } | |
1769 | ||
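| // The slow path block is the switch's fall-through: it handles any callee that was not | |
| // covered by the cases appended above. | |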
1770 | RefPtr<BasicBlock> slowPathBlock = adoptRef( | |
1771 | new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); | |
1772 | m_currentIndex = oldOffset; | |
1773 | data.fallThrough = BranchTarget(slowPathBlock.get()); | |
1774 | m_graph.appendBlock(slowPathBlock); | |
1775 | if (verbose) | |
1776 | dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n"); | |
1777 | slowPathBlock->didLink(); | |
1778 | prepareToParseBlock(); | |
1779 | m_currentBlock = slowPathBlock.get(); | |
1780 | Node* myCallTargetNode = getDirect(calleeReg); | |
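| // If profiling says the call site could still take a slow path, emit a real call here. | |
| // Otherwise the fall-through is believed unreachable, so plant CheckBadCell (a check | |
| // that cannot pass) and produce a BottomValue result. | |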
1781 | if (couldTakeSlowPath) { | |
1782 | addCall( | |
1783 | resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis, | |
1784 | registerOffset, prediction); | |
1785 | } else { | |
1786 | addToGraph(CheckBadCell); | |
1787 | addToGraph(Phantom, myCallTargetNode); | |
1788 | emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); | |
1789 | ||
1790 | set(VirtualRegister(resultOperand), addToGraph(BottomValue)); | |
1791 | } | |
1792 | ||
1793 | m_currentIndex = nextOffset; | |
1794 | processSetLocalQueue(); | |
1795 | addToGraph(Jump); | |
1796 | landingBlocks.append(m_currentBlock); | |
1797 | ||
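| // Finally, create the continuation block where parsing of the caller resumes; every | |
| // landing block's Jump is pointed at it below. | |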
1798 | RefPtr<BasicBlock> continuationBlock = adoptRef( | |
1799 | new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); | |
1800 | m_graph.appendBlock(continuationBlock); | |
1801 | if (verbose) | |
1802 | dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n"); | |
1803 | m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get())); | |
1804 | prepareToParseBlock(); | |
1805 | m_currentBlock = continuationBlock.get(); | |
1806 | ||
1807 | for (unsigned i = landingBlocks.size(); i--;) | |
1808 | landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get(); | |
1809 | ||
1810 | m_currentIndex = oldOffset; | |
1811 | ||
1812 | if (verbose) { | |
1813 | dataLog("Done inlining (hard).\n"); | |
1814 | dataLog("Stack: ", currentCodeOrigin(), "\n"); | |
1815 | } | |
1816 | return true; | |
1817 | } | |
1818 | ||
1819 | template<typename ChecksFunctor> | |
1820 | bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) | |
1821 | { | |
1822 | if (argumentCountIncludingThis == 1) { // Math.min() or Math.max() with no arguments | |
1823 | insertChecks(); | |
1824 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); | |
1825 | return true; | |
1826 | } | |
1827 | ||
1828 | if (argumentCountIncludingThis == 2) { // Math.min(x) or Math.max(x) | |
1829 | insertChecks(); | |
1830 | Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); | |
1831 | addToGraph(Phantom, Edge(result, NumberUse)); | |
1832 | set(VirtualRegister(resultOperand), result); | |
1833 | return true; | |
1834 | } | |
1835 | ||
1836 | if (argumentCountIncludingThis == 3) { // Math.min(x, y) or Math.max(x, y) | |
1837 | insertChecks(); | |
1838 | set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); | |
1839 | return true; | |
1840 | } | |
1841 | ||
1842 | // Don't handle >=3 arguments for now. | |
1843 | return false; | |
1844 | } | |
1845 | ||
1846 | template<typename ChecksFunctor> | |
1847 | bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) | |
1848 | { | |
1849 | switch (intrinsic) { | |
1850 | case AbsIntrinsic: { | |
1851 | if (argumentCountIncludingThis == 1) { // Math.abs() | |
1852 | insertChecks(); | |
1853 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); | |
1854 | return true; | |
1855 | } | |
1856 | ||
1857 | if (!MacroAssembler::supportsFloatingPointAbs()) | |
1858 | return false; | |
1859 | ||
1860 | insertChecks(); | |
1861 | Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); | |
1862 | if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) | |
1863 | node->mergeFlags(NodeMayOverflowInDFG); | |
1864 | set(VirtualRegister(resultOperand), node); | |
1865 | return true; | |
1866 | } | |
1867 | ||
1868 | case MinIntrinsic: | |
1869 | return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); | |
1870 | ||
1871 | case MaxIntrinsic: | |
1872 | return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); | |
1873 | ||
1874 | case SqrtIntrinsic: | |
1875 | case CosIntrinsic: | |
1876 | case SinIntrinsic: | |
1877 | case LogIntrinsic: { | |
1878 | if (argumentCountIncludingThis == 1) { | |
1879 | insertChecks(); | |
1880 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); | |
1881 | return true; | |
1882 | } | |
1883 | ||
1884 | switch (intrinsic) { | |
1885 | case SqrtIntrinsic: | |
1886 | insertChecks(); | |
1887 | set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset)))); | |
1888 | return true; | |
1889 | ||
1890 | case CosIntrinsic: | |
1891 | insertChecks(); | |
1892 | set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset)))); | |
1893 | return true; | |
1894 | ||
1895 | case SinIntrinsic: | |
1896 | insertChecks(); | |
1897 | set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset)))); | |
1898 | return true; | |
1899 | ||
1900 | case LogIntrinsic: | |
1901 | insertChecks(); | |
1902 | set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset)))); | |
1903 | return true; | |
1904 | ||
1905 | default: | |
1906 | RELEASE_ASSERT_NOT_REACHED(); | |
1907 | return false; | |
1908 | } | |
1909 | } | |
1910 | ||
1911 | case PowIntrinsic: { | |
1912 | if (argumentCountIncludingThis < 3) { | |
1913 | // Math.pow() and Math.pow(x) return NaN. | |
1914 | insertChecks(); | |
1915 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); | |
1916 | return true; | |
1917 | } | |
1918 | insertChecks(); | |
1919 | VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); | |
1920 | VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); | |
1921 | set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand))); | |
1922 | return true; | |
1923 | } | |
1924 | ||
1925 | case ArrayPushIntrinsic: { | |
1926 | if (argumentCountIncludingThis != 2) | |
1927 | return false; | |
1928 | ||
1929 | ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); | |
1930 | if (!arrayMode.isJSArray()) | |
1931 | return false; | |
1932 | switch (arrayMode.type()) { | |
1933 | case Array::Undecided: | |
1934 | case Array::Int32: | |
1935 | case Array::Double: | |
1936 | case Array::Contiguous: | |
1937 | case Array::ArrayStorage: { | |
1938 | insertChecks(); | |
1939 | Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); | |
1940 | set(VirtualRegister(resultOperand), arrayPush); | |
1941 | ||
1942 | return true; | |
1943 | } | |
1944 | ||
1945 | default: | |
1946 | return false; | |
1947 | } | |
1948 | } | |
1949 | ||
1950 | case ArrayPopIntrinsic: { | |
1951 | if (argumentCountIncludingThis != 1) | |
1952 | return false; | |
1953 | ||
1954 | ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); | |
1955 | if (!arrayMode.isJSArray()) | |
1956 | return false; | |
1957 | switch (arrayMode.type()) { | |
1958 | case Array::Int32: | |
1959 | case Array::Double: | |
1960 | case Array::Contiguous: | |
1961 | case Array::ArrayStorage: { | |
1962 | insertChecks(); | |
1963 | Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); | |
1964 | set(VirtualRegister(resultOperand), arrayPop); | |
1965 | return true; | |
1966 | } | |
1967 | ||
1968 | default: | |
1969 | return false; | |
1970 | } | |
1971 | } | |
1972 | ||
1973 | case CharCodeAtIntrinsic: { | |
1974 | if (argumentCountIncludingThis != 2) | |
1975 | return false; | |
1976 | ||
1977 | insertChecks(); | |
1978 | VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); | |
1979 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); | |
1980 | Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); | |
1981 | ||
1982 | set(VirtualRegister(resultOperand), charCode); | |
1983 | return true; | |
1984 | } | |
1985 | ||
1986 | case CharAtIntrinsic: { | |
1987 | if (argumentCountIncludingThis != 2) | |
1988 | return false; | |
1989 | ||
1990 | insertChecks(); | |
1991 | VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); | |
1992 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); | |
1993 | Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); | |
1994 | ||
1995 | set(VirtualRegister(resultOperand), charCode); | |
1996 | return true; | |
1997 | } | |
1998 | case Clz32Intrinsic: { | |
1999 | insertChecks(); | |
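| // Math.clz32() with no argument coerces undefined to 0, and clz32(0) is 32, so we can | |
| // fold the result to the constant 32. | |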
2000 | if (argumentCountIncludingThis == 1) | |
2001 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); | |
2002 | else { | |
2003 | Node* operand = get(virtualRegisterForArgument(1, registerOffset)); | |
2004 | set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand)); | |
2005 | } | |
2006 | return true; | |
2007 | } | |
2008 | case FromCharCodeIntrinsic: { | |
2009 | if (argumentCountIncludingThis != 2) | |
2010 | return false; | |
2011 | ||
2012 | insertChecks(); | |
2013 | VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); | |
2014 | Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); | |
2015 | ||
2016 | set(VirtualRegister(resultOperand), charCode); | |
2017 | ||
2018 | return true; | |
2019 | } | |
2020 | ||
2021 | case RegExpExecIntrinsic: { | |
2022 | if (argumentCountIncludingThis != 2) | |
2023 | return false; | |
2024 | ||
2025 | insertChecks(); | |
2026 | Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); | |
2027 | set(VirtualRegister(resultOperand), regExpExec); | |
2028 | ||
2029 | return true; | |
2030 | } | |
2031 | ||
2032 | case RegExpTestIntrinsic: { | |
2033 | if (argumentCountIncludingThis != 2) | |
2034 | return false; | |
2035 | ||
2036 | insertChecks(); | |
2037 | Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); | |
2038 | set(VirtualRegister(resultOperand), regExpExec); | |
2039 | ||
2040 | return true; | |
2041 | } | |
2042 | case RoundIntrinsic: { | |
2043 | if (argumentCountIncludingThis == 1) { | |
2044 | insertChecks(); | |
2045 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); | |
2046 | return true; | |
2047 | } | |
2048 | if (argumentCountIncludingThis == 2) { | |
2049 | insertChecks(); | |
2050 | Node* operand = get(virtualRegisterForArgument(1, registerOffset)); | |
2051 | Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand); | |
2052 | set(VirtualRegister(resultOperand), roundNode); | |
2053 | return true; | |
2054 | } | |
2055 | return false; | |
2056 | } | |
2057 | case IMulIntrinsic: { | |
2058 | if (argumentCountIncludingThis != 3) | |
2059 | return false; | |
2060 | insertChecks(); | |
2061 | VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset); | |
2062 | VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); | |
2063 | Node* left = get(leftOperand); | |
2064 | Node* right = get(rightOperand); | |
2065 | set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right)); | |
2066 | return true; | |
2067 | } | |
2068 | ||
2069 | case FRoundIntrinsic: { | |
2070 | if (argumentCountIncludingThis != 2) | |
2071 | return false; | |
2072 | insertChecks(); | |
2073 | VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); | |
2074 | set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand))); | |
2075 | return true; | |
2076 | } | |
2077 | ||
2078 | case DFGTrueIntrinsic: { | |
2079 | insertChecks(); | |
2080 | set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); | |
2081 | return true; | |
2082 | } | |
2083 | ||
2084 | case OSRExitIntrinsic: { | |
2085 | insertChecks(); | |
2086 | addToGraph(ForceOSRExit); | |
2087 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); | |
2088 | return true; | |
2089 | } | |
2090 | ||
2091 | case IsFinalTierIntrinsic: { | |
2092 | insertChecks(); | |
2093 | set(VirtualRegister(resultOperand), | |
2094 | jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true))); | |
2095 | return true; | |
2096 | } | |
2097 | ||
2098 | case SetInt32HeapPredictionIntrinsic: { | |
2099 | insertChecks(); | |
2100 | for (int i = 1; i < argumentCountIncludingThis; ++i) { | |
2101 | Node* node = get(virtualRegisterForArgument(i, registerOffset)); | |
2102 | if (node->hasHeapPrediction()) | |
2103 | node->setHeapPrediction(SpecInt32); | |
2104 | } | |
2105 | set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); | |
2106 | return true; | |
2107 | } | |
2108 | ||
2109 | case CheckInt32Intrinsic: { | |
2110 | insertChecks(); | |
2111 | for (int i = 1; i < argumentCountIncludingThis; ++i) { | |
2112 | Node* node = get(virtualRegisterForArgument(i, registerOffset)); | |
2113 | addToGraph(Phantom, Edge(node, Int32Use)); | |
2114 | } | |
2115 | set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); | |
2116 | return true; | |
2117 | } | |
2118 | ||
2119 | case FiatInt52Intrinsic: { | |
2120 | if (argumentCountIncludingThis != 2) | |
2121 | return false; | |
2122 | insertChecks(); | |
2123 | VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); | |
2124 | if (enableInt52()) | |
2125 | set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand))); | |
2126 | else | |
2127 | set(VirtualRegister(resultOperand), get(operand)); | |
2128 | return true; | |
2129 | } | |
2130 | ||
2131 | default: | |
2132 | return false; | |
2133 | } | |
2134 | } | |
2135 | ||
2136 | template<typename ChecksFunctor> | |
2137 | bool ByteCodeParser::handleTypedArrayConstructor( | |
2138 | int resultOperand, InternalFunction* function, int registerOffset, | |
2139 | int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) | |
2140 | { | |
2141 | if (!isTypedView(type)) | |
2142 | return false; | |
2143 | ||
2144 | if (function->classInfo() != constructorClassInfoForType(type)) | |
2145 | return false; | |
2146 | ||
2147 | if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject()) | |
2148 | return false; | |
2149 | ||
2150 | // We only have an intrinsic for the case where you say: | |
2151 | // | |
2152 | // new FooArray(blah); | |
2153 | // | |
2154 | // Of course, 'blah' could be any of the following: | |
2155 | // | |
2156 | // - Integer, indicating that you want to allocate an array of that length. | |
2157 | // This is the thing we're hoping for, and what we can actually do meaningful | |
2158 | // optimizations for. | |
2159 | // | |
2160 | // - Array buffer, indicating that you want to create a view onto that _entire_ | |
2161 | // buffer. | |
2162 | // | |
2163 | // - Non-buffer object, indicating that you want to create a copy of that | |
2164 | // object by pretending that it quacks like an array. | |
2165 | // | |
2166 | // - Anything else, indicating that you want to have an exception thrown at | |
2167 | // you. | |
2168 | // | |
2169 | // The intrinsic, NewTypedArray, will behave as if it could do any of these | |
2170 | // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is | |
2171 | // predicted Int32, then we lock it in as a normal typed array allocation. | |
2172 | // Otherwise, NewTypedArray turns into a totally opaque function call that | |
2173 | // may clobber the world - by virtue of it accessing properties on what could | |
2174 | // be an object. | |
2175 | // | |
2176 | // Note that although the generic form of NewTypedArray sounds sort of awful, | |
2177 | // it is actually quite likely to be more efficient than a fully generic | |
2178 | // Construct. So, we might want to think about making NewTypedArray variadic, | |
2179 | // or else making Construct not super slow. | |
2180 | ||
2181 | if (argumentCountIncludingThis != 2) | |
2182 | return false; | |
2183 | ||
2184 | insertChecks(); | |
2185 | set(VirtualRegister(resultOperand), | |
2186 | addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); | |
2187 | return true; | |
2188 | } | |
2189 | ||
2190 | template<typename ChecksFunctor> | |
2191 | bool ByteCodeParser::handleConstantInternalFunction( | |
2192 | int resultOperand, InternalFunction* function, int registerOffset, | |
2193 | int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks) | |
2194 | { | |
2195 | if (verbose) | |
2196 | dataLog(" Handling constant internal function ", JSValue(function), "\n"); | |
2197 | ||
2198 | // If we ever find that we have a lot of internal functions that we specialize for, | |
2199 | // then we should probably have some sort of hashtable dispatch, or maybe even | |
2200 | // dispatch straight through the MethodTable of the InternalFunction. But for now, | |
2201 | // it seems that this case is hit infrequently enough, and the number of functions | |
2202 | // we know about is small enough, that having just a linear cascade of if statements | |
2203 | // is good enough. | |
2204 | ||
2205 | if (function->classInfo() == ArrayConstructor::info()) { | |
2206 | if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject()) | |
2207 | return false; | |
2208 | ||
2209 | insertChecks(); | |
2210 | if (argumentCountIncludingThis == 2) { | |
2211 | set(VirtualRegister(resultOperand), | |
2212 | addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); | |
2213 | return true; | |
2214 | } | |
2215 | ||
2216 | // FIXME: Array constructor should use "this" as newTarget. | |
2217 | for (int i = 1; i < argumentCountIncludingThis; ++i) | |
2218 | addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); | |
2219 | set(VirtualRegister(resultOperand), | |
2220 | addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0))); | |
2221 | return true; | |
2222 | } | |
2223 | ||
2224 | if (function->classInfo() == StringConstructor::info()) { | |
2225 | insertChecks(); | |
2226 | ||
2227 | Node* result; | |
2228 | ||
2229 | if (argumentCountIncludingThis <= 1) | |
2230 | result = jsConstant(m_vm->smallStrings.emptyString()); | |
2231 | else | |
2232 | result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); | |
2233 | ||
2234 | if (kind == CodeForConstruct) | |
2235 | result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result); | |
2236 | ||
2237 | set(VirtualRegister(resultOperand), result); | |
2238 | return true; | |
2239 | } | |
2240 | ||
2241 | for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) { | |
2242 | bool result = handleTypedArrayConstructor( | |
2243 | resultOperand, function, registerOffset, argumentCountIncludingThis, | |
2244 | indexToTypedArrayType(typeIndex), insertChecks); | |
2245 | if (result) | |
2246 | return true; | |
2247 | } | |
2248 | ||
2249 | return false; | |
2250 | } | |
2251 | ||
2252 | Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op) | |
2253 | { | |
2254 | if (base->hasConstant()) { | |
2255 | if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) { | |
2256 | addToGraph(Phantom, base); | |
2257 | return weakJSConstant(constant); | |
2258 | } | |
2259 | } | |
2260 | ||
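| // Inline-offset properties live directly in the object cell, so the base itself acts as | |
| // the property storage; out-of-line properties require loading the butterfly first. | |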
2261 | Node* propertyStorage; | |
2262 | if (isInlineOffset(offset)) | |
2263 | propertyStorage = base; | |
2264 | else | |
2265 | propertyStorage = addToGraph(GetButterfly, base); | |
2266 | ||
2267 | StorageAccessData* data = m_graph.m_storageAccessData.add(); | |
2268 | data->offset = offset; | |
2269 | data->identifierNumber = identifierNumber; | |
2270 | ||
2271 | Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base); | |
2272 | ||
2273 | return getByOffset; | |
2274 | } | |
2275 | ||
2276 | Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value) | |
2277 | { | |
2278 | Node* propertyStorage; | |
2279 | if (isInlineOffset(offset)) | |
2280 | propertyStorage = base; | |
2281 | else | |
2282 | propertyStorage = addToGraph(GetButterfly, base); | |
2283 | ||
2284 | StorageAccessData* data = m_graph.m_storageAccessData.add(); | |
2285 | data->offset = offset; | |
2286 | data->identifierNumber = identifier; | |
2287 | ||
2288 | Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value); | |
2289 | ||
2290 | return result; | |
2291 | } | |
2292 | ||
2293 | void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector) | |
2294 | { | |
2295 | for (unsigned i = 0; i < vector.size(); ++i) | |
2296 | cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure()); | |
2297 | } | |
2298 | ||
2299 | void ByteCodeParser::handleGetById( | |
2300 | int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber, | |
2301 | const GetByIdStatus& getByIdStatus) | |
2302 | { | |
2303 | NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById; | |
2304 | ||
2305 | if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) { | |
2306 | set(VirtualRegister(destinationOperand), | |
2307 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); | |
2308 | return; | |
2309 | } | |
2310 | ||
2311 | if (getByIdStatus.numVariants() > 1) { | |
2312 | if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode) | |
2313 | || !Options::enablePolymorphicAccessInlining()) { | |
2314 | set(VirtualRegister(destinationOperand), | |
2315 | addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); | |
2316 | return; | |
2317 | } | |
2318 | ||
2319 | if (m_graph.compilation()) | |
2320 | m_graph.compilation()->noticeInlinedGetById(); | |
2321 | ||
2322 | // 1) Emit prototype structure checks for all chains. This may not be optimal if there | |
2323 | // is some rarely executed case in the chain that requires a lot of checks and those | |
2324 | // checks are not watchpointable. | |
2325 | for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;) | |
2326 | emitChecks(getByIdStatus[variantIndex].constantChecks()); | |
2327 | ||
2328 | // 2) Emit a MultiGetByOffset | |
2329 | MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); | |
2330 | data->variants = getByIdStatus.variants(); | |
2331 | data->identifierNumber = identifierNumber; | |
2332 | set(VirtualRegister(destinationOperand), | |
2333 | addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); | |
2334 | return; | |
2335 | } | |
2336 | ||
2337 | ASSERT(getByIdStatus.numVariants() == 1); | |
2338 | GetByIdVariant variant = getByIdStatus[0]; | |
2339 | ||
2340 | if (m_graph.compilation()) | |
2341 | m_graph.compilation()->noticeInlinedGetById(); | |
2342 | ||
2343 | Node* originalBase = base; | |
2344 | ||
2345 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base); | |
2346 | ||
2347 | emitChecks(variant.constantChecks()); | |
2348 | ||
2349 | if (variant.alternateBase()) | |
2350 | base = weakJSConstant(variant.alternateBase()); | |
2351 | ||
2352 | // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to | |
2353 | // ensure that the base of the original get_by_id is kept alive until we're done with | |
2354 | // all of the speculations. We only insert the Phantom if there had been a CheckStructure | |
2355 | // on something other than the base following the CheckStructure on base. | |
2356 | if (originalBase != base) | |
2357 | addToGraph(Phantom, originalBase); | |
2358 | ||
2359 | Node* loadedValue = handleGetByOffset( | |
2360 | variant.callLinkStatus() ? SpecCellOther : prediction, | |
2361 | base, variant.baseStructure(), identifierNumber, variant.offset(), | |
2362 | variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset); | |
2363 | ||
2364 | if (!variant.callLinkStatus()) { | |
2365 | set(VirtualRegister(destinationOperand), loadedValue); | |
2366 | return; | |
2367 | } | |
2368 | ||
2369 | Node* getter = addToGraph(GetGetter, loadedValue); | |
2370 | ||
2371 | // Make a call. We don't try to get fancy with using the smallest operand number because | |
2372 | // the stack layout phase should compress the stack anyway. | |
2373 | ||
2374 | unsigned numberOfParameters = 0; | |
2375 | numberOfParameters++; // The 'this' argument. | |
2376 | numberOfParameters++; // True return PC. | |
2377 | ||
2378 | // Start with a register offset that corresponds to the last in-use register. | |
2379 | int registerOffset = virtualRegisterForLocal( | |
2380 | m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset(); | |
2381 | registerOffset -= numberOfParameters; | |
2382 | registerOffset -= JSStack::CallFrameHeaderSize; | |
2383 | ||
2384 | // Get the alignment right. | |
2385 | registerOffset = -WTF::roundUpToMultipleOf( | |
2386 | stackAlignmentRegisters(), | |
2387 | -registerOffset); | |
2388 | ||
2389 | ensureLocals( | |
2390 | m_inlineStackTop->remapOperand( | |
2391 | VirtualRegister(registerOffset)).toLocal()); | |
2392 | ||
2393 | // Issue SetLocals. This has two effects: | |
2394 | // 1) That's how handleCall() sees the arguments. | |
2395 | // 2) If we inline then this ensures that the arguments are flushed so that if you use | |
2396 | // the dreaded arguments object on the getter, the right things happen. Well, sort of - | |
2397 | // since we only really care about 'this' in this case. But we're not going to take that | |
2398 | // shortcut. | |
2399 | int nextRegister = registerOffset + JSStack::CallFrameHeaderSize; | |
2400 | set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet); | |
2401 | ||
2402 | handleCall( | |
2403 | destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id), | |
2404 | getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); | |
2405 | } | |
2406 | ||
2407 | void ByteCodeParser::emitPutById( | |
2408 | Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect) | |
2409 | { | |
2410 | if (isDirect) | |
2411 | addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value); | |
2412 | else | |
2413 | addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value); | |
2414 | } | |
2415 | ||
2416 | void ByteCodeParser::handlePutById( | |
2417 | Node* base, unsigned identifierNumber, Node* value, | |
2418 | const PutByIdStatus& putByIdStatus, bool isDirect) | |
2419 | { | |
2420 | if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) { | |
2421 | if (!putByIdStatus.isSet()) | |
2422 | addToGraph(ForceOSRExit); | |
2423 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); | |
2424 | return; | |
2425 | } | |
2426 | ||
2427 | if (putByIdStatus.numVariants() > 1) { | |
2428 | if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls() | |
2429 | || !Options::enablePolymorphicAccessInlining()) { | |
2430 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); | |
2431 | return; | |
2432 | } | |
2433 | ||
2434 | if (m_graph.compilation()) | |
2435 | m_graph.compilation()->noticeInlinedPutById(); | |
2436 | ||
2437 | if (!isDirect) { | |
2438 | for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) { | |
2439 | if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition) | |
2440 | continue; | |
2441 | emitChecks(putByIdStatus[variantIndex].constantChecks()); | |
2442 | } | |
2443 | } | |
2444 | ||
2445 | MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); | |
2446 | data->variants = putByIdStatus.variants(); | |
2447 | data->identifierNumber = identifierNumber; | |
2448 | addToGraph(MultiPutByOffset, OpInfo(data), base, value); | |
2449 | return; | |
2450 | } | |
2451 | ||
2452 | ASSERT(putByIdStatus.numVariants() == 1); | |
2453 | const PutByIdVariant& variant = putByIdStatus[0]; | |
2454 | ||
2455 | switch (variant.kind()) { | |
2456 | case PutByIdVariant::Replace: { | |
2457 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base); | |
2458 | handlePutByOffset(base, identifierNumber, variant.offset(), value); | |
2459 | if (m_graph.compilation()) | |
2460 | m_graph.compilation()->noticeInlinedPutById(); | |
2461 | return; | |
2462 | } | |
2463 | ||
2464 | case PutByIdVariant::Transition: { | |
2465 | addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base); | |
2466 | emitChecks(variant.constantChecks()); | |
2467 | ||
2468 | ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated()); | |
2469 | ||
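| // A transition may need to allocate or grow out-of-line storage before the store. Record | |
| // the old-to-new structure pair so the PutStructure can be emitted after the PutByOffset. | |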
2470 | Node* propertyStorage; | |
2471 | Transition* transition = m_graph.m_transitions.add( | |
2472 | variant.oldStructureForTransition(), variant.newStructure()); | |
2473 | ||
2474 | if (variant.reallocatesStorage()) { | |
2475 | ||
2476 | // If we're growing the property storage then it must be because we're | |
2477 | // storing into the out-of-line storage. | |
2478 | ASSERT(!isInlineOffset(variant.offset())); | |
2479 | ||
2480 | if (!variant.oldStructureForTransition()->outOfLineCapacity()) { | |
2481 | propertyStorage = addToGraph( | |
2482 | AllocatePropertyStorage, OpInfo(transition), base); | |
2483 | } else { | |
2484 | propertyStorage = addToGraph( | |
2485 | ReallocatePropertyStorage, OpInfo(transition), | |
2486 | base, addToGraph(GetButterfly, base)); | |
2487 | } | |
2488 | } else { | |
2489 | if (isInlineOffset(variant.offset())) | |
2490 | propertyStorage = base; | |
2491 | else | |
2492 | propertyStorage = addToGraph(GetButterfly, base); | |
2493 | } | |
2494 | ||
2495 | StorageAccessData* data = m_graph.m_storageAccessData.add(); | |
2496 | data->offset = variant.offset(); | |
2497 | data->identifierNumber = identifierNumber; | |
2498 | ||
2499 | addToGraph( | |
2500 | PutByOffset, | |
2501 | OpInfo(data), | |
2502 | propertyStorage, | |
2503 | base, | |
2504 | value); | |
2505 | ||
2506 | // FIXME: PutStructure goes last until we fix either | |
2507 | // https://bugs.webkit.org/show_bug.cgi?id=142921 or | |
2508 | // https://bugs.webkit.org/show_bug.cgi?id=142924. | |
2509 | addToGraph(PutStructure, OpInfo(transition), base); | |
2510 | ||
2511 | if (m_graph.compilation()) | |
2512 | m_graph.compilation()->noticeInlinedPutById(); | |
2513 | return; | |
2514 | } | |
2515 | ||
2516 | case PutByIdVariant::Setter: { | |
2517 | Node* originalBase = base; | |
2518 | ||
2519 | addToGraph( | |
2520 | CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base); | |
2521 | ||
2522 | emitChecks(variant.constantChecks()); | |
2523 | ||
2524 | if (variant.alternateBase()) | |
2525 | base = weakJSConstant(variant.alternateBase()); | |
2526 | ||
2527 | Node* loadedValue = handleGetByOffset( | |
2528 | SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(), | |
2529 | GetGetterSetterByOffset); | |
2530 | ||
2531 | Node* setter = addToGraph(GetSetter, loadedValue); | |
2532 | ||
2533 | // Make a call. We don't try to get fancy with using the smallest operand number because | |
2534 | // the stack layout phase should compress the stack anyway. | |
2535 | ||
2536 | unsigned numberOfParameters = 0; | |
2537 | numberOfParameters++; // The 'this' argument. | |
2538 | numberOfParameters++; // The new value. | |
2539 | numberOfParameters++; // True return PC. | |
2540 | ||
2541 | // Start with a register offset that corresponds to the last in-use register. | |
2542 | int registerOffset = virtualRegisterForLocal( | |
2543 | m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset(); | |
2544 | registerOffset -= numberOfParameters; | |
2545 | registerOffset -= JSStack::CallFrameHeaderSize; | |
2546 | ||
2547 | // Get the alignment right. | |
2548 | registerOffset = -WTF::roundUpToMultipleOf( | |
2549 | stackAlignmentRegisters(), | |
2550 | -registerOffset); | |
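| // registerOffset is negative here, so negating it, rounding the magnitude up to a | |
| // multiple of stackAlignmentRegisters(), and negating again moves the offset to the | |
| // nearest aligned slot in the more-negative (callee-frame) direction. | |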
2551 | ||
2552 | ensureLocals( | |
2553 | m_inlineStackTop->remapOperand( | |
2554 | VirtualRegister(registerOffset)).toLocal()); | |
2555 | ||
2556 | int nextRegister = registerOffset + JSStack::CallFrameHeaderSize; | |
2557 | set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet); | |
2558 | set(VirtualRegister(nextRegister++), value, ImmediateNakedSet); | |
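| // The setter runs as an ordinary call: the first argument slot holds 'this' (the | |
| // original base, not any alternate base used for the getter-setter load) and the | |
| // second holds the value being stored. | |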
2559 | ||
2560 | handleCall( | |
2561 | VirtualRegister().offset(), Call, InlineCallFrame::SetterCall, | |
2562 | OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset, | |
2563 | *variant.callLinkStatus(), SpecOther); | |
2564 | return; | |
2565 | } | |
2566 | ||
2567 | default: { | |
2568 | emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); | |
2569 | return; | |
2570 | } } | |
2571 | } | |
2572 | ||
2573 | void ByteCodeParser::prepareToParseBlock() | |
2574 | { | |
2575 | clearCaches(); | |
2576 | ASSERT(m_setLocalQueue.isEmpty()); | |
2577 | } | |
2578 | ||
2579 | void ByteCodeParser::clearCaches() | |
2580 | { | |
2581 | m_constants.resize(0); | |
2582 | } | |
2583 | ||
2584 | bool ByteCodeParser::parseBlock(unsigned limit) | |
2585 | { | |
2586 | bool shouldContinueParsing = true; | |
2587 | ||
2588 | Interpreter* interpreter = m_vm->interpreter; | |
2589 | Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin(); | |
2590 | unsigned blockBegin = m_currentIndex; | |
2591 | ||
2592 | // If we are the first basic block, introduce markers for arguments. This allows | |
2593 | // us to track if a use of an argument may use the actual argument passed, as | |
2594 | // opposed to using a value we set explicitly. | |
2595 | if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) { | |
2596 | m_graph.m_arguments.resize(m_numArguments); | |
2597 | for (unsigned argument = 0; argument < m_numArguments; ++argument) { | |
2598 | VariableAccessData* variable = newVariableAccessData( | |
2599 | virtualRegisterForArgument(argument)); | |
2600 | variable->mergeStructureCheckHoistingFailed( | |
2601 | m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)); | |
2602 | variable->mergeCheckArrayHoistingFailed( | |
2603 | m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); | |
2604 | ||
2605 | Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); | |
2606 | m_graph.m_arguments[argument] = setArgument; | |
2607 | m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument); | |
2608 | } | |
2609 | } | |
2610 | ||
2611 | while (true) { | |
2612 | processSetLocalQueue(); | |
2613 | ||
2614 | // Don't extend over jump destinations. | |
2615 | if (m_currentIndex == limit) { | |
2616 | // Ordinarily we want to plant a jump. But refuse to do this if the block is | |
2617 | // empty. This is a special case for inlining, which might otherwise create | |
2618 | // some empty blocks in some cases. When parseBlock() returns with an empty | |
2619 | // block, it will get repurposed instead of creating a new one. Note that this | |
2620 | // logic relies on every bytecode resulting in one or more nodes, which would | |
2621 | // be true anyway except for op_loop_hint, which emits a Phantom to force this | |
2622 | // to be true. | |
2623 | if (!m_currentBlock->isEmpty()) | |
2624 | addToGraph(Jump, OpInfo(m_currentIndex)); | |
2625 | return shouldContinueParsing; | |
2626 | } | |
2627 | ||
2628 | // Switch on the current bytecode opcode. | |
2629 | Instruction* currentInstruction = instructionsBegin + m_currentIndex; | |
2630 | m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. | |
2631 | OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode); | |
2632 | ||
2633 | if (Options::verboseDFGByteCodeParsing()) | |
2634 | dataLog(" parsing ", currentCodeOrigin(), "\n"); | |
2635 | ||
2636 | if (m_graph.compilation()) { | |
2637 | addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor( | |
2638 | Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin())))); | |
2639 | } | |
2640 | ||
2641 | switch (opcodeID) { | |
2642 | ||
2643 | // === Function entry opcodes === | |
2644 | ||
2645 | case op_enter: { | |
2646 | Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined)); | |
2647 | // Initialize all locals to undefined. | |
2648 | for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i) | |
2649 | set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet); | |
2650 | NEXT_OPCODE(op_enter); | |
2651 | } | |
2652 | ||
2653 | case op_to_this: { | |
2654 | Node* op1 = getThis(); | |
2655 | if (op1->op() != ToThis) { | |
2656 | Structure* cachedStructure = currentInstruction[2].u.structure.get(); | |
2657 | if (currentInstruction[2].u.toThisStatus != ToThisOK | |
2658 | || !cachedStructure | |
2659 | || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis | |
2660 | || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) | |
2661 | || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) | |
2662 | || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) { | |
2663 | setThis(addToGraph(ToThis, op1)); | |
2664 | } else { | |
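| // The cached structure's toThis behaves like JSObject's, so to_this is a no-op for | |
| // objects of that structure; a structure check is all we need here. | |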
2665 | addToGraph( | |
2666 | CheckStructure, | |
2667 | OpInfo(m_graph.addStructureSet(cachedStructure)), | |
2668 | op1); | |
2669 | } | |
2670 | } | |
2671 | NEXT_OPCODE(op_to_this); | |
2672 | } | |
2673 | ||
2674 | case op_create_this: { | |
2675 | int calleeOperand = currentInstruction[2].u.operand; | |
2676 | Node* callee = get(VirtualRegister(calleeOperand)); | |
2677 | ||
2678 | JSFunction* function = callee->dynamicCastConstant<JSFunction*>(); | |
2679 | if (!function) { | |
2680 | JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet(); | |
2681 | if (cachedFunction | |
2682 | && cachedFunction != JSCell::seenMultipleCalleeObjects() | |
2683 | && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { | |
2684 | ASSERT(cachedFunction->inherits(JSFunction::info())); | |
2685 | ||
2686 | FrozenValue* frozen = m_graph.freeze(cachedFunction); | |
2687 | addToGraph(CheckCell, OpInfo(frozen), callee); | |
2688 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen))); | |
2689 | ||
2690 | function = static_cast<JSFunction*>(cachedFunction); | |
2691 | } | |
2692 | } | |
2693 | ||
2694 | bool alreadyEmitted = false; | |
2695 | if (function) { | |
2696 | if (FunctionRareData* rareData = function->rareData()) { | |
2697 | if (Structure* structure = rareData->allocationStructure()) { | |
2698 | m_graph.freeze(rareData); | |
2699 | m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet()); | |
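| // Freezing the rare data and lazily watching its allocation profile watchpoint set | |
| // means this compilation gets jettisoned if the function's inferred allocation | |
| // structure ever changes. | |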
2700 | // The callee is still live up to this point. | |
2701 | addToGraph(Phantom, callee); | |
2702 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure))); | |
2703 | alreadyEmitted = true; | |
2704 | } | |
2705 | } | |
2706 | } | |
2707 | if (!alreadyEmitted) { | |
2708 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2709 | addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee)); | |
2710 | } | |
2711 | NEXT_OPCODE(op_create_this); | |
2712 | } | |
2713 | ||
2714 | case op_new_object: { | |
2715 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2716 | addToGraph(NewObject, | |
2717 | OpInfo(currentInstruction[3].u.objectAllocationProfile->structure()))); | |
2718 | NEXT_OPCODE(op_new_object); | |
2719 | } | |
2720 | ||
2721 | case op_new_array: { | |
2722 | int startOperand = currentInstruction[2].u.operand; | |
2723 | int numOperands = currentInstruction[3].u.operand; | |
2724 | ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; | |
2725 | for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) | |
2726 | addVarArgChild(get(VirtualRegister(operandIdx))); | |
2727 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0))); | |
2728 | NEXT_OPCODE(op_new_array); | |
2729 | } | |
2730 | ||
2731 | case op_new_array_with_size: { | |
2732 | int lengthOperand = currentInstruction[2].u.operand; | |
2733 | ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; | |
2734 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand)))); | |
2735 | NEXT_OPCODE(op_new_array_with_size); | |
2736 | } | |
2737 | ||
2738 | case op_new_array_buffer: { | |
2739 | int startConstant = currentInstruction[2].u.operand; | |
2740 | int numConstants = currentInstruction[3].u.operand; | |
2741 | ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; | |
2742 | NewArrayBufferData data; | |
2743 | data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant]; | |
2744 | data.numConstants = numConstants; | |
2745 | data.indexingType = profile->selectIndexingType(); | |
2746 | ||
2747 | // If this statement has never executed, we'll have the wrong indexing type in the profile. | |
2748 | for (int i = 0; i < numConstants; ++i) { | |
2749 | data.indexingType = | |
2750 | leastUpperBoundOfIndexingTypeAndValue( | |
2751 | data.indexingType, | |
2752 | m_codeBlock->constantBuffer(data.startConstant)[i]); | |
2753 | } | |
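| // The profile only knows about executions it has seen, so widen its indexing type to | |
| // the least upper bound that can also hold every constant in the buffer (e.g. a double | |
| // constant forces at least a double-capable indexing type). | |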
2754 | ||
2755 | m_graph.m_newArrayBufferData.append(data); | |
2756 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last()))); | |
2757 | NEXT_OPCODE(op_new_array_buffer); | |
2758 | } | |
2759 | ||
2760 | case op_new_regexp: { | |
2761 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand))); | |
2762 | NEXT_OPCODE(op_new_regexp); | |
2763 | } | |
2764 | ||
2765 | // === Bitwise operations === | |
2766 | ||
2767 | case op_bitand: { | |
2768 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2769 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2770 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2)); | |
2771 | NEXT_OPCODE(op_bitand); | |
2772 | } | |
2773 | ||
2774 | case op_bitor: { | |
2775 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2776 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2777 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2)); | |
2778 | NEXT_OPCODE(op_bitor); | |
2779 | } | |
2780 | ||
2781 | case op_bitxor: { | |
2782 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2783 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2784 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2)); | |
2785 | NEXT_OPCODE(op_bitxor); | |
2786 | } | |
2787 | ||
2788 | case op_rshift: { | |
2789 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2790 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2791 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2792 | addToGraph(BitRShift, op1, op2)); | |
2793 | NEXT_OPCODE(op_rshift); | |
2794 | } | |
2795 | ||
2796 | case op_lshift: { | |
2797 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2798 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2799 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2800 | addToGraph(BitLShift, op1, op2)); | |
2801 | NEXT_OPCODE(op_lshift); | |
2802 | } | |
2803 | ||
2804 | case op_urshift: { | |
2805 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2806 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2807 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2808 | addToGraph(BitURShift, op1, op2)); | |
2809 | NEXT_OPCODE(op_urshift); | |
2810 | } | |
2811 | ||
2812 | case op_unsigned: { | |
2813 | set(VirtualRegister(currentInstruction[1].u.operand), | |
2814 | makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand))))); | |
2815 | NEXT_OPCODE(op_unsigned); | |
2816 | } | |
2817 | ||
2818 | // === Increment/Decrement opcodes === | |
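| // op_inc and op_dec are lowered to ArithAdd/ArithSub against the constant 1; makeSafe() | |
| // consults the profiling data so we only speculate int32 arithmetic when overflow has | |
| // not been observed. | |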
2819 | ||
2820 | case op_inc: { | |
2821 | int srcDst = currentInstruction[1].u.operand; | |
2822 | VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); | |
2823 | Node* op = get(srcDstVirtualRegister); | |
2824 | set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); | |
2825 | NEXT_OPCODE(op_inc); | |
2826 | } | |
2827 | ||
2828 | case op_dec: { | |
2829 | int srcDst = currentInstruction[1].u.operand; | |
2830 | VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); | |
2831 | Node* op = get(srcDstVirtualRegister); | |
2832 | set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); | |
2833 | NEXT_OPCODE(op_dec); | |
2834 | } | |
2835 | ||
2836 | // === Arithmetic operations === | |
2837 | ||
2838 | case op_add: { | |
2839 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2840 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
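| // If both inputs are known to produce numbers we can go straight to ArithAdd; otherwise | |
| // ValueAdd is required because '+' may mean string concatenation (or other ToPrimitive | |
| // behavior). | |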
2841 | if (op1->hasNumberResult() && op2->hasNumberResult()) | |
2842 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2))); | |
2843 | else | |
2844 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2))); | |
2845 | NEXT_OPCODE(op_add); | |
2846 | } | |
2847 | ||
2848 | case op_sub: { | |
2849 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2850 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2851 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2))); | |
2852 | NEXT_OPCODE(op_sub); | |
2853 | } | |
2854 | ||
2855 | case op_negate: { | |
2856 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2857 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1))); | |
2858 | NEXT_OPCODE(op_negate); | |
2859 | } | |
2860 | ||
2861 | case op_mul: { | |
2862 | // Multiply requires that the inputs are not truncated, unfortunately. | |
2863 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2864 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2865 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2))); | |
2866 | NEXT_OPCODE(op_mul); | |
2867 | } | |
2868 | ||
2869 | case op_mod: { | |
2870 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2871 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2872 | set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2))); | |
2873 | NEXT_OPCODE(op_mod); | |
2874 | } | |
2875 | ||
2876 | case op_div: { | |
2877 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2878 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2879 | set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2))); | |
2880 | NEXT_OPCODE(op_div); | |
2881 | } | |
2882 | ||
2883 | // === Misc operations === | |
2884 | ||
2885 | case op_debug: | |
2886 | addToGraph(Breakpoint); | |
2887 | NEXT_OPCODE(op_debug); | |
2888 | ||
2889 | case op_profile_will_call: { | |
2890 | addToGraph(ProfileWillCall); | |
2891 | NEXT_OPCODE(op_profile_will_call); | |
2892 | } | |
2893 | ||
2894 | case op_profile_did_call: { | |
2895 | addToGraph(ProfileDidCall); | |
2896 | NEXT_OPCODE(op_profile_did_call); | |
2897 | } | |
2898 | ||
2899 | case op_mov: { | |
2900 | Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2901 | set(VirtualRegister(currentInstruction[1].u.operand), op); | |
2902 | NEXT_OPCODE(op_mov); | |
2903 | } | |
2904 | ||
2905 | case op_check_tdz: { | |
2906 | Node* op = get(VirtualRegister(currentInstruction[1].u.operand)); | |
2907 | addToGraph(CheckNotEmpty, op); | |
2908 | NEXT_OPCODE(op_check_tdz); | |
2909 | } | |
2910 | ||
2911 | case op_check_has_instance: | |
2912 | addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand))); | |
2913 | NEXT_OPCODE(op_check_has_instance); | |
2914 | ||
2915 | case op_instanceof: { | |
2916 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2917 | Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand)); | |
2918 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype)); | |
2919 | NEXT_OPCODE(op_instanceof); | |
2920 | } | |
2921 | ||
2922 | case op_is_undefined: { | |
2923 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2924 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); | |
2925 | NEXT_OPCODE(op_is_undefined); | |
2926 | } | |
2927 | ||
2928 | case op_is_boolean: { | |
2929 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2930 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value)); | |
2931 | NEXT_OPCODE(op_is_boolean); | |
2932 | } | |
2933 | ||
2934 | case op_is_number: { | |
2935 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2936 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value)); | |
2937 | NEXT_OPCODE(op_is_number); | |
2938 | } | |
2939 | ||
2940 | case op_is_string: { | |
2941 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2942 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value)); | |
2943 | NEXT_OPCODE(op_is_string); | |
2944 | } | |
2945 | ||
2946 | case op_is_object: { | |
2947 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2948 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value)); | |
2949 | NEXT_OPCODE(op_is_object); | |
2950 | } | |
2951 | ||
2952 | case op_is_object_or_null: { | |
2953 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2954 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value)); | |
2955 | NEXT_OPCODE(op_is_object_or_null); | |
2956 | } | |
2957 | ||
2958 | case op_is_function: { | |
2959 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2960 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); | |
2961 | NEXT_OPCODE(op_is_function); | |
2962 | } | |
2963 | ||
2964 | case op_not: { | |
2965 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2966 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value)); | |
2967 | NEXT_OPCODE(op_not); | |
2968 | } | |
2969 | ||
2970 | case op_to_primitive: { | |
2971 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
2972 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value)); | |
2973 | NEXT_OPCODE(op_to_primitive); | |
2974 | } | |
2975 | ||
2976 | case op_strcat: { | |
2977 | int startOperand = currentInstruction[2].u.operand; | |
2978 | int numOperands = currentInstruction[3].u.operand; | |
2979 | #if CPU(X86) | |
2980 | // X86 doesn't have enough registers to compile MakeRope with three arguments. | |
2981 | // Rather than try to be clever, we just make MakeRope dumber on this processor. | |
2982 | const unsigned maxRopeArguments = 2; | |
2983 | #else | |
2984 | const unsigned maxRopeArguments = 3; | |
2985 | #endif | |
2986 | auto toStringNodes = std::make_unique<Node*[]>(numOperands); | |
2987 | for (int i = 0; i < numOperands; i++) | |
2988 | toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i))); | |
2989 | ||
2990 | for (int i = 0; i < numOperands; i++) | |
2991 | addToGraph(Phantom, toStringNodes[i]); | |
2992 | ||
2993 | Node* operands[AdjacencyList::Size]; | |
2994 | unsigned indexInOperands = 0; | |
2995 | for (unsigned i = 0; i < AdjacencyList::Size; ++i) | |
2996 | operands[i] = 0; | |
2997 | for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) { | |
2998 | if (indexInOperands == maxRopeArguments) { | |
2999 | operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]); | |
3000 | for (unsigned i = 1; i < AdjacencyList::Size; ++i) | |
3001 | operands[i] = 0; | |
3002 | indexInOperands = 1; | |
3003 | } | |
3004 | ||
3005 | ASSERT(indexInOperands < AdjacencyList::Size); | |
3006 | ASSERT(indexInOperands < maxRopeArguments); | |
3007 | operands[indexInOperands++] = toStringNodes[operandIdx]; | |
3008 | } | |
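| // The loop above chains full batches by feeding each finished rope back in as | |
| // operands[0], so a long strcat becomes a left-leaning chain of MakeRope nodes; the | |
| // final MakeRope below folds in whatever operands are left over. | |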
3009 | set(VirtualRegister(currentInstruction[1].u.operand), | |
3010 | addToGraph(MakeRope, operands[0], operands[1], operands[2])); | |
3011 | NEXT_OPCODE(op_strcat); | |
3012 | } | |
3013 | ||
3014 | case op_less: { | |
3015 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3016 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3017 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); | |
3018 | NEXT_OPCODE(op_less); | |
3019 | } | |
3020 | ||
3021 | case op_lesseq: { | |
3022 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3023 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3024 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); | |
3025 | NEXT_OPCODE(op_lesseq); | |
3026 | } | |
3027 | ||
3028 | case op_greater: { | |
3029 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3030 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3031 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); | |
3032 | NEXT_OPCODE(op_greater); | |
3033 | } | |
3034 | ||
3035 | case op_greatereq: { | |
3036 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3037 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3038 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); | |
3039 | NEXT_OPCODE(op_greatereq); | |
3040 | } | |
3041 | ||
3042 | case op_eq: { | |
3043 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3044 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3045 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); | |
3046 | NEXT_OPCODE(op_eq); | |
3047 | } | |
3048 | ||
3049 | case op_eq_null: { | |
3050 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3051 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))); | |
3052 | NEXT_OPCODE(op_eq_null); | |
3053 | } | |
3054 | ||
3055 | case op_stricteq: { | |
3056 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3057 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3058 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); | |
3059 | NEXT_OPCODE(op_stricteq); | |
3060 | } | |
3061 | ||
3062 | case op_neq: { | |
3063 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3064 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3065 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); | |
3066 | NEXT_OPCODE(op_neq); | |
3067 | } | |
3068 | ||
3069 | case op_neq_null: { | |
3070 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3071 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))))); | |
3072 | NEXT_OPCODE(op_neq_null); | |
3073 | } | |
3074 | ||
3075 | case op_nstricteq: { | |
3076 | Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3077 | Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3078 | Node* invertedResult; | |
3079 | invertedResult = addToGraph(CompareStrictEq, op1, op2); | |
3080 | set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult)); | |
3081 | NEXT_OPCODE(op_nstricteq); | |
3082 | } | |
3083 | ||
3084 | // === Property access operations === | |
3085 | ||
3086 | case op_get_by_val: { | |
3087 | SpeculatedType prediction = getPredictionWithoutOSRExit(); | |
3088 | ||
3089 | Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3090 | ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); | |
3091 | Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3092 | Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property); | |
3093 | set(VirtualRegister(currentInstruction[1].u.operand), getByVal); | |
3094 | ||
3095 | NEXT_OPCODE(op_get_by_val); | |
3096 | } | |
3097 | ||
3098 | case op_put_by_val_direct: | |
3099 | case op_put_by_val: { | |
3100 | Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3101 | ||
3102 | ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write); | |
3103 | ||
3104 | Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3105 | Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3106 | ||
3107 | addVarArgChild(base); | |
3108 | addVarArgChild(property); | |
3109 | addVarArgChild(value); | |
3110 | addVarArgChild(0); // Leave room for property storage. | |
3111 | addVarArgChild(0); // Leave room for length. | |
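| // The two empty children are placeholders; later phases (e.g. fixup, once the array | |
| // mode is refined) can fill in the property storage and length edges. | |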
3112 | addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); | |
3113 | ||
3114 | NEXT_OPCODE(op_put_by_val); | |
3115 | } | |
3116 | ||
3117 | case op_get_by_id: | |
3118 | case op_get_by_id_out_of_line: | |
3119 | case op_get_array_length: { | |
3120 | SpeculatedType prediction = getPrediction(); | |
3121 | ||
3122 | Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3123 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; | |
3124 | ||
3125 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; | |
3126 | GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( | |
3127 | m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, | |
3128 | m_inlineStackTop->m_stubInfos, m_dfgStubInfos, | |
3129 | currentCodeOrigin(), uid); | |
3130 | ||
3131 | handleGetById( | |
3132 | currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus); | |
3133 | ||
3134 | NEXT_OPCODE(op_get_by_id); | |
3135 | } | |
3136 | case op_put_by_id: | |
3137 | case op_put_by_id_out_of_line: | |
3138 | case op_put_by_id_transition_direct: | |
3139 | case op_put_by_id_transition_normal: | |
3140 | case op_put_by_id_transition_direct_out_of_line: | |
3141 | case op_put_by_id_transition_normal_out_of_line: { | |
3142 | Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); | |
3143 | Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3144 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; | |
3145 | bool direct = currentInstruction[8].u.operand; | |
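| // Operand 8 distinguishes direct puts (define the property on the base itself, without | |
| // consulting setters on the prototype chain) from ordinary assignments. | |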
3146 | ||
3147 | PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( | |
3148 | m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, | |
3149 | m_inlineStackTop->m_stubInfos, m_dfgStubInfos, | |
3150 | currentCodeOrigin(), m_graph.identifiers()[identifierNumber]); | |
3151 | ||
3152 | handlePutById(base, identifierNumber, value, putByIdStatus, direct); | |
3153 | NEXT_OPCODE(op_put_by_id); | |
3154 | } | |
3155 | ||
3156 | case op_init_global_const_nop: { | |
3157 | NEXT_OPCODE(op_init_global_const_nop); | |
3158 | } | |
3159 | ||
3160 | case op_init_global_const: { | |
3161 | Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3162 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); | |
3163 | addToGraph( | |
3164 | PutGlobalVar, | |
3165 | OpInfo(globalObject->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)), | |
3166 | weakJSConstant(globalObject), value); | |
3167 | NEXT_OPCODE(op_init_global_const); | |
3168 | } | |
3169 | ||
3170 | case op_profile_type: { | |
3171 | Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3172 | addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile); | |
3173 | NEXT_OPCODE(op_profile_type); | |
3174 | } | |
3175 | ||
3176 | case op_profile_control_flow: { | |
3177 | BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; | |
3178 | addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); | |
3179 | NEXT_OPCODE(op_profile_control_flow); | |
3180 | } | |
3181 | ||
3182 | // === Block terminators. === | |
3183 | ||
3184 | case op_jmp: { | |
3185 | int relativeOffset = currentInstruction[1].u.operand; | |
3186 | addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); | |
3187 | if (relativeOffset <= 0) | |
3188 | flushForTerminal(); | |
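| // A non-positive relative offset means this jmp is a backward branch (a loop back | |
| // edge); the parser treats it like a terminal here, presumably so locals are flushed | |
| // at the loop boundary. | |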
3189 | LAST_OPCODE(op_jmp); | |
3190 | } | |
3191 | ||
3192 | case op_jtrue: { | |
3193 | unsigned relativeOffset = currentInstruction[2].u.operand; | |
3194 | Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3195 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); | |
3196 | LAST_OPCODE(op_jtrue); | |
3197 | } | |
3198 | ||
3199 | case op_jfalse: { | |
3200 | unsigned relativeOffset = currentInstruction[2].u.operand; | |
3201 | Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3202 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); | |
3203 | LAST_OPCODE(op_jfalse); | |
3204 | } | |
3205 | ||
3206 | case op_jeq_null: { | |
3207 | unsigned relativeOffset = currentInstruction[2].u.operand; | |
3208 | Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3209 | Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))); | |
3210 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition); | |
3211 | LAST_OPCODE(op_jeq_null); | |
3212 | } | |
3213 | ||
3214 | case op_jneq_null: { | |
3215 | unsigned relativeOffset = currentInstruction[2].u.operand; | |
3216 | Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3217 | Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))); | |
3218 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition); | |
3219 | LAST_OPCODE(op_jneq_null); | |
3220 | } | |
3221 | ||
3222 | case op_jless: { | |
3223 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3224 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3225 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3226 | Node* condition = addToGraph(CompareLess, op1, op2); | |
3227 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition); | |
3228 | LAST_OPCODE(op_jless); | |
3229 | } | |
3230 | ||
3231 | case op_jlesseq: { | |
3232 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3233 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3234 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3235 | Node* condition = addToGraph(CompareLessEq, op1, op2); | |
3236 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition); | |
3237 | LAST_OPCODE(op_jlesseq); | |
3238 | } | |
3239 | ||
3240 | case op_jgreater: { | |
3241 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3242 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3243 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3244 | Node* condition = addToGraph(CompareGreater, op1, op2); | |
3245 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition); | |
3246 | LAST_OPCODE(op_jgreater); | |
3247 | } | |
3248 | ||
3249 | case op_jgreatereq: { | |
3250 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3251 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3252 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3253 | Node* condition = addToGraph(CompareGreaterEq, op1, op2); | |
3254 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition); | |
3255 | LAST_OPCODE(op_jgreatereq); | |
3256 | } | |
3257 | ||
3258 | case op_jnless: { | |
3259 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3260 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3261 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3262 | Node* condition = addToGraph(CompareLess, op1, op2); | |
3263 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition); | |
3264 | LAST_OPCODE(op_jnless); | |
3265 | } | |
3266 | ||
3267 | case op_jnlesseq: { | |
3268 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3269 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3270 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3271 | Node* condition = addToGraph(CompareLessEq, op1, op2); | |
3272 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition); | |
3273 | LAST_OPCODE(op_jnlesseq); | |
3274 | } | |
3275 | ||
3276 | case op_jngreater: { | |
3277 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3278 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3279 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3280 | Node* condition = addToGraph(CompareGreater, op1, op2); | |
3281 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition); | |
3282 | LAST_OPCODE(op_jngreater); | |
3283 | } | |
3284 | ||
3285 | case op_jngreatereq: { | |
3286 | unsigned relativeOffset = currentInstruction[3].u.operand; | |
3287 | Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); | |
3288 | Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3289 | Node* condition = addToGraph(CompareGreaterEq, op1, op2); | |
3290 | addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition); | |
3291 | LAST_OPCODE(op_jngreatereq); | |
3292 | } | |
3293 | ||
3294 | case op_switch_imm: { | |
3295 | SwitchData& data = *m_graph.m_switchData.add(); | |
3296 | data.kind = SwitchImm; | |
3297 | data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; | |
3298 | data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); | |
3299 | SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); | |
3300 | for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { | |
3301 | if (!table.branchOffsets[i]) | |
3302 | continue; | |
3303 | unsigned target = m_currentIndex + table.branchOffsets[i]; | |
3304 | if (target == data.fallThrough.bytecodeIndex()) | |
3305 | continue; | |
3306 | data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); | |
3307 | } | |
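| // A zero branch offset marks an unused jump-table slot, and cases that branch to the | |
| // fall-through target are redundant, so only genuinely distinct targets become cases. | |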
3308 | addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); | |
3309 | flushIfTerminal(data); | |
3310 | LAST_OPCODE(op_switch_imm); | |
3311 | } | |
3312 | ||
3313 | case op_switch_char: { | |
3314 | SwitchData& data = *m_graph.m_switchData.add(); | |
3315 | data.kind = SwitchChar; | |
3316 | data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; | |
3317 | data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); | |
3318 | SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); | |
3319 | for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { | |
3320 | if (!table.branchOffsets[i]) | |
3321 | continue; | |
3322 | unsigned target = m_currentIndex + table.branchOffsets[i]; | |
3323 | if (target == data.fallThrough.bytecodeIndex()) | |
3324 | continue; | |
3325 | data.cases.append( | |
3326 | SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); | |
3327 | } | |
3328 | addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); | |
3329 | flushIfTerminal(data); | |
3330 | LAST_OPCODE(op_switch_char); | |
3331 | } | |
3332 | ||
3333 | case op_switch_string: { | |
3334 | SwitchData& data = *m_graph.m_switchData.add(); | |
3335 | data.kind = SwitchString; | |
3336 | data.switchTableIndex = currentInstruction[1].u.operand; | |
3337 | data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); | |
3338 | StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); | |
3339 | StringJumpTable::StringOffsetTable::iterator iter; | |
3340 | StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); | |
3341 | for (iter = table.offsetTable.begin(); iter != end; ++iter) { | |
3342 | unsigned target = m_currentIndex + iter->value.branchOffset; | |
3343 | if (target == data.fallThrough.bytecodeIndex()) | |
3344 | continue; | |
3345 | data.cases.append( | |
3346 | SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); | |
3347 | } | |
3348 | addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); | |
3349 | flushIfTerminal(data); | |
3350 | LAST_OPCODE(op_switch_string); | |
3351 | } | |
3352 | ||
3353 | case op_ret: | |
3354 | if (inlineCallFrame()) { | |
3355 | flushForReturn(); | |
3356 | if (m_inlineStackTop->m_returnValue.isValid()) | |
3357 | setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush); | |
3358 | m_inlineStackTop->m_didReturn = true; | |
3359 | if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) { | |
3360 | // If we're returning from the first block, then we're done parsing. | |
3361 | ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock()); | |
3362 | shouldContinueParsing = false; | |
3363 | LAST_OPCODE(op_ret); | |
3364 | } else { | |
3365 | // If inlining created blocks, and we're doing a return, then we need some | |
3366 | // special linking. | |
3367 | ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock()); | |
3368 | m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false; | |
3369 | } | |
3370 | if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) { | |
3371 | ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size()); | |
3372 | addToGraph(Jump, OpInfo(0)); | |
3373 | m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true; | |
3374 | m_inlineStackTop->m_didEarlyReturn = true; | |
3375 | } | |
3376 | LAST_OPCODE(op_ret); | |
3377 | } | |
3378 | addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); | |
3379 | flushForReturn(); | |
3380 | LAST_OPCODE(op_ret); | |
3381 | ||
3382 | case op_end: | |
3383 | ASSERT(!inlineCallFrame()); | |
3384 | addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); | |
3385 | flushForReturn(); | |
3386 | LAST_OPCODE(op_end); | |
3387 | ||
3388 | case op_throw: | |
3389 | addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand))); | |
3390 | flushForTerminal(); | |
3391 | addToGraph(Unreachable); | |
3392 | LAST_OPCODE(op_throw); | |
3393 | ||
3394 | case op_throw_static_error: | |
3395 | addToGraph(ThrowReferenceError); | |
3396 | flushForTerminal(); | |
3397 | addToGraph(Unreachable); | |
3398 | LAST_OPCODE(op_throw_static_error); | |
3399 | ||
3400 | case op_call: | |
3401 | handleCall(currentInstruction, Call, CodeForCall); | |
3402 | // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction. | |
3403 | ASSERT(m_currentInstruction == currentInstruction); | |
3404 | NEXT_OPCODE(op_call); | |
3405 | ||
3406 | case op_construct: | |
3407 | handleCall(currentInstruction, Construct, CodeForConstruct); | |
3408 | NEXT_OPCODE(op_construct); | |
3409 | ||
3410 | case op_call_varargs: { | |
3411 | handleVarargsCall(currentInstruction, CallVarargs, CodeForCall); | |
3412 | NEXT_OPCODE(op_call_varargs); | |
3413 | } | |
3414 | ||
3415 | case op_construct_varargs: { | |
3416 | handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct); | |
3417 | NEXT_OPCODE(op_construct_varargs); | |
3418 | } | |
3419 | ||
3420 | case op_jneq_ptr: | |
3421 | // Statically speculate for now. It makes sense to let speculate-only jneq_ptr | |
3422 | // support simmer for a while before making it more general, since it's | |
3423 | // already gnarly enough as it is. | |
3424 | ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer)); | |
3425 | addToGraph( | |
3426 | CheckCell, | |
3427 | OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor( | |
3428 | m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))), | |
3429 | get(VirtualRegister(currentInstruction[1].u.operand))); | |
3430 | addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr))); | |
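| // Only the fall-through edge is emitted: the CheckCell above OSR-exits if the value is | |
| // not the expected special pointer, so the taken side of jneq_ptr never needs a block. | |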
3431 | LAST_OPCODE(op_jneq_ptr); | |
3432 | ||
3433 | case op_resolve_scope: { | |
3434 | int dst = currentInstruction[1].u.operand; | |
3435 | ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); | |
3436 | unsigned depth = currentInstruction[5].u.operand; | |
3437 | ||
3438 | // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. | |
3439 | if (needsVarInjectionChecks(resolveType)) | |
3440 | addToGraph(VarInjectionWatchpoint); | |
3441 | ||
3442 | switch (resolveType) { | |
3443 | case GlobalProperty: | |
3444 | case GlobalVar: | |
3445 | case GlobalPropertyWithVarInjectionChecks: | |
3446 | case GlobalVarWithVarInjectionChecks: | |
3447 | set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject())); | |
3448 | if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks) | |
3449 | addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand)))); | |
3450 | break; | |
3451 | case LocalClosureVar: | |
3452 | case ClosureVar: | |
3453 | case ClosureVarWithVarInjectionChecks: { | |
3454 | Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand)); | |
3455 | addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. | |
3456 | ||
3457 | // We have various forms of constant folding here. This is necessary to avoid | |
3458 | // spurious recompiles in dead-but-foldable code. | |
3459 | if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) { | |
3460 | InferredValue* singleton = symbolTable->singletonScope(); | |
3461 | if (JSValue value = singleton->inferredValue()) { | |
3462 | m_graph.watchpoints().addLazily(singleton); | |
3463 | set(VirtualRegister(dst), weakJSConstant(value)); | |
3464 | break; | |
3465 | } | |
3466 | } | |
3467 | if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) { | |
3468 | for (unsigned n = depth; n--;) | |
3469 | scope = scope->next(); | |
3470 | set(VirtualRegister(dst), weakJSConstant(scope)); | |
3471 | break; | |
3472 | } | |
3473 | for (unsigned n = depth; n--;) | |
3474 | localBase = addToGraph(SkipScope, localBase); | |
3475 | set(VirtualRegister(dst), localBase); | |
3476 | break; | |
3477 | } | |
3478 | case Dynamic: | |
3479 | RELEASE_ASSERT_NOT_REACHED(); | |
3480 | break; | |
3481 | } | |
3482 | NEXT_OPCODE(op_resolve_scope); | |
3483 | } | |
3484 | ||
3485 | case op_get_from_scope: { | |
3486 | int dst = currentInstruction[1].u.operand; | |
3487 | int scope = currentInstruction[2].u.operand; | |
3488 | unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; | |
3489 | UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; | |
3490 | ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); | |
3491 | ||
3492 | Structure* structure = 0; | |
3493 | WatchpointSet* watchpoints = 0; | |
3494 | uintptr_t operand; | |
3495 | { | |
3496 | ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); | |
3497 | if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) | |
3498 | watchpoints = currentInstruction[5].u.watchpointSet; | |
3499 | else | |
3500 | structure = currentInstruction[5].u.structure.get(); | |
3501 | operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); | |
3502 | } | |
3503 | ||
3504 | UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode. | |
3505 | ||
3506 | JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); | |
3507 | ||
3508 | switch (resolveType) { | |
3509 | case GlobalProperty: | |
3510 | case GlobalPropertyWithVarInjectionChecks: { | |
3511 | SpeculatedType prediction = getPrediction(); | |
3512 | GetByIdStatus status = GetByIdStatus::computeFor(structure, uid); | |
3513 | if (status.state() != GetByIdStatus::Simple | |
3514 | || status.numVariants() != 1 | |
3515 | || status[0].structureSet().size() != 1) { | |
3516 | set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope)))); | |
3517 | break; | |
3518 | } | |
3519 | Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure()); | |
3520 | addToGraph(Phantom, get(VirtualRegister(scope))); | |
3521 | set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand)); | |
3522 | break; | |
3523 | } | |
3524 | case GlobalVar: | |
3525 | case GlobalVarWithVarInjectionChecks: { | |
3526 | addToGraph(Phantom, get(VirtualRegister(scope))); | |
3527 | WatchpointSet* watchpointSet; | |
3528 | ScopeOffset offset; | |
3529 | { | |
3530 | ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock); | |
3531 | SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid); | |
3532 | watchpointSet = entry.watchpointSet(); | |
3533 | offset = entry.scopeOffset(); | |
3534 | } | |
3535 | if (watchpointSet && watchpointSet->state() == IsWatched) { | |
3536 | // This has a fun concurrency story. There is the possibility of a race in two | |
3537 | // directions: | |
3538 | // | |
3539 | // We see that the set IsWatched, but in the meantime it gets invalidated: this is | |
3540 | // fine because if we saw that it IsWatched then we add a watchpoint. If it gets | |
3541 | // invalidated, then this compilation is invalidated. Note that in the meantime we | |
3542 | // may load an absurd value from the global object. It's fine to load an absurd | |
3543 | // value if the compilation is invalidated anyway. | |
3544 | // | |
3545 | // We see that the set IsWatched, but the value isn't yet initialized: this isn't | |
3546 | // possible because of the ordering of operations. | |
3547 | // | |
3548 | // Here's how we order operations: | |
3549 | // | |
3550 | // Main thread stores to the global object: always store a value first, and only | |
2551 | // after that do we touch the watchpoint set. There is a fence in the touch that | |
3552 | // ensures that the store to the global object always happens before the touch on the | |
3553 | // set. | |
3554 | // | |
3555 | // Compilation thread: always first load the state of the watchpoint set, and then | |
3556 | // load the value. The WatchpointSet::state() method does fences for us to ensure | |
3557 | // that the load of the state happens before our load of the value. | |
3558 | // | |
3559 | // Finalizing compilation: this happens on the main thread and synchronously checks | |
3560 | // validity of all watchpoint sets. | |
3561 | // | |
3562 | // We will only perform optimizations if the load of the state yields IsWatched. That | |
3563 | // means that at least one store would have happened to initialize the original value | |
3564 | // of the variable (that is, the value we'd like to constant fold to). There may be | |
3565 | // other stores that happen after that, but those stores will invalidate the | |
3566 | // watchpoint set and also the compilation. | |
3567 | ||
2568 | // Note that we need to use the operand, which is a direct pointer to the global, | |
3569 | // rather than looking up the global by doing variableAt(offset). That's because the | |
3570 | // internal data structures of JSSegmentedVariableObject are not thread-safe even | |
3571 | // though accessing the global itself is. The segmentation involves a vector spine | |
3572 | // that resizes with malloc/free, so if new globals unrelated to the one we are | |
3573 | // reading are added, we might access freed memory if we do variableAt(). | |
3574 | WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand); | |
3575 | ||
3576 | ASSERT(globalObject->findVariableIndex(pointer) == offset); | |
3577 | ||
3578 | JSValue value = pointer->get(); | |
3579 | if (value) { | |
3580 | m_graph.watchpoints().addLazily(watchpointSet); | |
3581 | set(VirtualRegister(dst), weakJSConstant(value)); | |
3582 | break; | |
3583 | } | |
3584 | } | |
3585 | ||
3586 | SpeculatedType prediction = getPrediction(); | |
3587 | set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction))); | |
3588 | break; | |
3589 | } | |
3590 | case LocalClosureVar: | |
3591 | case ClosureVar: | |
3592 | case ClosureVarWithVarInjectionChecks: { | |
3593 | Node* scopeNode = get(VirtualRegister(scope)); | |
3594 | ||
3595 | // Ideally we wouldn't have to do this Phantom. But: | |
3596 | // | |
3597 | // For the constant case: we must do it because otherwise we would have no way of knowing | |
3598 | // that the scope is live at OSR here. | |
3599 | // | |
3600 | // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation | |
3601 | // won't be able to handle an Undefined scope. | |
3602 | addToGraph(Phantom, scopeNode); | |
3603 | ||
3604 | // Constant folding in the bytecode parser is important for performance. This may not | |
3605 | // have executed yet. If it hasn't, then we won't have a prediction. Lacking a | |
3606 | // prediction, we'd otherwise think that it has to exit. Then when it did execute, we | |
3607 | // would recompile. But if we can fold it here, we avoid the exit. | |
3608 | if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { | |
3609 | set(VirtualRegister(dst), weakJSConstant(value)); | |
3610 | break; | |
3611 | } | |
3612 | SpeculatedType prediction = getPrediction(); | |
3613 | set(VirtualRegister(dst), | |
3614 | addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); | |
3615 | break; | |
3616 | } | |
3617 | case Dynamic: | |
3618 | RELEASE_ASSERT_NOT_REACHED(); | |
3619 | break; | |
3620 | } | |
3621 | NEXT_OPCODE(op_get_from_scope); | |
3622 | } | |
3623 | ||
        case op_put_to_scope: {
            unsigned scope = currentInstruction[1].u.operand;
            unsigned identifierNumber = currentInstruction[2].u.operand;
            if (identifierNumber != UINT_MAX)
                identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
            unsigned value = currentInstruction[3].u.operand;
            ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
            UniquedStringImpl* uid;
            if (identifierNumber != UINT_MAX)
                uid = m_graph.identifiers()[identifierNumber];
            else
                uid = nullptr;

            Structure* structure = nullptr;
            WatchpointSet* watchpoints = nullptr;
            uintptr_t operand;
            {
                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
                if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
                    watchpoints = currentInstruction[5].u.watchpointSet;
                else
                    structure = currentInstruction[5].u.structure.get();
                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
            }

            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

            switch (resolveType) {
            case GlobalProperty:
            case GlobalPropertyWithVarInjectionChecks: {
                PutByIdStatus status;
                if (uid)
                    status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
                else
                    status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
                if (status.numVariants() != 1
                    || status[0].kind() != PutByIdVariant::Replace
                    || status[0].structure().size() != 1) {
                    addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
                    break;
                }
                ASSERT(status[0].structure().onlyStructure() == structure);
                Node* base = cellConstantWithStructureCheck(globalObject, structure);
                addToGraph(Phantom, get(VirtualRegister(scope)));
                handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
                // Keep scope alive until after put.
                addToGraph(Phantom, get(VirtualRegister(scope)));
                break;
            }
            case GlobalVar:
            case GlobalVarWithVarInjectionChecks: {
                if (watchpoints) {
                    SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
                    ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
                }
                Node* valueNode = get(VirtualRegister(value));
                addToGraph(PutGlobalVar, OpInfo(operand), weakJSConstant(globalObject), valueNode);
                if (watchpoints && watchpoints->state() != IsInvalidated) {
                    // Must happen after the store. See comment for GetGlobalVar.
                    addToGraph(NotifyWrite, OpInfo(watchpoints));
                }
                // Keep scope alive until after put.
                addToGraph(Phantom, get(VirtualRegister(scope)));
                break;
            }
            case LocalClosureVar:
            case ClosureVar:
            case ClosureVarWithVarInjectionChecks: {
                Node* scopeNode = get(VirtualRegister(scope));
                Node* valueNode = get(VirtualRegister(value));

                addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);

                if (watchpoints && watchpoints->state() != IsInvalidated) {
                    // Must happen after the store. See comment for GetGlobalVar.
                    addToGraph(NotifyWrite, OpInfo(watchpoints));
                }
                break;
            }
            case Dynamic:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            NEXT_OPCODE(op_put_to_scope);
        }

        case op_loop_hint: {
            // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
            // OSR can only happen at basic block boundaries. Assert that these two statements
            // are compatible.
            RELEASE_ASSERT(m_currentIndex == blockBegin);

            // We never do OSR into an inlined code block. That could not happen, since OSR
            // looks up the code block that is the replacement for the baseline JIT code
            // block. Hence, machine code block = true code block = not inline code block.
            if (!m_inlineStackTop->m_caller)
                m_currentBlock->isOSRTarget = true;

            addToGraph(LoopHint);

            if (m_vm->watchdog && m_vm->watchdog->isEnabled())
                addToGraph(CheckWatchdogTimer);

            NEXT_OPCODE(op_loop_hint);
        }

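        // Materialize a new lexical environment for the current scope. Note that both the
        // destination and the scope operand are set to the freshly created activation, so
        // subsequent scoped accesses in this block see the new environment.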
        case op_create_lexical_environment: {
            FrozenValue* symbolTable = m_graph.freezeStrong(m_graph.symbolTableFor(currentNodeOrigin().semantic));
            Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), get(VirtualRegister(currentInstruction[2].u.operand)));
            set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
            set(VirtualRegister(currentInstruction[2].u.operand), lexicalEnvironment);
            NEXT_OPCODE(op_create_lexical_environment);
        }

        case op_get_scope: {
            // Help the later stages a bit by doing some small constant folding here. Note that this
            // only helps for the first basic block. It's extremely important not to constant fold
            // loads from the scope register later, as that would prevent the DFG from tracking the
            // bytecode-level liveness of the scope register.
            Node* callee = get(VirtualRegister(JSStack::Callee));
            Node* result;
            if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>())
                result = weakJSConstant(function->scope());
            else
                result = addToGraph(GetScope, callee);
            set(VirtualRegister(currentInstruction[1].u.operand), result);
            NEXT_OPCODE(op_get_scope);
        }

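        // The three arguments-creation opcodes below map directly onto the corresponding DFG
        // nodes (CreateDirectArguments, CreateScopedArguments, CreateClonedArguments). Each of
        // them calls noticeArgumentsUse() first, so the parser records that this code block
        // observes its arguments.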
        case op_create_direct_arguments: {
            noticeArgumentsUse();
            Node* createArguments = addToGraph(CreateDirectArguments);
            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
            NEXT_OPCODE(op_create_direct_arguments);
        }

        case op_create_scoped_arguments: {
            noticeArgumentsUse();
            Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
            NEXT_OPCODE(op_create_scoped_arguments);
        }

        case op_create_out_of_band_arguments: {
            noticeArgumentsUse();
            Node* createArguments = addToGraph(CreateClonedArguments);
            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
            NEXT_OPCODE(op_create_out_of_band_arguments);
        }

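        // Direct reads and writes of an arguments object. The element index is a compile-time
        // operand carried in the OpInfo, and reads additionally carry a value-profile
        // prediction.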
        case op_get_from_arguments: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(
                    GetFromArguments,
                    OpInfo(currentInstruction[3].u.operand),
                    OpInfo(getPrediction()),
                    get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_get_from_arguments);
        }

        case op_put_to_arguments: {
            addToGraph(
                PutToArguments,
                OpInfo(currentInstruction[2].u.operand),
                get(VirtualRegister(currentInstruction[1].u.operand)),
                get(VirtualRegister(currentInstruction[3].u.operand)));
            NEXT_OPCODE(op_put_to_arguments);
        }

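        // Function declarations and function expressions both lower to NewFunction. The
        // FunctionExecutable is frozen strongly, so the graph can refer to it as a constant
        // for the rest of the compilation.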
        case op_new_func: {
            FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
            FrozenValue* frozen = m_graph.freezeStrong(decl);
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_new_func);
        }

        case op_new_func_exp: {
            FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
            FrozenValue* frozen = m_graph.freezeStrong(expr);
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_new_func_exp);
        }

        case op_typeof: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_typeof);
        }

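        // to_number does not emit an explicit conversion node. Instead the operand is
        // speculated to already be a number: the Phantom's NumberUse edge supplies the type
        // check, and the original node is forwarded as the result.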
        case op_to_number: {
            Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
            addToGraph(Phantom, Edge(node, NumberUse));
            set(VirtualRegister(currentInstruction[1].u.operand), node);
            NEXT_OPCODE(op_to_number);
        }

        case op_to_string: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
            NEXT_OPCODE(op_to_string);
        }

        case op_in: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_in);
        }

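        // The opcodes below implement for-in enumeration. Most of them translate one-for-one
        // into DFG nodes; op_has_indexed_property additionally consults the array profile, and
        // op_get_direct_pname is built as a var-arg node.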
        case op_get_enumerable_length: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,
                get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_get_enumerable_length);
        }

        case op_has_generic_property: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty,
                get(VirtualRegister(currentInstruction[2].u.operand)),
                get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_has_generic_property);
        }

        case op_has_structure_property: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty,
                get(VirtualRegister(currentInstruction[2].u.operand)),
                get(VirtualRegister(currentInstruction[3].u.operand)),
                get(VirtualRegister(currentInstruction[4].u.operand))));
            NEXT_OPCODE(op_has_structure_property);
        }

        case op_has_indexed_property: {
            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
            ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
            Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
            set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
            NEXT_OPCODE(op_has_indexed_property);
        }

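        // get_direct_pname takes four children (base, property, index, enumerator), so it is
        // constructed as a var-arg node: the children are appended with addVarArgChild()
        // before the node itself is created.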
        case op_get_direct_pname: {
            SpeculatedType prediction = getPredictionWithoutOSRExit();

            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
            Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
            Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));

            addVarArgChild(base);
            addVarArgChild(property);
            addVarArgChild(index);
            addVarArgChild(enumerator);
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));

            NEXT_OPCODE(op_get_direct_pname);
        }

        case op_get_property_enumerator: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator,
                get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_get_property_enumerator);
        }

        case op_enumerator_structure_pname: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
                get(VirtualRegister(currentInstruction[2].u.operand)),
                get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_enumerator_structure_pname);
        }

        case op_enumerator_generic_pname: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
                get(VirtualRegister(currentInstruction[2].u.operand)),
                get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_enumerator_generic_pname);
        }

        case op_to_index_string: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,
                get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_to_index_string);
        }

        default:
            // Parse failed! This should not happen because the capabilities checker
            // should have caught it.
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
    }
}

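// While parsing, jump and switch targets are recorded as bytecode offsets. Once all of a
// code block's basic blocks exist, linkBlock() rewrites those offsets into pointers to the
// actual successor blocks.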
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->terminal();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
        break;

    case Branch: {
        BranchData* data = node->branchData();
        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
        break;
    }

    case Switch: {
        SwitchData* data = node->switchData();
        for (unsigned i = node->switchData()->cases.size(); i--;)
            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
        break;
    }

    default:
        break;
    }

    if (verbose)
        dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
    block->didLink();
}

void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (verbose)
            dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            if (verbose)
                dataLog(" Does need normal linking.\n");
            linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}

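// The identifier map is used when inlining to remap an inlined code block's identifier
// indices onto the machine code block's identifier table. It is built lazily, the first
// time it is needed.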
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;

    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);

    m_haveBuiltOperandMaps = true;
}

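// An InlineStackEntry is pushed for the machine code block and for every inlined call
// frame. It snapshots the profiling data needed from the profiled block, allocates
// argument positions, and, for inlined frames, sets up the InlineCallFrame plus the
// identifier, constant-buffer, and switch-table remapping tables.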
ByteCodeParser::InlineStackEntry::InlineStackEntry(
    ByteCodeParser* byteCodeParser,
    CodeBlock* codeBlock,
    CodeBlock* profiledBlock,
    BasicBlock* callsiteBlockHead,
    JSFunction* callee, // Null if this is a closure call.
    VirtualRegister returnValueVR,
    VirtualRegister inlineCallFrameStart,
    int argumentCountIncludingThis,
    InlineCallFrame::Kind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    {
        ConcurrentJITLocker locker(m_profiledBlock->m_lock);
        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
        m_exitProfile.initialize(locker, profiledBlock->exitProfile());

        // We do this while holding the lock because StructureStubInfos may still be getting
        // added to operations: the profiled block could be in the middle of LLInt->JIT
        // tier-up, in which case the infos are being added right now.
        if (m_profiledBlock->hasBaselineJITProfiling()) {
            m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
            m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
        }
    }

    m_argumentPositions.resize(argumentCountIncludingThis);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(inlineCallFrameStart.isValid());
        ASSERT(callsiteBlockHead);

        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
        byteCodeParser->m_graph.freeze(codeBlock->ownerExecutable());
        initializeLazyWriteBarrierForInlineCallFrameExecutable(
            byteCodeParser->m_graph.m_plan.writeBarriers,
            m_inlineCallFrame->executable,
            byteCodeParser->m_codeBlock,
            m_inlineCallFrame,
            byteCodeParser->m_codeBlock->ownerExecutable(),
            codeBlock->ownerExecutable());
        m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize);
        if (callee) {
            m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
            m_inlineCallFrame->isClosureCall = false;
        } else
            m_inlineCallFrame->isClosureCall = true;
        m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
        m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        m_inlineCallFrame->kind = kind;

        byteCodeParser->buildOperandMapsIfNecessary();

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            UniquedStringImpl* rep = codeBlock->identifier(i).impl();
            BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
            if (result.isNewEntry)
                byteCodeParser->m_graph.identifiers().addLazily(rep);
            m_identifierRemap[i] = result.iterator->value;
        }
        for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
            // If we inline the same code block multiple times, we don't want to needlessly
            // duplicate its constant buffers.
            HashMap<ConstantBufferKey, unsigned>::iterator iter =
                byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
            if (iter != byteCodeParser->m_constantBufferCache.end()) {
                m_constantBufferRemap[i] = iter->value;
                continue;
            }
            Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
            unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
            m_constantBufferRemap[i] = newIndex;
            byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
        }
        for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
            m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
            byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
        }
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(!returnValueVR.isValid());
        ASSERT(!inlineCallFrameStart.isValid());
        ASSERT(!callsiteBlockHead);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
            m_constantBufferRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
            m_switchRemap[i] = i;
        m_callsiteBlockHeadNeedsLinking = false;
    }

    byteCodeParser->m_inlineStackTop = this;
}

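// Parses the code block on top of the inline stack: basic blocks are created at each
// precise jump target and filled in by parseBlock().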
void ByteCodeParser::parseCodeBlock()
{
    clearCaches();

    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

    if (m_graph.compilation()) {
        m_graph.compilation()->addProfiledBytecodes(
            *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
    }

    if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
        if (inlineCallFrame()) {
            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->caller);
            deferredSourceDump.append(dump);
        } else
            deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
    }

    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Parsing ", *codeBlock);
        if (inlineCallFrame()) {
            dataLog(
                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
                " ", inlineCallFrame()->caller);
        }
        dataLog(
            ": needsActivation = ", codeBlock->needsActivation(),
            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
        codeBlock->baselineVersion()->dumpBytecode();
    }

    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Jump targets: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < jumpTargets.size(); ++i)
            dataLog(comma, jumpTargets[i]);
        dataLog("\n");
    }

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index than the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.lastBlock();
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
                    m_currentBlock = block.get();
                    // This assertion checks two things:
                    // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
                    //    horribly wrong. So, we're probably generating incorrect code.
                    // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                    //    a peephole coalescing of this block in the if statement above. So, we're
                    //    generating suboptimal code and leaving more work for the CFG simplifier.
                    if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                        unsigned lastBegin =
                            m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
                        ASSERT_UNUSED(
                            lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
                    }
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
                    m_inlineStackTop->m_blockLinkingTargets.append(block.get());
                    // The first block is definitely an OSR target.
                    if (!m_graph.numBlocks())
                        block->isOSRTarget = true;
                    m_graph.appendBlock(block);
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);

            if (!shouldContinueParsing) {
                if (Options::verboseDFGByteCodeParsing())
                    dataLog("Done parsing ", *codeBlock, "\n");
                return;
            }

            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());

    if (Options::verboseDFGByteCodeParsing())
        dataLog("Done parsing ", *codeBlock, " (fell off end)\n");
}

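// Entry point. Parses the machine code block (inlining as it goes), then links the
// resulting blocks and prunes unreachable ones.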
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

    if (Options::verboseDFGByteCodeParsing())
        dataLog("Parsing ", *m_codeBlock, "\n");

    m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
    if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
        && Options::enablePolyvariantDevirtualization()) {
        if (Options::enablePolyvariantCallInlining())
            CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
        if (Options::enablePolyvariantByIdInlining())
            m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
    }

    InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
        m_codeBlock->numParameters(), InlineCallFrame::Call);

    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    m_graph.determineReachability();
    m_graph.killUnreachableBlocks();

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
        ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
    }

    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}

bool parse(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Parsing");
    return ByteCodeParser(graph).parse();
}

} } // namespace JSC::DFG

#endif