/*
 * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 * Copyright (C) 2012 Igalia, S.L.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
30 | ||
31 | #ifndef BytecodeGenerator_h | |
32 | #define BytecodeGenerator_h | |
33 | ||
34 | #include "CodeBlock.h" | |
35 | #include <wtf/HashTraits.h> | |
36 | #include "Instruction.h" | |
37 | #include "Label.h" | |
38 | #include "LabelScope.h" | |
39 | #include "Interpreter.h" | |
40 | #include "ParserError.h" | |
41 | #include "RegisterID.h" | |
42 | #include "SymbolTable.h" | |
43 | #include "Debugger.h" | |
44 | #include "Nodes.h" | |
45 | #include "StaticPropertyAnalyzer.h" | |
46 | #include "UnlinkedCodeBlock.h" | |
47 | #include <wtf/PassRefPtr.h> | |
48 | #include <wtf/SegmentedVector.h> | |
49 | #include <wtf/Vector.h> | |
50 | ||
51 | namespace JSC { | |
52 | ||
53 | class Identifier; | |
54 | class Label; | |
55 | class JSScope; | |
56 | ||
57 | enum ExpectedFunction { | |
58 | NoExpectedFunction, | |
59 | ExpectObjectConstructor, | |
60 | ExpectArrayConstructor | |
61 | }; | |
62 | ||
63 | class CallArguments { | |
64 | public: | |
65 | CallArguments(BytecodeGenerator& generator, ArgumentsNode* argumentsNode); | |
66 | ||
67 | RegisterID* thisRegister() { return m_argv[0].get(); } | |
68 | RegisterID* argumentRegister(unsigned i) { return m_argv[i + 1].get(); } | |
69 | unsigned registerOffset() { return m_argv.last()->index() + CallFrame::offsetFor(argumentCountIncludingThis()); } | |
70 | unsigned argumentCountIncludingThis() { return m_argv.size(); } | |
71 | RegisterID* profileHookRegister() { return m_profileHookRegister.get(); } | |
72 | ArgumentsNode* argumentsNode() { return m_argumentsNode; } | |
73 | ||
74 | private: | |
75 | void newArgument(BytecodeGenerator&); | |
76 | ||
77 | RefPtr<RegisterID> m_profileHookRegister; | |
78 | ArgumentsNode* m_argumentsNode; | |
79 | Vector<RefPtr<RegisterID>, 8, UnsafeVectorOverflow> m_argv; | |
80 | }; | |
81 | ||
82 | struct FinallyContext { | |
83 | StatementNode* finallyBlock; | |
84 | unsigned scopeContextStackSize; | |
85 | unsigned switchContextStackSize; | |
86 | unsigned forInContextStackSize; | |
87 | unsigned tryContextStackSize; | |
88 | unsigned labelScopesSize; | |
89 | int finallyDepth; | |
90 | int dynamicScopeDepth; | |
91 | }; | |
92 | ||
93 | struct ControlFlowContext { | |
94 | bool isFinallyBlock; | |
95 | FinallyContext finallyContext; | |
96 | }; | |
97 | ||
98 | struct ForInContext { | |
99 | RefPtr<RegisterID> expectedSubscriptRegister; | |
100 | RefPtr<RegisterID> iterRegister; | |
101 | RefPtr<RegisterID> indexRegister; | |
102 | RefPtr<RegisterID> propertyRegister; | |
103 | }; | |
104 | ||
105 | struct TryData { | |
106 | RefPtr<Label> target; | |
107 | unsigned targetScopeDepth; | |
108 | }; | |
109 | ||
110 | struct TryContext { | |
111 | RefPtr<Label> start; | |
112 | TryData* tryData; | |
113 | }; | |
114 | ||
115 | struct TryRange { | |
116 | RefPtr<Label> start; | |
117 | RefPtr<Label> end; | |
118 | TryData* tryData; | |
119 | }; | |
120 | ||
121 | class ResolveResult { | |
122 | public: | |
123 | enum Flags { | |
124 | // The property is locally bound, in a register. | |
125 | RegisterFlag = 0x1, | |
126 | // We need to traverse the scope chain at runtime, checking for | |
127 | // non-strict eval and/or `with' nodes. | |
128 | DynamicFlag = 0x2, | |
129 | // The resolved binding is immutable. | |
130 | ReadOnlyFlag = 0x4, | |
131 | // The property has a static location | |
132 | StaticFlag = 0x8, | |
133 | // Entry at scope distance "m_depth" and located at "m_index" | |
134 | ScopedFlag = 0x10 | |
135 | }; | |
136 | ||
137 | enum Type { | |
138 | // The property is local, and stored in a register. | |
139 | Register = RegisterFlag | StaticFlag, | |
140 | // A read-only local, created by "const". | |
141 | ReadOnlyRegister = RegisterFlag | ReadOnlyFlag | StaticFlag, | |
142 | // Lexically fixed location in the scope chain | |
143 | Lexical = ScopedFlag | StaticFlag, | |
144 | // A read-only Lexical, created by "const". | |
145 | ReadOnlyLexical = ScopedFlag | ReadOnlyFlag | StaticFlag, | |
146 | // Any other form of lookup | |
147 | Dynamic = DynamicFlag, | |
148 | }; | |
149 | ||
150 | static ResolveResult registerResolve(RegisterID *local, unsigned flags) | |
151 | { | |
152 | return ResolveResult(Register | flags, local); | |
153 | } | |
154 | static ResolveResult dynamicResolve() | |
155 | { | |
156 | return ResolveResult(Dynamic, 0); | |
157 | } | |
158 | static ResolveResult lexicalResolve(int index, size_t depth, unsigned flags) | |
159 | { | |
160 | if (flags & DynamicFlag) | |
161 | return dynamicResolve(); | |
162 | return ResolveResult(Lexical | flags, index, depth); | |
163 | } | |
164 | unsigned type() const { return m_type; } | |
165 | ||
166 | // Returns the register corresponding to a local variable, or 0 if no | |
167 | // such register exists. Registers returned by ResolveResult::local() do | |
168 | // not require explicit reference counting. | |
169 | RegisterID* local() const { return m_local; } | |
170 | ||
171 | bool isRegister() const { return m_type & RegisterFlag; } | |
172 | bool isStatic() const { return (m_type & StaticFlag) && !isDynamic(); } | |
173 | bool isDynamic() const { return m_type & DynamicFlag; } | |
174 | bool isReadOnly() const { return (m_type & ReadOnlyFlag) && !isDynamic(); } | |
175 | ||
176 | unsigned depth() const { ASSERT(isStatic()); return m_depth; } | |
177 | int32_t index() const { ASSERT(isStatic()); return m_index; } | |
178 | ||
179 | private: | |
180 | ResolveResult(unsigned type, RegisterID* local) | |
181 | : m_type(type) | |
182 | , m_local(local) | |
183 | , m_index(0) | |
184 | , m_depth(0) | |
185 | { | |
186 | #ifndef NDEBUG | |
187 | checkValidity(); | |
188 | #endif | |
189 | } | |
190 | ||
191 | ResolveResult(unsigned type, int index, unsigned depth) | |
192 | : m_type(type) | |
193 | , m_local(0) | |
194 | , m_index(index) | |
195 | , m_depth(depth) | |
196 | { | |
197 | #ifndef NDEBUG | |
198 | checkValidity(); | |
199 | #endif | |
200 | } | |
201 | ||
202 | #ifndef NDEBUG | |
203 | void checkValidity(); | |
204 | #endif | |
205 | ||
206 | unsigned m_type; | |
207 | RegisterID* m_local; // Local register, if RegisterFlag is set | |
208 | int m_index; | |
209 | unsigned m_depth; | |
210 | }; | |
211 | ||
212 | struct NonlocalResolveInfo { | |
213 | friend class BytecodeGenerator; | |
214 | NonlocalResolveInfo() | |
215 | : m_state(Unused) | |
216 | { | |
217 | } | |
218 | ~NonlocalResolveInfo() | |
219 | { | |
220 | ASSERT(m_state == Put); | |
221 | } | |
222 | private: | |
223 | void resolved(uint32_t putToBaseIndex) | |
224 | { | |
225 | ASSERT(putToBaseIndex); | |
226 | ASSERT(m_state == Unused); | |
227 | m_state = Resolved; | |
228 | m_putToBaseIndex = putToBaseIndex; | |
229 | } | |
230 | uint32_t put() | |
231 | { | |
232 | ASSERT(m_state == Resolved); | |
233 | m_state = Put; | |
234 | return m_putToBaseIndex; | |
235 | } | |
236 | enum State { Unused, Resolved, Put }; | |
237 | State m_state; | |
238 | uint32_t m_putToBaseIndex; | |
239 | }; | |
240 | ||
241 | class BytecodeGenerator { | |
242 | WTF_MAKE_FAST_ALLOCATED; | |
243 | public: | |
244 | typedef DeclarationStacks::VarStack VarStack; | |
245 | typedef DeclarationStacks::FunctionStack FunctionStack; | |
246 | ||
247 | BytecodeGenerator(VM&, JSScope*, ProgramNode*, UnlinkedProgramCodeBlock*, DebuggerMode, ProfilerMode); | |
248 | BytecodeGenerator(VM&, JSScope*, FunctionBodyNode*, UnlinkedFunctionCodeBlock*, DebuggerMode, ProfilerMode); | |
249 | BytecodeGenerator(VM&, JSScope*, EvalNode*, UnlinkedEvalCodeBlock*, DebuggerMode, ProfilerMode); | |
250 | ||
251 | ~BytecodeGenerator(); | |
252 | ||
253 | VM* vm() const { return m_vm; } | |
254 | const CommonIdentifiers& propertyNames() const { return *m_vm->propertyNames; } | |
255 | ||
256 | bool isConstructor() { return m_codeBlock->isConstructor(); } | |
257 | ||
258 | ParserError generate(); | |
259 | ||
260 | bool isArgumentNumber(const Identifier&, int); | |
261 | ||
262 | void setIsNumericCompareFunction(bool isNumericCompareFunction); | |
263 | ||
264 | bool willResolveToArguments(const Identifier&); | |
265 | RegisterID* uncheckedRegisterForArguments(); | |
266 | ||
267 | // Resolve an identifier, given the current compile-time scope chain. | |
268 | ResolveResult resolve(const Identifier&); | |
269 | // Behaves as resolve does, but ignores dynamic scope as | |
270 | // dynamic scope should not interfere with const initialisation | |
271 | ResolveResult resolveConstDecl(const Identifier&); | |
272 | ||
273 | // Returns the register storing "this" | |
274 | RegisterID* thisRegister() { return &m_thisRegister; } | |
275 | ||
276 | // Returns the next available temporary register. Registers returned by | |
277 | // newTemporary require a modified form of reference counting: any | |
278 | // register with a refcount of 0 is considered "available", meaning that | |
279 | // the next instruction may overwrite it. | |
280 | RegisterID* newTemporary(); | |
281 | ||
282 | // The same as newTemporary(), but this function returns "suggestion" if | |
283 | // "suggestion" is a temporary. This function is helpful in situations | |
284 | // where you've put "suggestion" in a RefPtr, but you'd like to allow | |
285 | // the next instruction to overwrite it anyway. | |
286 | RegisterID* newTemporaryOr(RegisterID* suggestion) { return suggestion->isTemporary() ? suggestion : newTemporary(); } | |
287 | ||
288 | // Functions for handling of dst register | |
289 | ||
290 | RegisterID* ignoredResult() { return &m_ignoredResultRegister; } | |
291 | ||
292 | // Returns a place to write intermediate values of an operation | |
293 | // which reuses dst if it is safe to do so. | |
294 | RegisterID* tempDestination(RegisterID* dst) | |
295 | { | |
296 | return (dst && dst != ignoredResult() && dst->isTemporary()) ? dst : newTemporary(); | |
297 | } | |
298 | ||
299 | // Returns the place to write the final output of an operation. | |
300 | RegisterID* finalDestination(RegisterID* originalDst, RegisterID* tempDst = 0) | |
301 | { | |
302 | if (originalDst && originalDst != ignoredResult()) | |
303 | return originalDst; | |
304 | ASSERT(tempDst != ignoredResult()); | |
305 | if (tempDst && tempDst->isTemporary()) | |
306 | return tempDst; | |
307 | return newTemporary(); | |
308 | } | |
309 | ||
310 | // Returns the place to write the final output of an operation. | |
311 | RegisterID* finalDestinationOrIgnored(RegisterID* originalDst, RegisterID* tempDst = 0) | |
312 | { | |
313 | if (originalDst) | |
314 | return originalDst; | |
315 | ASSERT(tempDst != ignoredResult()); | |
316 | if (tempDst && tempDst->isTemporary()) | |
317 | return tempDst; | |
318 | return newTemporary(); | |
319 | } | |
320 | ||
321 | RegisterID* destinationForAssignResult(RegisterID* dst) | |
322 | { | |
323 | if (dst && dst != ignoredResult() && m_codeBlock->needsFullScopeChain()) | |
324 | return dst->isTemporary() ? dst : newTemporary(); | |
325 | return 0; | |
326 | } | |
327 | ||
328 | // Moves src to dst if dst is not null and is different from src, otherwise just returns src. | |
329 | RegisterID* moveToDestinationIfNeeded(RegisterID* dst, RegisterID* src) | |
330 | { | |
331 | return dst == ignoredResult() ? 0 : (dst && dst != src) ? emitMove(dst, src) : src; | |
332 | } | |
333 | ||
334 | LabelScopePtr newLabelScope(LabelScope::Type, const Identifier* = 0); | |
335 | PassRefPtr<Label> newLabel(); | |
336 | ||
337 | void emitNode(RegisterID* dst, StatementNode* n) | |
338 | { | |
339 | // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary. | |
340 | ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount()); | |
341 | if (!m_stack.isSafeToRecurse()) { | |
342 | emitThrowExpressionTooDeepException(); | |
343 | return; | |
344 | } | |
345 | n->emitBytecode(*this, dst); | |
346 | } | |
347 | ||
348 | void emitNode(StatementNode* n) | |
349 | { | |
350 | emitNode(0, n); | |
351 | } | |
352 | ||
353 | RegisterID* emitNode(RegisterID* dst, ExpressionNode* n) | |
354 | { | |
355 | // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary. | |
356 | ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount()); | |
357 | if (!m_stack.isSafeToRecurse()) | |
358 | return emitThrowExpressionTooDeepException(); | |
359 | return n->emitBytecode(*this, dst); | |
360 | } | |
361 | ||
362 | RegisterID* emitNode(ExpressionNode* n) | |
363 | { | |
364 | return emitNode(0, n); | |
365 | } | |
366 | ||
367 | void emitNodeInConditionContext(ExpressionNode* n, Label* trueTarget, Label* falseTarget, FallThroughMode fallThroughMode) | |
368 | { | |
369 | if (!m_stack.isSafeToRecurse()) { | |
370 | emitThrowExpressionTooDeepException(); | |
371 | return; | |
372 | } | |
373 | ||
374 | n->emitBytecodeInConditionContext(*this, trueTarget, falseTarget, fallThroughMode); | |
375 | } | |
376 | ||
377 | void emitExpressionInfo(int divot, int startOffset, int endOffset, unsigned line, int lineStart) | |
378 | { | |
379 | int sourceOffset = m_scopeNode->source().startOffset(); | |
380 | unsigned firstLine = m_scopeNode->source().firstLine(); | |
381 | ||
382 | ASSERT(divot >= lineStart); | |
383 | ASSERT(divot >= sourceOffset); | |
384 | divot -= sourceOffset; | |
385 | ||
386 | if (lineStart > sourceOffset) | |
387 | lineStart -= sourceOffset; | |
388 | else | |
389 | lineStart = 0; | |
390 | ||
391 | ASSERT(line >= firstLine); | |
392 | line -= firstLine; | |
393 | ||
394 | unsigned instructionOffset = instructions().size(); | |
395 | ASSERT(divot >= lineStart); | |
396 | unsigned column = divot - lineStart; | |
397 | m_codeBlock->addExpressionInfo(instructionOffset, divot, startOffset, endOffset, line, column); | |
398 | } | |
399 | ||
400 | ALWAYS_INLINE bool leftHandSideNeedsCopy(bool rightHasAssignments, bool rightIsPure) | |
401 | { | |
402 | return (m_codeType != FunctionCode || m_codeBlock->needsFullScopeChain() || rightHasAssignments) && !rightIsPure; | |
403 | } | |
404 | ||
405 | ALWAYS_INLINE PassRefPtr<RegisterID> emitNodeForLeftHandSide(ExpressionNode* n, bool rightHasAssignments, bool rightIsPure) | |
406 | { | |
407 | if (leftHandSideNeedsCopy(rightHasAssignments, rightIsPure)) { | |
408 | PassRefPtr<RegisterID> dst = newTemporary(); | |
409 | emitNode(dst.get(), n); | |
410 | return dst; | |
411 | } | |
412 | ||
413 | return emitNode(n); | |
414 | } | |
415 | ||
416 | RegisterID* emitLoad(RegisterID* dst, bool); | |
417 | RegisterID* emitLoad(RegisterID* dst, double); | |
418 | RegisterID* emitLoad(RegisterID* dst, const Identifier&); | |
419 | RegisterID* emitLoad(RegisterID* dst, JSValue); | |
420 | RegisterID* emitLoadGlobalObject(RegisterID* dst); | |
421 | ||
422 | RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); | |
423 | RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); | |
424 | RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); | |
425 | RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src); | |
426 | ||
427 | RegisterID* emitCreateThis(RegisterID* dst); | |
428 | RegisterID* emitNewObject(RegisterID* dst); | |
429 | RegisterID* emitNewArray(RegisterID* dst, ElementNode*, unsigned length); // stops at first elision | |
430 | ||
431 | RegisterID* emitNewFunction(RegisterID* dst, FunctionBodyNode* body); | |
432 | RegisterID* emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* body); | |
433 | RegisterID* emitNewFunctionInternal(RegisterID* dst, unsigned index, bool shouldNullCheck); | |
434 | RegisterID* emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func); | |
435 | RegisterID* emitNewRegExp(RegisterID* dst, RegExp*); | |
436 | ||
437 | RegisterID* emitMove(RegisterID* dst, RegisterID* src); | |
438 | ||
439 | RegisterID* emitToNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_number, dst, src); } | |
440 | RegisterID* emitInc(RegisterID* srcDst); | |
441 | RegisterID* emitDec(RegisterID* srcDst); | |
442 | ||
443 | void emitCheckHasInstance(RegisterID* dst, RegisterID* value, RegisterID* base, Label* target); | |
444 | RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype); | |
445 | RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); } | |
446 | RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); } | |
447 | ||
448 | RegisterID* emitGetStaticVar(RegisterID* dst, const ResolveResult&, const Identifier&); | |
449 | RegisterID* emitPutStaticVar(const ResolveResult&, const Identifier&, RegisterID* value); | |
450 | RegisterID* emitInitGlobalConst(const Identifier&, RegisterID* value); | |
451 | ||
452 | RegisterID* emitResolve(RegisterID* dst, const ResolveResult&, const Identifier& property); | |
453 | RegisterID* emitResolveBase(RegisterID* dst, const ResolveResult&, const Identifier& property); | |
454 | RegisterID* emitResolveBaseForPut(RegisterID* dst, const ResolveResult&, const Identifier& property, NonlocalResolveInfo&); | |
455 | RegisterID* emitResolveWithBaseForPut(RegisterID* baseDst, RegisterID* propDst, const ResolveResult&, const Identifier& property, NonlocalResolveInfo&); | |
456 | RegisterID* emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const ResolveResult&, const Identifier& property); | |
457 | ||
458 | RegisterID* emitPutToBase(RegisterID* base, const Identifier&, RegisterID* value, NonlocalResolveInfo&); | |
459 | ||
460 | RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property); | |
461 | RegisterID* emitGetArgumentsLength(RegisterID* dst, RegisterID* base); | |
462 | RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value); | |
463 | RegisterID* emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value); | |
464 | RegisterID* emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier&); | |
465 | RegisterID* emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
466 | RegisterID* emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
467 | RegisterID* emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value); | |
468 | RegisterID* emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
469 | RegisterID* emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value); | |
470 | void emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter); | |
471 | ||
472 | ExpectedFunction expectedFunctionForIdentifier(const Identifier&); | |
473 | RegisterID* emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset, unsigned line, unsigned lineStart); | |
474 | RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset, unsigned line, unsigned lineStart); | |
475 | RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, RegisterID* profileHookRegister, unsigned divot, unsigned startOffset, unsigned endOffset, unsigned line, unsigned lineStart); | |
476 | RegisterID* emitLoadVarargs(RegisterID* argCountDst, RegisterID* thisRegister, RegisterID* args); | |
477 | ||
478 | RegisterID* emitReturn(RegisterID* src); | |
479 | RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); } | |
480 | ||
481 | RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset, unsigned line, unsigned lineStart); | |
482 | RegisterID* emitStrcat(RegisterID* dst, RegisterID* src, int count); | |
483 | void emitToPrimitive(RegisterID* dst, RegisterID* src); | |
484 | ||
485 | PassRefPtr<Label> emitLabel(Label*); | |
486 | void emitLoopHint(); | |
487 | PassRefPtr<Label> emitJump(Label* target); | |
488 | PassRefPtr<Label> emitJumpIfTrue(RegisterID* cond, Label* target); | |
489 | PassRefPtr<Label> emitJumpIfFalse(RegisterID* cond, Label* target); | |
490 | PassRefPtr<Label> emitJumpIfNotFunctionCall(RegisterID* cond, Label* target); | |
491 | PassRefPtr<Label> emitJumpIfNotFunctionApply(RegisterID* cond, Label* target); | |
492 | void emitPopScopes(int targetScopeDepth); | |
493 | ||
494 | RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget); | |
495 | RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, RegisterID* iter, Label* target); | |
496 | ||
497 | void emitReadOnlyExceptionIfNeeded(); | |
498 | ||
499 | // Start a try block. 'start' must have been emitted. | |
500 | TryData* pushTry(Label* start); | |
501 | // End a try block. 'end' must have been emitted. | |
502 | RegisterID* popTryAndEmitCatch(TryData*, RegisterID* targetRegister, Label* end); | |
503 | ||
504 | void emitThrow(RegisterID* exc) | |
505 | { | |
506 | m_usesExceptions = true; | |
507 | emitUnaryNoDstOp(op_throw, exc); | |
508 | } | |
509 | ||
510 | void emitThrowReferenceError(const String& message); | |
511 | ||
512 | void emitPushNameScope(const Identifier& property, RegisterID* value, unsigned attributes); | |
513 | ||
514 | RegisterID* emitPushWithScope(RegisterID* scope); | |
515 | void emitPopScope(); | |
516 | ||
517 | void emitDebugHook(DebugHookID, unsigned firstLine, unsigned lastLine, unsigned charOffset, unsigned lineStart); | |
518 | ||
519 | int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; } | |
520 | bool hasFinaliser() { return m_finallyDepth != 0; } | |
521 | ||
522 | void pushFinallyContext(StatementNode* finallyBlock); | |
523 | void popFinallyContext(); | |
524 | ||
525 | void pushOptimisedForIn(RegisterID* expectedBase, RegisterID* iter, RegisterID* index, RegisterID* propertyRegister) | |
526 | { | |
527 | ForInContext context = { expectedBase, iter, index, propertyRegister }; | |
528 | m_forInContextStack.append(context); | |
529 | } | |
530 | ||
531 | void popOptimisedForIn() | |
532 | { | |
533 | m_forInContextStack.removeLast(); | |
534 | } | |
535 | ||
536 | LabelScope* breakTarget(const Identifier&); | |
537 | LabelScope* continueTarget(const Identifier&); | |
538 | ||
539 | void beginSwitch(RegisterID*, SwitchInfo::SwitchType); | |
540 | void endSwitch(uint32_t clauseCount, RefPtr<Label>*, ExpressionNode**, Label* defaultLabel, int32_t min, int32_t range); | |
541 | ||
542 | CodeType codeType() const { return m_codeType; } | |
543 | ||
544 | bool shouldEmitProfileHooks() { return m_shouldEmitProfileHooks; } | |
545 | bool shouldEmitDebugHooks() { return m_shouldEmitDebugHooks; } | |
546 | ||
547 | bool isStrictMode() const { return m_codeBlock->isStrictMode(); } | |
548 | ||
549 | private: | |
550 | friend class Label; | |
551 | ||
552 | void emitOpcode(OpcodeID); | |
553 | UnlinkedArrayAllocationProfile newArrayAllocationProfile(); | |
554 | UnlinkedObjectAllocationProfile newObjectAllocationProfile(); | |
555 | UnlinkedArrayProfile newArrayProfile(); | |
556 | UnlinkedValueProfile emitProfiledOpcode(OpcodeID); | |
557 | int kill(RegisterID* dst) | |
558 | { | |
559 | int index = dst->index(); | |
560 | m_staticPropertyAnalyzer.kill(index); | |
561 | return index; | |
562 | } | |
563 | ||
564 | void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); | |
565 | void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); | |
566 | ALWAYS_INLINE void rewindBinaryOp(); | |
567 | ALWAYS_INLINE void rewindUnaryOp(); | |
568 | ||
569 | void emitComplexPopScopes(ControlFlowContext* topScope, ControlFlowContext* bottomScope); | |
570 | ||
571 | typedef HashMap<double, JSValue> NumberMap; | |
572 | typedef HashMap<StringImpl*, JSString*, IdentifierRepHash> IdentifierStringMap; | |
573 | typedef struct { | |
574 | int resolveOperations; | |
575 | int putOperations; | |
576 | } ResolveCacheEntry; | |
577 | typedef HashMap<StringImpl*, ResolveCacheEntry, IdentifierRepHash> IdentifierResolvePutMap; | |
578 | typedef HashMap<StringImpl*, uint32_t, IdentifierRepHash> IdentifierResolveMap; | |
579 | ||
580 | // Helper for emitCall() and emitConstruct(). This works because the set of | |
581 | // expected functions have identical behavior for both call and construct | |
582 | // (i.e. "Object()" is identical to "new Object()"). | |
583 | ExpectedFunction emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, Label* done); | |
584 | ||
585 | RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset, unsigned line, unsigned lineStart); | |
586 | ||
587 | RegisterID* newRegister(); | |
588 | ||
589 | // Adds a var slot and maps it to the name ident in symbolTable(). | |
590 | RegisterID* addVar(const Identifier& ident, bool isConstant) | |
591 | { | |
592 | RegisterID* local; | |
593 | addVar(ident, isConstant, local); | |
594 | return local; | |
595 | } | |
596 | ||
597 | // Ditto. Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used. | |
598 | bool addVar(const Identifier&, bool isConstant, RegisterID*&); | |
599 | ||
600 | // Adds an anonymous var slot. To give this slot a name, add it to symbolTable(). | |
601 | RegisterID* addVar() | |
602 | { | |
603 | ++m_codeBlock->m_numVars; | |
604 | return newRegister(); | |
605 | } | |
606 | ||
607 | // Returns the index of the added var. | |
608 | void addParameter(const Identifier&, int parameterIndex); | |
609 | RegisterID* resolveCallee(FunctionBodyNode*); | |
610 | void addCallee(FunctionBodyNode*, RegisterID*); | |
611 | ||
612 | void preserveLastVar(); | |
613 | bool shouldAvoidResolveGlobal(); | |
614 | ||
615 | RegisterID& registerFor(int index) | |
616 | { | |
617 | if (index >= 0) | |
618 | return m_calleeRegisters[index]; | |
619 | ||
620 | if (index == JSStack::Callee) | |
621 | return m_calleeRegister; | |
622 | ||
623 | ASSERT(m_parameters.size()); | |
624 | return m_parameters[index + m_parameters.size() + JSStack::CallFrameHeaderSize]; | |
625 | } | |
626 | ||
627 | unsigned addConstant(const Identifier&); | |
628 | RegisterID* addConstantValue(JSValue); | |
629 | RegisterID* addConstantEmptyValue(); | |
630 | unsigned addRegExp(RegExp*); | |
631 | ||
632 | unsigned addConstantBuffer(unsigned length); | |
633 | ||
634 | UnlinkedFunctionExecutable* makeFunction(FunctionBodyNode* body) | |
635 | { | |
636 | return UnlinkedFunctionExecutable::create(m_vm, m_scopeNode->source(), body); | |
637 | } | |
638 | ||
639 | RegisterID* emitInitLazyRegister(RegisterID*); | |
640 | ||
641 | public: | |
642 | JSString* addStringConstant(const Identifier&); | |
643 | ||
644 | Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions() { return m_instructions; } | |
645 | ||
646 | SharedSymbolTable& symbolTable() { return *m_symbolTable; } | |
647 | ||
648 | bool shouldOptimizeLocals() | |
649 | { | |
650 | if (m_dynamicScopeDepth) | |
651 | return false; | |
652 | ||
653 | if (m_codeType != FunctionCode) | |
654 | return false; | |
655 | ||
656 | return true; | |
657 | } | |
658 | ||
659 | bool canOptimizeNonLocals() | |
660 | { | |
661 | if (m_dynamicScopeDepth) | |
662 | return false; | |
663 | ||
664 | if (m_codeType == EvalCode) | |
665 | return false; | |
666 | ||
667 | if (m_codeType == FunctionCode && m_codeBlock->usesEval()) | |
668 | return false; | |
669 | ||
670 | return true; | |
671 | } | |
672 | ||
673 | RegisterID* emitThrowExpressionTooDeepException(); | |
674 | ||
675 | void createArgumentsIfNecessary(); | |
676 | void createActivationIfNecessary(); | |
677 | RegisterID* createLazyRegisterIfNecessary(RegisterID*); | |
678 | ||
679 | Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> m_instructions; | |
680 | ||
681 | bool m_shouldEmitDebugHooks; | |
682 | bool m_shouldEmitProfileHooks; | |
683 | ||
684 | SharedSymbolTable* m_symbolTable; | |
685 | ||
686 | ScopeNode* m_scopeNode; | |
687 | Strong<JSScope> m_scope; | |
688 | Strong<UnlinkedCodeBlock> m_codeBlock; | |
689 | ||
690 | // Some of these objects keep pointers to one another. They are arranged | |
691 | // to ensure a sane destruction order that avoids references to freed memory. | |
692 | HashSet<RefPtr<StringImpl>, IdentifierRepHash> m_functions; | |
693 | RegisterID m_ignoredResultRegister; | |
694 | RegisterID m_thisRegister; | |
695 | RegisterID m_calleeRegister; | |
696 | RegisterID* m_activationRegister; | |
697 | RegisterID* m_emptyValueRegister; | |
698 | RegisterID* m_globalObjectRegister; | |
699 | SegmentedVector<RegisterID, 32> m_constantPoolRegisters; | |
700 | SegmentedVector<RegisterID, 32> m_calleeRegisters; | |
701 | SegmentedVector<RegisterID, 32> m_parameters; | |
702 | SegmentedVector<Label, 32> m_labels; | |
703 | LabelScopeStore m_labelScopes; | |
704 | RefPtr<RegisterID> m_lastVar; | |
705 | int m_finallyDepth; | |
706 | int m_dynamicScopeDepth; | |
707 | CodeType m_codeType; | |
708 | ||
709 | Vector<ControlFlowContext, 0, UnsafeVectorOverflow> m_scopeContextStack; | |
710 | Vector<SwitchInfo> m_switchContextStack; | |
711 | Vector<ForInContext> m_forInContextStack; | |
712 | Vector<TryContext> m_tryContextStack; | |
713 | ||
714 | Vector<TryRange> m_tryRanges; | |
715 | SegmentedVector<TryData, 8> m_tryData; | |
716 | ||
717 | int m_firstConstantIndex; | |
718 | int m_nextConstantOffset; | |
719 | unsigned m_globalConstantIndex; | |
720 | ||
721 | int m_globalVarStorageOffset; | |
722 | ||
723 | bool m_hasCreatedActivation; | |
724 | int m_firstLazyFunction; | |
725 | int m_lastLazyFunction; | |
726 | HashMap<unsigned int, FunctionBodyNode*, WTF::IntHash<unsigned int>, WTF::UnsignedWithZeroKeyHashTraits<unsigned int> > m_lazyFunctions; | |
727 | typedef HashMap<FunctionBodyNode*, unsigned> FunctionOffsetMap; | |
728 | FunctionOffsetMap m_functionOffsets; | |
729 | ||
730 | // Constant pool | |
731 | IdentifierMap m_identifierMap; | |
732 | JSValueMap m_jsValueMap; | |
733 | NumberMap m_numberMap; | |
734 | IdentifierStringMap m_stringMap; | |
735 | ||
736 | uint32_t getResolveOperations(const Identifier& property) | |
737 | { | |
738 | if (m_dynamicScopeDepth) | |
739 | return m_codeBlock->addResolve(); | |
740 | IdentifierResolveMap::AddResult result = m_resolveCacheMap.add(property.impl(), 0); | |
741 | if (result.isNewEntry) | |
742 | result.iterator->value = m_codeBlock->addResolve(); | |
743 | return result.iterator->value; | |
744 | } | |
745 | ||
746 | uint32_t getResolveWithThisOperations(const Identifier& property) | |
747 | { | |
748 | if (m_dynamicScopeDepth) | |
749 | return m_codeBlock->addResolve(); | |
750 | IdentifierResolveMap::AddResult result = m_resolveWithThisCacheMap.add(property.impl(), 0); | |
751 | if (result.isNewEntry) | |
752 | result.iterator->value = m_codeBlock->addResolve(); | |
753 | return result.iterator->value; | |
754 | } | |
755 | ||
756 | uint32_t getResolveBaseOperations(IdentifierResolvePutMap& map, const Identifier& property, uint32_t& putToBaseOperation) | |
757 | { | |
758 | if (m_dynamicScopeDepth) { | |
759 | putToBaseOperation = m_codeBlock->addPutToBase(); | |
760 | return m_codeBlock->addResolve(); | |
761 | } | |
762 | ResolveCacheEntry entry = {-1, -1}; | |
763 | IdentifierResolvePutMap::AddResult result = map.add(property.impl(), entry); | |
764 | if (result.isNewEntry) | |
765 | result.iterator->value.resolveOperations = m_codeBlock->addResolve(); | |
766 | if (result.iterator->value.putOperations == -1) | |
767 | result.iterator->value.putOperations = getPutToBaseOperation(property); | |
768 | putToBaseOperation = result.iterator->value.putOperations; | |
769 | return result.iterator->value.resolveOperations; | |
770 | } | |
771 | ||
772 | uint32_t getResolveBaseOperations(const Identifier& property) | |
773 | { | |
774 | uint32_t scratch; | |
775 | return getResolveBaseOperations(m_resolveBaseMap, property, scratch); | |
776 | } | |
777 | ||
778 | uint32_t getResolveBaseForPutOperations(const Identifier& property, uint32_t& putToBaseOperation) | |
779 | { | |
780 | return getResolveBaseOperations(m_resolveBaseForPutMap, property, putToBaseOperation); | |
781 | } | |
782 | ||
783 | uint32_t getResolveWithBaseForPutOperations(const Identifier& property, uint32_t& putToBaseOperation) | |
784 | { | |
785 | return getResolveBaseOperations(m_resolveWithBaseForPutMap, property, putToBaseOperation); | |
786 | } | |
787 | ||
788 | uint32_t getPutToBaseOperation(const Identifier& property) | |
789 | { | |
790 | if (m_dynamicScopeDepth) | |
791 | return m_codeBlock->addPutToBase(); | |
792 | IdentifierResolveMap::AddResult result = m_putToBaseMap.add(property.impl(), 0); | |
793 | if (result.isNewEntry) | |
794 | result.iterator->value = m_codeBlock->addPutToBase(); | |
795 | return result.iterator->value; | |
796 | } | |
797 | ||
798 | IdentifierResolveMap m_putToBaseMap; | |
799 | IdentifierResolveMap m_resolveCacheMap; | |
800 | IdentifierResolveMap m_resolveWithThisCacheMap; | |
801 | IdentifierResolvePutMap m_resolveBaseMap; | |
802 | IdentifierResolvePutMap m_resolveBaseForPutMap; | |
803 | IdentifierResolvePutMap m_resolveWithBaseForPutMap; | |
804 | ||
805 | StaticPropertyAnalyzer m_staticPropertyAnalyzer; | |
806 | ||
807 | VM* m_vm; | |
808 | ||
809 | OpcodeID m_lastOpcodeID; | |
810 | #ifndef NDEBUG | |
811 | size_t m_lastOpcodePosition; | |
812 | #endif | |
813 | ||
814 | StackBounds m_stack; | |
815 | ||
816 | bool m_usesExceptions; | |
817 | bool m_expressionTooDeep; | |
818 | }; | |
819 | ||
820 | } | |
821 | ||
822 | #endif // BytecodeGenerator_h |