/*
 * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef BytecodeGenerator_h
#define BytecodeGenerator_h

#include "CodeBlock.h"
#include "Debugger.h"
#include "Instruction.h"
#include "Interpreter.h"
#include "Label.h"
#include "LabelScope.h"
#include "Nodes.h"
#include "RegisterID.h"
#include "SymbolTable.h"
#include <wtf/HashTraits.h>
#include <wtf/PassRefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>

namespace JSC {

class Identifier;
class Label;
class ScopeChainNode;

53 | class CallArguments { | |
54 | public: | |
55 | CallArguments(BytecodeGenerator& generator, ArgumentsNode* argumentsNode); | |
56 | ||
57 | RegisterID* thisRegister() { return m_argv[0].get(); } | |
58 | RegisterID* argumentRegister(unsigned i) { return m_argv[i + 1].get(); } | |
59 | unsigned registerOffset() { return m_argv.last()->index() + CallFrame::offsetFor(argumentCountIncludingThis()); } | |
60 | unsigned argumentCountIncludingThis() { return m_argv.size(); } | |
61 | RegisterID* profileHookRegister() { return m_profileHookRegister.get(); } | |
62 | ArgumentsNode* argumentsNode() { return m_argumentsNode; } | |
63 | ||
64 | private: | |
65 | void newArgument(BytecodeGenerator&); | |
66 | ||
67 | RefPtr<RegisterID> m_profileHookRegister; | |
68 | ArgumentsNode* m_argumentsNode; | |
69 | Vector<RefPtr<RegisterID>, 8> m_argv; | |
70 | }; | |
71 | ||
72 | struct FinallyContext { | |
73 | StatementNode* finallyBlock; | |
74 | unsigned scopeContextStackSize; | |
75 | unsigned switchContextStackSize; | |
76 | unsigned forInContextStackSize; | |
77 | unsigned labelScopesSize; | |
78 | int finallyDepth; | |
79 | int dynamicScopeDepth; | |
80 | }; | |
81 | ||
82 | struct ControlFlowContext { | |
83 | bool isFinallyBlock; | |
84 | FinallyContext finallyContext; | |
85 | }; | |
86 | ||
87 | struct ForInContext { | |
88 | RefPtr<RegisterID> expectedSubscriptRegister; | |
89 | RefPtr<RegisterID> iterRegister; | |
90 | RefPtr<RegisterID> indexRegister; | |
91 | RefPtr<RegisterID> propertyRegister; | |
92 | }; | |
93 | ||
94 | class BytecodeGenerator { | |
95 | WTF_MAKE_FAST_ALLOCATED; | |
96 | public: | |
97 | typedef DeclarationStacks::VarStack VarStack; | |
98 | typedef DeclarationStacks::FunctionStack FunctionStack; | |
99 | ||
100 | JS_EXPORT_PRIVATE static void setDumpsGeneratedCode(bool dumpsGeneratedCode); | |
101 | static bool dumpsGeneratedCode(); | |
102 | ||
103 | BytecodeGenerator(ProgramNode*, ScopeChainNode*, SymbolTable*, ProgramCodeBlock*, CompilationKind); | |
104 | BytecodeGenerator(FunctionBodyNode*, ScopeChainNode*, SymbolTable*, CodeBlock*, CompilationKind); | |
105 | BytecodeGenerator(EvalNode*, ScopeChainNode*, SymbolTable*, EvalCodeBlock*, CompilationKind); | |
106 | ||
107 | ~BytecodeGenerator(); | |
108 | ||
109 | JSGlobalData* globalData() const { return m_globalData; } | |
110 | const CommonIdentifiers& propertyNames() const { return *m_globalData->propertyNames; } | |
111 | ||
112 | bool isConstructor() { return m_codeBlock->m_isConstructor; } | |
113 | ||
114 | JSObject* generate(); | |
115 | ||
116 | // Returns the register corresponding to a local variable, or 0 if no | |
117 | // such register exists. Registers returned by registerFor do not | |
118 | // require explicit reference counting. | |
119 | RegisterID* registerFor(const Identifier&); | |
120 | ||
121 | bool isArgumentNumber(const Identifier&, int); | |
122 | ||
123 | void setIsNumericCompareFunction(bool isNumericCompareFunction); | |
124 | ||
125 | bool willResolveToArguments(const Identifier&); | |
126 | RegisterID* uncheckedRegisterForArguments(); | |
127 | ||
128 | // Behaves as registerFor does, but ignores dynamic scope as | |
129 | // dynamic scope should not interfere with const initialisation | |
130 | RegisterID* constRegisterFor(const Identifier&); | |
131 | ||
132 | // Searches the scope chain in an attempt to statically locate the requested | |
133 | // property. Returns false if for any reason the property cannot be safely | |
134 | // optimised at all. Otherwise it will return the index and depth of the | |
135 | // VariableObject that defines the property. If the property cannot be found | |
136 | // statically, depth will contain the depth of the scope chain where dynamic | |
137 | // lookup must begin. | |
138 | bool findScopedProperty(const Identifier&, int& index, size_t& depth, bool forWriting, bool& includesDynamicScopes, JSObject*& globalObject); | |
139 | ||
140 | // Returns the register storing "this" | |
141 | RegisterID* thisRegister() { return &m_thisRegister; } | |
142 | ||
143 | bool isLocal(const Identifier&); | |
144 | bool isLocalConstant(const Identifier&); | |
145 | ||
146 | // Returns the next available temporary register. Registers returned by | |
147 | // newTemporary require a modified form of reference counting: any | |
148 | // register with a refcount of 0 is considered "available", meaning that | |
149 | // the next instruction may overwrite it. | |
150 | RegisterID* newTemporary(); | |
151 | ||
152 | RegisterID* highestUsedRegister(); | |
153 | ||
154 | // The same as newTemporary(), but this function returns "suggestion" if | |
155 | // "suggestion" is a temporary. This function is helpful in situations | |
156 | // where you've put "suggestion" in a RefPtr, but you'd like to allow | |
157 | // the next instruction to overwrite it anyway. | |
158 | RegisterID* newTemporaryOr(RegisterID* suggestion) { return suggestion->isTemporary() ? suggestion : newTemporary(); } | |
159 | ||
160 | // Functions for handling of dst register | |
161 | ||
162 | RegisterID* ignoredResult() { return &m_ignoredResultRegister; } | |
163 | ||
164 | // Returns a place to write intermediate values of an operation | |
165 | // which reuses dst if it is safe to do so. | |
166 | RegisterID* tempDestination(RegisterID* dst) | |
167 | { | |
168 | return (dst && dst != ignoredResult() && dst->isTemporary()) ? dst : newTemporary(); | |
169 | } | |
170 | ||
171 | // Returns the place to write the final output of an operation. | |
172 | RegisterID* finalDestination(RegisterID* originalDst, RegisterID* tempDst = 0) | |
173 | { | |
174 | if (originalDst && originalDst != ignoredResult()) | |
175 | return originalDst; | |
176 | ASSERT(tempDst != ignoredResult()); | |
177 | if (tempDst && tempDst->isTemporary()) | |
178 | return tempDst; | |
179 | return newTemporary(); | |
180 | } | |
181 | ||
182 | // Returns the place to write the final output of an operation. | |
183 | RegisterID* finalDestinationOrIgnored(RegisterID* originalDst, RegisterID* tempDst = 0) | |
184 | { | |
185 | if (originalDst) | |
186 | return originalDst; | |
187 | ASSERT(tempDst != ignoredResult()); | |
188 | if (tempDst && tempDst->isTemporary()) | |
189 | return tempDst; | |
190 | return newTemporary(); | |
191 | } | |
192 | ||
193 | RegisterID* destinationForAssignResult(RegisterID* dst) | |
194 | { | |
195 | if (dst && dst != ignoredResult() && m_codeBlock->needsFullScopeChain()) | |
196 | return dst->isTemporary() ? dst : newTemporary(); | |
197 | return 0; | |
198 | } | |
199 | ||
200 | // Moves src to dst if dst is not null and is different from src, otherwise just returns src. | |
201 | RegisterID* moveToDestinationIfNeeded(RegisterID* dst, RegisterID* src) | |
202 | { | |
203 | return dst == ignoredResult() ? 0 : (dst && dst != src) ? emitMove(dst, src) : src; | |
204 | } | |
205 | ||
206 | PassRefPtr<LabelScope> newLabelScope(LabelScope::Type, const Identifier* = 0); | |
207 | PassRefPtr<Label> newLabel(); | |
208 | ||
209 | // The emitNode functions are just syntactic sugar for calling | |
210 | // Node::emitCode. These functions accept a 0 for the register, | |
211 | // meaning that the node should allocate a register, or ignoredResult(), | |
212 | // meaning that the node need not put the result in a register. | |
213 | // Other emit functions do not accept 0 or ignoredResult(). | |
214 | RegisterID* emitNode(RegisterID* dst, Node* n) | |
215 | { | |
216 | // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary. | |
217 | ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount()); | |
218 | addLineInfo(n->lineNo()); | |
219 | return m_stack.recursionCheck() | |
220 | ? n->emitBytecode(*this, dst) | |
221 | : emitThrowExpressionTooDeepException(); | |
222 | } | |
223 | ||
224 | RegisterID* emitNode(Node* n) | |
225 | { | |
226 | return emitNode(0, n); | |
227 | } | |
228 | ||
229 | void emitNodeInConditionContext(ExpressionNode* n, Label* trueTarget, Label* falseTarget, bool fallThroughMeansTrue) | |
230 | { | |
231 | addLineInfo(n->lineNo()); | |
232 | if (m_stack.recursionCheck()) | |
233 | n->emitBytecodeInConditionContext(*this, trueTarget, falseTarget, fallThroughMeansTrue); | |
234 | else | |
235 | emitThrowExpressionTooDeepException(); | |
236 | } | |
237 | ||
238 | void emitExpressionInfo(unsigned divot, unsigned startOffset, unsigned endOffset) | |
239 | { | |
240 | if (!m_shouldEmitRichSourceInfo) | |
241 | return; | |
242 | ||
243 | divot -= m_codeBlock->sourceOffset(); | |
244 | if (divot > ExpressionRangeInfo::MaxDivot) { | |
245 | // Overflow has occurred, we can only give line number info for errors for this region | |
246 | divot = 0; | |
247 | startOffset = 0; | |
248 | endOffset = 0; | |
249 | } else if (startOffset > ExpressionRangeInfo::MaxOffset) { | |
250 | // If the start offset is out of bounds we clear both offsets | |
251 | // so we only get the divot marker. Error message will have to be reduced | |
252 | // to line and column number. | |
253 | startOffset = 0; | |
254 | endOffset = 0; | |
255 | } else if (endOffset > ExpressionRangeInfo::MaxOffset) { | |
256 | // The end offset is only used for additional context, and is much more likely | |
257 | // to overflow (eg. function call arguments) so we are willing to drop it without | |
258 | // dropping the rest of the range. | |
259 | endOffset = 0; | |
260 | } | |
261 | ||
262 | ExpressionRangeInfo info; | |
263 | info.instructionOffset = instructions().size(); | |
264 | info.divotPoint = divot; | |
265 | info.startOffset = startOffset; | |
266 | info.endOffset = endOffset; | |
267 | m_codeBlock->addExpressionInfo(info); | |
268 | } | |
269 | ||
270 | ALWAYS_INLINE bool leftHandSideNeedsCopy(bool rightHasAssignments, bool rightIsPure) | |
271 | { | |
272 | return (m_codeType != FunctionCode || m_codeBlock->needsFullScopeChain() || rightHasAssignments) && !rightIsPure; | |
273 | } | |
274 | ||
275 | ALWAYS_INLINE PassRefPtr<RegisterID> emitNodeForLeftHandSide(ExpressionNode* n, bool rightHasAssignments, bool rightIsPure) | |
276 | { | |
277 | if (leftHandSideNeedsCopy(rightHasAssignments, rightIsPure)) { | |
278 | PassRefPtr<RegisterID> dst = newTemporary(); | |
279 | emitNode(dst.get(), n); | |
280 | return dst; | |
281 | } | |
282 | ||
283 | return emitNode(n); | |
284 | } | |
285 | ||
286 | RegisterID* emitLoad(RegisterID* dst, bool); | |
287 | RegisterID* emitLoad(RegisterID* dst, double); | |
288 | RegisterID* emitLoad(RegisterID* dst, const Identifier&); | |
289 | RegisterID* emitLoad(RegisterID* dst, JSValue); | |
290 | ||
291 | RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); | |
292 | RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); | |
293 | RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); | |
294 | RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src); | |
295 | ||
296 | RegisterID* emitNewObject(RegisterID* dst); | |
297 | RegisterID* emitNewArray(RegisterID* dst, ElementNode*, unsigned length); // stops at first elision | |
298 | ||
299 | RegisterID* emitNewFunction(RegisterID* dst, FunctionBodyNode* body); | |
300 | RegisterID* emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* body); | |
301 | RegisterID* emitNewFunctionInternal(RegisterID* dst, unsigned index, bool shouldNullCheck); | |
302 | RegisterID* emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func); | |
303 | RegisterID* emitNewRegExp(RegisterID* dst, RegExp*); | |
304 | ||
305 | RegisterID* emitMove(RegisterID* dst, RegisterID* src); | |
306 | ||
307 | RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src); } | |
308 | RegisterID* emitPreInc(RegisterID* srcDst); | |
309 | RegisterID* emitPreDec(RegisterID* srcDst); | |
310 | RegisterID* emitPostInc(RegisterID* dst, RegisterID* srcDst); | |
311 | RegisterID* emitPostDec(RegisterID* dst, RegisterID* srcDst); | |
312 | ||
313 | void emitCheckHasInstance(RegisterID* base); | |
314 | RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype); | |
315 | RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); } | |
316 | RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); } | |
317 | ||
318 | RegisterID* emitResolve(RegisterID* dst, const Identifier& property); | |
319 | RegisterID* emitGetScopedVar(RegisterID* dst, size_t skip, int index, JSValue globalObject); | |
320 | RegisterID* emitPutScopedVar(size_t skip, int index, RegisterID* value, JSValue globalObject); | |
321 | ||
322 | RegisterID* emitResolveBase(RegisterID* dst, const Identifier& property); | |
323 | RegisterID* emitResolveBaseForPut(RegisterID* dst, const Identifier& property); | |
324 | RegisterID* emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property); | |
325 | RegisterID* emitResolveWithThis(RegisterID* baseDst, RegisterID* propDst, const Identifier& property); | |
326 | ||
327 | void emitMethodCheck(); | |
328 | ||
329 | RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property); | |
330 | RegisterID* emitGetArgumentsLength(RegisterID* dst, RegisterID* base); | |
331 | RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value); | |
332 | RegisterID* emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value); | |
333 | RegisterID* emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier&); | |
334 | RegisterID* emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
335 | RegisterID* emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
336 | RegisterID* emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value); | |
337 | RegisterID* emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property); | |
338 | RegisterID* emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value); | |
339 | void emitPutGetterSetter(RegisterID* base, const Identifier& property, RegisterID* getter, RegisterID* setter); | |
340 | ||
341 | RegisterID* emitCall(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset); | |
342 | RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset); | |
343 | RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, RegisterID* profileHookRegister, unsigned divot, unsigned startOffset, unsigned endOffset); | |
344 | RegisterID* emitLoadVarargs(RegisterID* argCountDst, RegisterID* thisRegister, RegisterID* args); | |
345 | ||
346 | RegisterID* emitReturn(RegisterID* src); | |
347 | RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); } | |
348 | ||
349 | RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset); | |
350 | RegisterID* emitStrcat(RegisterID* dst, RegisterID* src, int count); | |
351 | void emitToPrimitive(RegisterID* dst, RegisterID* src); | |
352 | ||
353 | PassRefPtr<Label> emitLabel(Label*); | |
354 | void emitLoopHint(); | |
355 | PassRefPtr<Label> emitJump(Label* target); | |
356 | PassRefPtr<Label> emitJumpIfTrue(RegisterID* cond, Label* target); | |
357 | PassRefPtr<Label> emitJumpIfFalse(RegisterID* cond, Label* target); | |
358 | PassRefPtr<Label> emitJumpIfNotFunctionCall(RegisterID* cond, Label* target); | |
359 | PassRefPtr<Label> emitJumpIfNotFunctionApply(RegisterID* cond, Label* target); | |
360 | PassRefPtr<Label> emitJumpScopes(Label* target, int targetScopeDepth); | |
361 | ||
362 | RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget); | |
363 | RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, RegisterID* iter, Label* target); | |
364 | ||
365 | RegisterID* emitCatch(RegisterID*, Label* start, Label* end); | |
366 | void emitThrow(RegisterID* exc) | |
367 | { | |
368 | m_usesExceptions = true; | |
369 | emitUnaryNoDstOp(op_throw, exc); | |
370 | } | |
371 | ||
372 | void emitThrowReferenceError(const UString& message); | |
373 | ||
374 | void emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value); | |
375 | ||
376 | RegisterID* emitPushScope(RegisterID* scope); | |
377 | void emitPopScope(); | |
378 | ||
379 | void emitDebugHook(DebugHookID, int firstLine, int lastLine); | |
380 | ||
381 | int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; } | |
382 | bool hasFinaliser() { return m_finallyDepth != 0; } | |
383 | ||
384 | void pushFinallyContext(StatementNode* finallyBlock); | |
385 | void popFinallyContext(); | |
386 | ||
387 | void pushOptimisedForIn(RegisterID* expectedBase, RegisterID* iter, RegisterID* index, RegisterID* propertyRegister) | |
388 | { | |
389 | ForInContext context = { expectedBase, iter, index, propertyRegister }; | |
390 | m_forInContextStack.append(context); | |
391 | } | |
392 | ||
393 | void popOptimisedForIn() | |
394 | { | |
395 | m_forInContextStack.removeLast(); | |
396 | } | |
397 | ||
398 | LabelScope* breakTarget(const Identifier&); | |
399 | LabelScope* continueTarget(const Identifier&); | |
400 | ||
401 | void beginSwitch(RegisterID*, SwitchInfo::SwitchType); | |
402 | void endSwitch(uint32_t clauseCount, RefPtr<Label>*, ExpressionNode**, Label* defaultLabel, int32_t min, int32_t range); | |
403 | ||
404 | CodeType codeType() const { return m_codeType; } | |
405 | ||
406 | bool shouldEmitProfileHooks() { return m_shouldEmitProfileHooks; } | |
407 | ||
408 | bool isStrictMode() const { return m_codeBlock->isStrictMode(); } | |
409 | ||
410 | ScopeChainNode* scopeChain() const { return m_scopeChain.get(); } | |
411 | ||
412 | private: | |
413 | friend class Label; | |
414 | ||
415 | void emitOpcode(OpcodeID); | |
416 | ValueProfile* emitProfiledOpcode(OpcodeID); | |
417 | void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); | |
418 | void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); | |
419 | ALWAYS_INLINE void rewindBinaryOp(); | |
420 | ALWAYS_INLINE void rewindUnaryOp(); | |
421 | ||
422 | PassRefPtr<Label> emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope); | |
423 | ||
424 | typedef HashMap<double, JSValue> NumberMap; | |
425 | typedef HashMap<StringImpl*, JSString*, IdentifierRepHash> IdentifierStringMap; | |
426 | ||
427 | RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset); | |
428 | ||
429 | RegisterID* newRegister(); | |
430 | ||
431 | // Adds a var slot and maps it to the name ident in symbolTable(). | |
432 | RegisterID* addVar(const Identifier& ident, bool isConstant) | |
433 | { | |
434 | RegisterID* local; | |
435 | addVar(ident, isConstant, local); | |
436 | return local; | |
437 | } | |
438 | ||
439 | // Ditto. Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used. | |
440 | bool addVar(const Identifier&, bool isConstant, RegisterID*&); | |
441 | ||
442 | // Adds an anonymous var slot. To give this slot a name, add it to symbolTable(). | |
443 | RegisterID* addVar() | |
444 | { | |
445 | ++m_codeBlock->m_numVars; | |
446 | return newRegister(); | |
447 | } | |
448 | ||
449 | // Returns the index of the added var. | |
450 | int addGlobalVar(const Identifier&, bool isConstant); | |
451 | ||
452 | void addParameter(const Identifier&, int parameterIndex); | |
453 | ||
454 | void preserveLastVar(); | |
455 | bool shouldAvoidResolveGlobal(); | |
456 | ||
457 | RegisterID& registerFor(int index) | |
458 | { | |
459 | if (index >= 0) | |
460 | return m_calleeRegisters[index]; | |
461 | ||
462 | ASSERT(m_parameters.size()); | |
463 | return m_parameters[index + m_parameters.size() + RegisterFile::CallFrameHeaderSize]; | |
464 | } | |
465 | ||
466 | unsigned addConstant(const Identifier&); | |
467 | RegisterID* addConstantValue(JSValue); | |
468 | unsigned addRegExp(RegExp*); | |
469 | ||
470 | unsigned addConstantBuffer(unsigned length); | |
471 | ||
472 | FunctionExecutable* makeFunction(ExecState* exec, FunctionBodyNode* body) | |
473 | { | |
474 | return FunctionExecutable::create(exec, body->ident(), body->inferredName(), body->source(), body->usesArguments(), body->parameters(), body->isStrictMode(), body->lineNo(), body->lastLine()); | |
475 | } | |
476 | ||
477 | FunctionExecutable* makeFunction(JSGlobalData* globalData, FunctionBodyNode* body) | |
478 | { | |
479 | return FunctionExecutable::create(*globalData, body->ident(), body->inferredName(), body->source(), body->usesArguments(), body->parameters(), body->isStrictMode(), body->lineNo(), body->lastLine()); | |
480 | } | |
481 | ||
482 | JSString* addStringConstant(const Identifier&); | |
483 | ||
484 | void addLineInfo(unsigned lineNo) | |
485 | { | |
486 | #if !ENABLE(OPCODE_SAMPLING) | |
487 | if (m_shouldEmitRichSourceInfo) | |
488 | #endif | |
489 | m_codeBlock->addLineInfo(instructions().size(), lineNo); | |
490 | } | |
491 | ||
492 | RegisterID* emitInitLazyRegister(RegisterID*); | |
493 | ||
494 | Vector<Instruction>& instructions() { return m_instructions; } | |
495 | SymbolTable& symbolTable() { return *m_symbolTable; } | |
496 | ||
497 | bool shouldOptimizeLocals() | |
498 | { | |
499 | if (m_dynamicScopeDepth) | |
500 | return false; | |
501 | ||
502 | if (m_codeType != FunctionCode) | |
503 | return false; | |
504 | ||
505 | return true; | |
506 | } | |
507 | ||
508 | bool canOptimizeNonLocals() | |
509 | { | |
510 | if (m_dynamicScopeDepth) | |
511 | return false; | |
512 | ||
513 | if (m_codeType == EvalCode) | |
514 | return false; | |
515 | ||
516 | if (m_codeType == FunctionCode && m_codeBlock->usesEval()) | |
517 | return false; | |
518 | ||
519 | return true; | |
520 | } | |
521 | ||
522 | RegisterID* emitThrowExpressionTooDeepException(); | |
523 | ||
524 | void createArgumentsIfNecessary(); | |
525 | void createActivationIfNecessary(); | |
526 | RegisterID* createLazyRegisterIfNecessary(RegisterID*); | |
527 | ||
528 | Vector<Instruction> m_instructions; | |
529 | ||
530 | bool m_shouldEmitDebugHooks; | |
531 | bool m_shouldEmitProfileHooks; | |
532 | bool m_shouldEmitRichSourceInfo; | |
533 | ||
534 | Strong<ScopeChainNode> m_scopeChain; | |
535 | SymbolTable* m_symbolTable; | |
536 | ||
537 | ScopeNode* m_scopeNode; | |
538 | CodeBlock* m_codeBlock; | |
539 | ||
540 | // Some of these objects keep pointers to one another. They are arranged | |
541 | // to ensure a sane destruction order that avoids references to freed memory. | |
542 | HashSet<RefPtr<StringImpl>, IdentifierRepHash> m_functions; | |
543 | RegisterID m_ignoredResultRegister; | |
544 | RegisterID m_thisRegister; | |
545 | RegisterID* m_activationRegister; | |
546 | SegmentedVector<RegisterID, 32> m_constantPoolRegisters; | |
547 | SegmentedVector<RegisterID, 32> m_calleeRegisters; | |
548 | SegmentedVector<RegisterID, 32> m_parameters; | |
549 | SegmentedVector<Label, 32> m_labels; | |
550 | SegmentedVector<LabelScope, 8> m_labelScopes; | |
551 | RefPtr<RegisterID> m_lastVar; | |
552 | int m_finallyDepth; | |
553 | int m_dynamicScopeDepth; | |
554 | int m_baseScopeDepth; | |
555 | CodeType m_codeType; | |
556 | ||
557 | Vector<ControlFlowContext> m_scopeContextStack; | |
558 | Vector<SwitchInfo> m_switchContextStack; | |
559 | Vector<ForInContext> m_forInContextStack; | |
560 | ||
561 | int m_firstConstantIndex; | |
562 | int m_nextConstantOffset; | |
563 | unsigned m_globalConstantIndex; | |
564 | ||
565 | int m_globalVarStorageOffset; | |
566 | ||
567 | bool m_hasCreatedActivation; | |
568 | int m_firstLazyFunction; | |
569 | int m_lastLazyFunction; | |
570 | HashMap<unsigned int, FunctionBodyNode*, WTF::IntHash<unsigned int>, WTF::UnsignedWithZeroKeyHashTraits<unsigned int> > m_lazyFunctions; | |
571 | typedef HashMap<FunctionBodyNode*, unsigned> FunctionOffsetMap; | |
572 | FunctionOffsetMap m_functionOffsets; | |
573 | ||
574 | // Constant pool | |
575 | IdentifierMap m_identifierMap; | |
576 | JSValueMap m_jsValueMap; | |
577 | NumberMap m_numberMap; | |
578 | IdentifierStringMap m_stringMap; | |
579 | ||
580 | JSGlobalData* m_globalData; | |
581 | ||
582 | OpcodeID m_lastOpcodeID; | |
583 | #ifndef NDEBUG | |
584 | size_t m_lastOpcodePosition; | |
585 | #endif | |
586 | ||
587 | StackBounds m_stack; | |
588 | ||
589 | bool m_usesExceptions; | |
590 | bool m_expressionTooDeep; | |
591 | }; | |
} // namespace JSC

#endif // BytecodeGenerator_h