/*
 * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "CodeBlock.h"

#include "BytecodeGenerator.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Interpreter.h"
#include "JSActivation.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSNameScope.h"
#include "LLIntEntrypoint.h"
#include "LowLevelInterpreter.h"
#include "JSCInlines.h"
#include "PolymorphicGetByIdList.h"
#include "PolymorphicPutByIdList.h"
#include "ProfilerDatabase.h"
#include "ReduceWhitespace.h"
#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
#include "UnlinkedInstructionStream.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {
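// The routines below implement CodeBlock's debug-dump support. They render a
// linked CodeBlock's identity, its bytecode stream, its inline-cache state,
// and any collected profiling data to a PrintStream in human-readable form.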
CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    default:
        CRASH();
        return CString("", 0);
    }
}
bool CodeBlock::hasHash() const
{
    return !!m_hash;
}
bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}
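// The hash is computed lazily from the source text and specialization kind.
// Computing it touches the source string, which is why hash() asserts (via
// isSafeToComputeHash()) that it is not running on a compilation thread.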
CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
    }
    return m_hash;
}
CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}
CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}
CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (SABI)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}
static CString constantName(int k, JSValue value)
{
    return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
}
static CString idName(int id0, const Identifier& ident)
{
    return toCString(ident.impl(), "(@id", id0, ")");
}
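// registerName() maps a raw operand onto the names used throughout the dump:
// constants print via constantName() as "<value>(@k<n>)", arguments as
// "arg<n>" (argument 0 prints as "this"), and locals as "loc<n>".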
CString CodeBlock::registerName(int r) const
{
    if (r == missingThisObjectMarker())
        return "<null>";

    if (isConstantRegisterIndex(r))
        return constantName(r, getConstant(r));

    if (operandIsArgument(r)) {
        if (!VirtualRegister(r).toArgument())
            return "this";
        return toCString("arg", VirtualRegister(r).toArgument());
    }

    return toCString("loc", VirtualRegister(r).toLocal());
}
static CString regexpToSourceString(RegExp* regExp)
{
    char postfix[5] = { '/', 0, 0, 0, 0 };
    int index = 1;
    if (regExp->global())
        postfix[index++] = 'g';
    if (regExp->ignoreCase())
        postfix[index++] = 'i';
    if (regExp->multiline())
        postfix[index] = 'm';

    return toCString("/", regExp->pattern().impl(), postfix);
}
static CString regexpName(int re, RegExp* regexp)
{
    return toCString(regexpToSourceString(regexp), "(@re", re, ")");
}
NEVER_INLINE static const char* debugHookName(int debugHookID)
{
    switch (static_cast<DebugHookID>(debugHookID)) {
    case DidEnterCallFrame:
        return "didEnterCallFrame";
    case WillLeaveCallFrame:
        return "willLeaveCallFrame";
    case WillExecuteStatement:
        return "willExecuteStatement";
    case WillExecuteProgram:
        return "willExecuteProgram";
    case DidExecuteProgram:
        return "didExecuteProgram";
    case DidReachBreakpoint:
        return "didReachBreakpoint";
    }

    RELEASE_ASSERT_NOT_REACHED();
    return "";
}
void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;

    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
}
void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int r2 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
}
void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
{
    int r0 = (++it)->u.operand;
    int offset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
}
void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        op = 0;
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}
static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
{
    if (!structure)
        return;

    out.printf("%s = %p", name, structure);

    PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
    if (offset != invalidOffset)
        out.printf(" (offset = %d)", offset);
}
#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
{
    out.printf("chain = %p: [", chain);
    bool first = true;
    for (WriteBarrier<Structure>* currentStructure = chain->head();
        *currentStructure;
        ++currentStructure) {
        if (first)
            first = false;
        else
            out.printf(", ");
        dumpStructure(out, "struct", exec, currentStructure->get(), ident);
    }
    out.printf("]");
}
#endif
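// Prints the state of a get_by_id inline cache: first whatever structure the
// LLInt has cached, then, under ENABLE(JIT), whatever the StructureStubInfo
// recorded (self access, prototype chain access, or a polymorphic list).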
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[3].u.operand);

    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.

    if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
        out.printf(" llint(array_length)");
    else if (Structure* structure = instruction[4].u.structure.get()) {
        out.printf(" llint(");
        dumpStructure(out, "struct", exec, structure, ident);
        out.printf(")");
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");

        if (stubInfo.seen) {
            out.printf(" jit(");

            Structure* baseStructure = 0;
            Structure* prototypeStructure = 0;
            StructureChain* chain = 0;
            PolymorphicGetByIdList* list = 0;

            switch (stubInfo.accessType) {
            case access_get_by_id_self:
                out.printf("self");
                baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
                break;
            case access_get_by_id_chain:
                out.printf("chain");
                baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
                chain = stubInfo.u.getByIdChain.chain.get();
                break;
            case access_get_by_id_list:
                out.printf("list");
                list = stubInfo.u.getByIdList.list;
                break;
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            if (baseStructure) {
                out.printf(", ");
                dumpStructure(out, "struct", exec, baseStructure, ident);
            }

            if (prototypeStructure) {
                out.printf(", ");
                dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
            }

            if (chain) {
                out.printf(", ");
                dumpChain(out, exec, chain, ident);
            }

            if (list) {
                out.printf(", list = %p: [", list);
                for (unsigned i = 0; i < list->size(); ++i) {
                    if (i)
                        out.printf(", ");
                    out.printf("(");
                    dumpStructure(out, "base", exec, list->at(i).structure(), ident);
                    if (list->at(i).chain()) {
                        out.printf(", ");
                        dumpChain(out, exec, list->at(i).chain(), ident);
                    }
                    out.printf(")");
                }
                out.printf("]");
            }
            out.printf(")");
        }
    }
#else
    UNUSED_PARAM(map);
#endif
}
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee.get();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }
        out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#endif
    }
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}
void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
}
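// Dumps the whole CodeBlock: a header line of counts, one line per bytecode,
// then the identifier, constant, regexp, exception-handler, and jump-table
// pools. An illustrative (not verbatim) bytecode line looks like:
//
//   [   4] add               loc0, arg1, arg2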
void CodeBlock::dumpBytecode(PrintStream& out)
{
    // We only use the ExecState* for things that don't actually lead to JS execution,
    // like converting a JSString to a String. Hence the globalExec is appropriate.
    ExecState* exec = m_globalObject->globalExec();

    size_t instructionCount = 0;

    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
        ++instructionCount;

    out.print(*this);
    out.printf(
        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
        static_cast<unsigned long>(instructions().size()),
        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
        m_numParameters, m_numCalleeRegisters, m_numVars);
    if (symbolTable() && symbolTable()->captureCount()) {
        out.printf(
            "; %d captured var(s) (from r%d to r%d, inclusive)",
            symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
    }
    if (usesArguments()) {
        out.printf(
            "; uses arguments, in r%d, r%d",
            argumentsRegister().offset(),
            unmodifiedArgumentsRegister(argumentsRegister()).offset());
    }
    if (needsActivation() && codeType() == FunctionCode)
        out.printf("; activation in r%d", activationRegister().offset());
    out.printf("\n");

    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);

    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end; ++it)
        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);

    if (numberOfIdentifiers()) {
        out.printf("\nIdentifiers:\n");
        size_t i = 0;
        do {
            out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
            ++i;
        } while (i != numberOfIdentifiers());
    }

    if (!m_constantRegisters.isEmpty()) {
        out.printf("\nConstants:\n");
        size_t i = 0;
        do {
            out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
            ++i;
        } while (i < m_constantRegisters.size());
    }

    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
        out.printf("\nm_regexps:\n");
        size_t i = 0;
        do {
            out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
            ++i;
        } while (i < count);
    }

    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
        out.printf("\nException Handlers:\n");
        unsigned i = 0;
        do {
            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
            ++i;
        } while (i < m_rareData->m_exceptionHandlers.size());
    }

    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
        out.printf("Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            int entry = 0;
            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
                if (!*iter)
                    continue;
                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
            }
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_switchJumpTables.size());
    }

    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
        out.printf("\nString Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_stringSwitchJumpTables.size());
    }

    out.printf("\n");
}
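// The helpers below append profiling annotations to the current bytecode
// line. beginDumpProfiling() emits the separator exactly once per line,
// tracked through the caller-owned hasPrintedProfiling flag.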
void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
    if (hasPrintedProfiling) {
        out.print("; ");
        return;
    }

    out.print("    ");
    hasPrintedProfiling = true;
}
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    CString description = it->u.profile->briefDescription(locker);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    if (!it->u.arrayProfile)
        return;
    CString description = it->u.arrayProfile->briefDescription(locker, this);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
{
    if (!profile || !profile->m_counter)
        return;

    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(name, profile->m_counter);
}
void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
{
    out.printf("[%4d] %-17s ", location, op);
}
void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
{
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s", registerName(operand).data());
}
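// The per-instruction dump: decodes the instruction at "it", prints its
// operands with the helpers above, and leaves "it" on the instruction's last
// slot so that the caller's ++it advances to the next opcode.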
void CodeBlock::dumpBytecode(
    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    int location = it - begin;
    bool hasPrintedProfiling = false;
    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
    switch (opcode) {
    case op_enter: {
        printLocationAndOp(out, exec, location, it, "enter");
        break;
    }
    case op_touch_entry: {
        printLocationAndOp(out, exec, location, it, "touch_entry");
        break;
    }
    case op_create_activation: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
        break;
    }
    case op_create_arguments: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
        break;
    }
    case op_init_lazy_reg: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
        break;
    }
    case op_get_callee: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
        ++it;
        break;
    }
    case op_create_this: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_this");
        out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
        break;
    }
    case op_to_this: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
        Structure* structure = (++it)->u.structure.get();
        if (structure)
            out.print(" cache(struct = ", RawPointer(structure), ")");
        break;
    }
    case op_new_object: {
        int r0 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_object");
        out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
        ++it; // Skip object allocation profile.
        break;
    }
    case op_new_array: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array");
        out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_with_size: {
        int dst = (++it)->u.operand;
        int length = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_with_size");
        out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_buffer: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_buffer");
        out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_regexp: {
        int r0 = (++it)->u.operand;
        int re0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_regexp");
        out.printf("%s, ", registerName(r0).data());
        if (r0 >= 0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
            out.printf("%s", regexpName(re0, regexp(re0)).data());
        else
            out.printf("bad_regexp(%d)", re0);
        break;
    }
    case op_mov: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "mov");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_captured_mov: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "captured_mov");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        ++it;
        break;
    }
    case op_not: {
        printUnaryOp(out, exec, location, it, "not");
        break;
    }
    case op_eq: {
        printBinaryOp(out, exec, location, it, "eq");
        break;
    }
    case op_eq_null: {
        printUnaryOp(out, exec, location, it, "eq_null");
        break;
    }
    case op_neq: {
        printBinaryOp(out, exec, location, it, "neq");
        break;
    }
    case op_neq_null: {
        printUnaryOp(out, exec, location, it, "neq_null");
        break;
    }
    case op_stricteq: {
        printBinaryOp(out, exec, location, it, "stricteq");
        break;
    }
    case op_nstricteq: {
        printBinaryOp(out, exec, location, it, "nstricteq");
        break;
    }
    case op_less: {
        printBinaryOp(out, exec, location, it, "less");
        break;
    }
    case op_lesseq: {
        printBinaryOp(out, exec, location, it, "lesseq");
        break;
    }
    case op_greater: {
        printBinaryOp(out, exec, location, it, "greater");
        break;
    }
    case op_greatereq: {
        printBinaryOp(out, exec, location, it, "greatereq");
        break;
    }
    case op_inc: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
        break;
    }
    case op_dec: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
        break;
    }
    case op_to_number: {
        printUnaryOp(out, exec, location, it, "to_number");
        break;
    }
    case op_negate: {
        printUnaryOp(out, exec, location, it, "negate");
        break;
    }
    case op_add: {
        printBinaryOp(out, exec, location, it, "add");
        ++it;
        break;
    }
    case op_mul: {
        printBinaryOp(out, exec, location, it, "mul");
        ++it;
        break;
    }
    case op_div: {
        printBinaryOp(out, exec, location, it, "div");
        ++it;
        break;
    }
    case op_mod: {
        printBinaryOp(out, exec, location, it, "mod");
        break;
    }
    case op_sub: {
        printBinaryOp(out, exec, location, it, "sub");
        ++it;
        break;
    }
    case op_lshift: {
        printBinaryOp(out, exec, location, it, "lshift");
        break;
    }
    case op_rshift: {
        printBinaryOp(out, exec, location, it, "rshift");
        break;
    }
    case op_urshift: {
        printBinaryOp(out, exec, location, it, "urshift");
        break;
    }
    case op_bitand: {
        printBinaryOp(out, exec, location, it, "bitand");
        ++it;
        break;
    }
    case op_bitxor: {
        printBinaryOp(out, exec, location, it, "bitxor");
        ++it;
        break;
    }
    case op_bitor: {
        printBinaryOp(out, exec, location, it, "bitor");
        ++it;
        break;
    }
    case op_check_has_instance: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "check_has_instance");
        out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
        break;
    }
    case op_instanceof: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "instanceof");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_unsigned: {
        printUnaryOp(out, exec, location, it, "unsigned");
        break;
    }
    case op_typeof: {
        printUnaryOp(out, exec, location, it, "typeof");
        break;
    }
    case op_is_undefined: {
        printUnaryOp(out, exec, location, it, "is_undefined");
        break;
    }
    case op_is_boolean: {
        printUnaryOp(out, exec, location, it, "is_boolean");
        break;
    }
    case op_is_number: {
        printUnaryOp(out, exec, location, it, "is_number");
        break;
    }
    case op_is_string: {
        printUnaryOp(out, exec, location, it, "is_string");
        break;
    }
    case op_is_object: {
        printUnaryOp(out, exec, location, it, "is_object");
        break;
    }
    case op_is_function: {
        printUnaryOp(out, exec, location, it, "is_function");
        break;
    }
    case op_in: {
        printBinaryOp(out, exec, location, it, "in");
        break;
    }
    case op_init_global_const_nop: {
        printLocationAndOp(out, exec, location, it, "init_global_const_nop");
        it++;
        it++;
        it++;
        it++;
        break;
    }
    case op_init_global_const: {
        WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "init_global_const");
        out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
        break;
    }
    case op_get_by_id:
    case op_get_by_id_out_of_line:
    case op_get_array_length: {
        printGetByIdOp(out, exec, location, it);
        printGetByIdCacheStatus(out, exec, location, stubInfos);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_get_arguments_length: {
        printUnaryOp(out, exec, location, it, "get_arguments_length");
        it++;
        break;
    }
    case op_put_by_id: {
        printPutByIdOp(out, exec, location, it, "put_by_id");
        break;
    }
    case op_put_by_id_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
        break;
    }
    case op_put_by_id_transition_direct: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
        break;
    }
    case op_put_by_id_transition_direct_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
        break;
    }
    case op_put_by_id_transition_normal: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
        break;
    }
    case op_put_by_id_transition_normal_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
        break;
    }
    case op_put_getter_setter: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_getter_setter");
        out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_del_by_id: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "del_by_id");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
        break;
    }
    case op_get_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "get_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_get_argument_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "get_argument_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        ++it;
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_get_by_pname: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        int r3 = (++it)->u.operand;
        int r4 = (++it)->u.operand;
        int r5 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "get_by_pname");
        out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
        break;
    }
    case op_put_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_by_val_direct: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_val_direct");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_del_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "del_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_put_by_index: {
        int r0 = (++it)->u.operand;
        unsigned n0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_index");
        out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
        break;
    }
    case op_jmp: {
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jmp");
        out.printf("%d(->%d)", offset, location + offset);
        break;
    }
    case op_jtrue: {
        printConditionalJump(out, exec, begin, it, location, "jtrue");
        break;
    }
    case op_jfalse: {
        printConditionalJump(out, exec, begin, it, location, "jfalse");
        break;
    }
    case op_jeq_null: {
        printConditionalJump(out, exec, begin, it, location, "jeq_null");
        break;
    }
    case op_jneq_null: {
        printConditionalJump(out, exec, begin, it, location, "jneq_null");
        break;
    }
    case op_jneq_ptr: {
        int r0 = (++it)->u.operand;
        Special::Pointer pointer = (++it)->u.specialPointer;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jneq_ptr");
        out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
        break;
    }
    case op_jless: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jless");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jlesseq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jlesseq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jgreater: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jgreater");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jgreatereq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jgreatereq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jnless: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jnless");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jnlesseq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jnlesseq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jngreater: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jngreater");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jngreatereq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jngreatereq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_loop_hint: {
        printLocationAndOp(out, exec, location, it, "loop_hint");
        break;
    }
    case op_switch_imm: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_imm");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_switch_char: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_char");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_switch_string: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_string");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_new_func: {
        int r0 = (++it)->u.operand;
        int f0 = (++it)->u.operand;
        int shouldCheck = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_func");
        out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
        break;
    }
    case op_new_captured_func: {
        int r0 = (++it)->u.operand;
        int f0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_captured_func");
        out.printf("%s, f%d", registerName(r0).data(), f0);
        ++it;
        break;
    }
    case op_new_func_exp: {
        int r0 = (++it)->u.operand;
        int f0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_func_exp");
        out.printf("%s, f%d", registerName(r0).data(), f0);
        break;
    }
    case op_call: {
        printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_call_eval: {
        printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_construct_varargs:
    case op_call_varargs: {
        int result = (++it)->u.operand;
        int callee = (++it)->u.operand;
        int thisValue = (++it)->u.operand;
        int arguments = (++it)->u.operand;
        int firstFreeRegister = (++it)->u.operand;
        int varArgOffset = (++it)->u.operand;
        ++it;
        printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
        out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_tear_off_activation: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
        break;
    }
    case op_tear_off_arguments: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "tear_off_arguments");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_ret: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
        break;
    }
    case op_ret_object_or_this: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "constructor_ret");
        out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_construct: {
        printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_strcat: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int count = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "strcat");
        out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
        break;
    }
    case op_to_primitive: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "to_primitive");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_get_pnames: {
        int r0 = it[1].u.operand;
        int r1 = it[2].u.operand;
        int r2 = it[3].u.operand;
        int r3 = it[4].u.operand;
        int offset = it[5].u.operand;
        printLocationAndOp(out, exec, location, it, "get_pnames");
        out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
        it += OPCODE_LENGTH(op_get_pnames) - 1;
        break;
    }
    case op_next_pname: {
        int dest = it[1].u.operand;
        int base = it[2].u.operand;
        int i = it[3].u.operand;
        int size = it[4].u.operand;
        int iter = it[5].u.operand;
        int offset = it[6].u.operand;
        printLocationAndOp(out, exec, location, it, "next_pname");
        out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
        it += OPCODE_LENGTH(op_next_pname) - 1;
        break;
    }
    case op_push_with_scope: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
        break;
    }
    case op_pop_scope: {
        printLocationAndOp(out, exec, location, it, "pop_scope");
        break;
    }
    case op_push_name_scope: {
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        unsigned attributes = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "push_name_scope");
        out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
        break;
    }
    case op_catch: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
        break;
    }
    case op_throw: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
        break;
    }
    case op_throw_static_error: {
        int k0 = (++it)->u.operand;
        int k1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "throw_static_error");
        out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
        break;
    }
    case op_debug: {
        int debugHookID = (++it)->u.operand;
        int hasBreakpointFlag = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "debug");
        out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
        break;
    }
    case op_profile_will_call: {
        int function = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
        break;
    }
    case op_profile_did_call: {
        int function = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
        break;
    }
    case op_end: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
        break;
    }
    case op_resolve_scope: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        int depth = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "resolve_scope");
        out.printf("%s, %s, %u<%s|%s>, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(),
            modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
            depth);
        break;
    }
    case op_get_from_scope: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        ++it; // Structure
        int operand = (++it)->u.operand; // Operand
        ++it; // Skip value profile.
        printLocationAndOp(out, exec, location, it, "get_from_scope");
        out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
            registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(),
            modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
            operand);
        break;
    }
    case op_put_to_scope: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        ++it; // Structure
        int operand = (++it)->u.operand; // Operand
        printLocationAndOp(out, exec, location, it, "put_to_scope");
        out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
            registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(),
            modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
            operand);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
    dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);

#if ENABLE(DFG_JIT)
    Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
    if (!exitSites.isEmpty()) {
        out.print(" !! frequent exits: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < exitSites.size(); ++i)
            out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
    }
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(location);
#endif // ENABLE(DFG_JIT)
    out.print("\n");
}
void CodeBlock::dumpBytecode(
    PrintStream& out, unsigned bytecodeOffset,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    ExecState* exec = m_globalObject->globalExec();
    const Instruction* it = instructions().begin() + bytecodeOffset;
    dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
}
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
    macro(exceptionHandlers) \
    macro(switchJumpTables) \
    macro(stringSwitchJumpTables) \
    macro(evalCodeCache) \
    macro(expressionInfo) \
    macro(callReturnIndexVector)
template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
    return vector.capacity() * sizeof(T);
}
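// Constructing with CopyParsedBlockTag clones an already-parsed block: the
// immutable parsed state (instructions, constants, source, rare data) is
// shared or copied from "other", while all profiling and optimization
// counters start fresh for the new copy.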
CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
    : m_globalObject(other.m_globalObject)
    , m_heap(other.m_heap)
    , m_numCalleeRegisters(other.m_numCalleeRegisters)
    , m_numVars(other.m_numVars)
    , m_isConstructor(other.m_isConstructor)
    , m_shouldAlwaysBeInlined(true)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_argumentsRegister(other.m_argumentsRegister)
    , m_activationRegister(other.m_activationRegister)
    , m_isStrictMode(other.m_isStrictMode)
    , m_needsActivation(other.m_needsActivation)
    , m_mayBeExecuting(false)
    , m_visitAggregateHasBeenCalled(false)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_codeType(other.m_codeType)
    , m_constantRegisters(other.m_constantRegisters)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_hash(other.m_hash)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    ASSERT(m_heap->isDeferred());

    if (SymbolTable* symbolTable = other.symbolTable())
        m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);

    setNumParameters(other.numParameters());
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }

    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
}
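// The main constructor links an UnlinkedCodeBlock against a concrete scope:
// it shares or clones the symbol table, materializes a FunctionExecutable
// for each nested function, copies the rare data (exception handlers and
// jump tables), and finally translates the unlinked instruction stream.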
CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
    : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
    , m_heap(&m_globalObject->vm().heap)
    , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
    , m_numVars(unlinkedCodeBlock->m_numVars)
    , m_isConstructor(unlinkedCodeBlock->isConstructor())
    , m_shouldAlwaysBeInlined(true)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
    , m_vm(unlinkedCodeBlock->vm())
    , m_thisRegister(unlinkedCodeBlock->thisRegister())
    , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
    , m_activationRegister(unlinkedCodeBlock->activationRegister())
    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
    , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
    , m_mayBeExecuting(false)
    , m_visitAggregateHasBeenCalled(false)
    , m_source(sourceProvider)
    , m_sourceOffset(sourceOffset)
    , m_firstLineColumnOffset(firstLineColumnOffset)
    , m_codeType(unlinkedCodeBlock->codeType())
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    ASSERT(m_heap->isDeferred());

    bool didCloneSymbolTable = false;
    if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
        if (codeType() == FunctionCode && symbolTable->captureCount()) {
            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneCapturedNames(*m_vm));
            didCloneSymbolTable = true;
        } else
            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
    }

    setNumParameters(unlinkedCodeBlock->numParameters());
    setConstantRegisters(unlinkedCodeBlock->constantRegisters());
    if (unlinkedCodeBlock->usesGlobalObject())
        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
    m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        unsigned lineCount = unlinkedExecutable->lineCount();
        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
        bool endColumnIsOnStartLine = !lineCount;
        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
        unsigned sourceLength = unlinkedExecutable->sourceLength();
        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
        m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
    }
    m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        unsigned lineCount = unlinkedExecutable->lineCount();
        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
        bool endColumnIsOnStartLine = !lineCount;
        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
        unsigned sourceLength = unlinkedExecutable->sourceLength();
        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
        m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
    }
    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();
        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
            m_rareData->m_constantBuffers.grow(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
                m_rareData->m_constantBuffers[i] = buffer;
            }
        }
        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            size_t nonLocalScopeDepth = scope->depth();
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
                m_rareData->m_exceptionHandlers[i].start = handler.start;
                m_rareData->m_exceptionHandlers[i].end = handler.end;
                m_rareData->m_exceptionHandlers[i].target = handler.target;
                m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
#if ENABLE(JIT) && ENABLE(LLINT)
                m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch)));
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }
    // Allocate metadata buffers for the bytecode
    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
        m_llintCallLinkInfos.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
        m_arrayProfiles.grow(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
        m_arrayAllocationProfiles.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
        m_valueProfiles.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
        m_objectAllocationProfiles.resizeToFit(size);
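    // The translation loop walks the unlinked stream once: each opcode is
    // replaced with the interpreter's opcode pointer, raw operands are copied
    // through, and opcodes that carry metadata get pointers to this block's
    // profile and link-info slots patched into their trailing operands.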
    // Copy and translate the UnlinkedInstructions
    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());

    Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
    for (unsigned i = 0; !instructionReader.atEnd(); ) {
        const UnlinkedInstruction* pc = instructionReader.next();

        unsigned opLength = opcodeLength(pc[0].u.opcode);

        instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
        for (size_t j = 1; j < opLength; ++j) {
            if (sizeof(int32_t) != sizeof(intptr_t))
                instructions[i + j].u.pointer = 0;
            instructions[i + j].u.operand = pc[j].u.operand;
        }
        switch (pc[0].u.opcode) {
        case op_call_varargs:
        case op_construct_varargs:
        case op_get_by_val:
        case op_get_argument_by_val: {
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            FALLTHROUGH;
        }
        case op_get_by_id: {
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            break;
        }
        case op_put_by_val: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_put_by_val_direct: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_new_array:
        case op_new_array_buffer:
        case op_new_array_with_size: {
            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
            break;
        }
        case op_new_object: {
            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
            int inferredInlineCapacity = pc[opLength - 2].u.operand;

            instructions[i + opLength - 1] = objectAllocationProfile;
            objectAllocationProfile->initialize(*vm(),
                m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
            break;
        }
        case op_call:
        case op_call_eval: {
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            break;
        }
        case op_construct: {
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            break;
        }
1707 case op_get_by_id_out_of_line
:
1708 case op_get_array_length
:
1711 case op_init_global_const_nop
: {
1712 ASSERT(codeType() == GlobalCode
);
1713 Identifier ident
= identifier(pc
[4].u
.operand
);
1714 SymbolTableEntry entry
= m_globalObject
->symbolTable()->get(ident
.impl());
1718 instructions
[i
+ 0] = vm()->interpreter
->getOpcode(op_init_global_const
);
1719 instructions
[i
+ 1] = &m_globalObject
->registerAt(entry
.getIndex());
1723 case op_resolve_scope
: {
1724 const Identifier
& ident
= identifier(pc
[2].u
.operand
);
1725 ResolveType type
= static_cast<ResolveType
>(pc
[3].u
.operand
);
1727 ResolveOp op
= JSScope::abstractResolve(m_globalObject
->globalExec(), scope
, ident
, Get
, type
);
1728 instructions
[i
+ 3].u
.operand
= op
.type
;
1729 instructions
[i
+ 4].u
.operand
= op
.depth
;
1731 instructions
[i
+ 5].u
.activation
.set(*vm(), ownerExecutable
, op
.activation
);
1735 case op_get_from_scope
: {
1736 ValueProfile
* profile
= &m_valueProfiles
[pc
[opLength
- 1].u
.operand
];
1737 ASSERT(profile
->m_bytecodeOffset
== -1);
1738 profile
->m_bytecodeOffset
= i
;
1739 instructions
[i
+ opLength
- 1] = profile
;
1741 // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1742 const Identifier
& ident
= identifier(pc
[3].u
.operand
);
1743 ResolveModeAndType modeAndType
= ResolveModeAndType(pc
[4].u
.operand
);
1744 ResolveOp op
= JSScope::abstractResolve(m_globalObject
->globalExec(), scope
, ident
, Get
, modeAndType
.type());
1746 instructions
[i
+ 4].u
.operand
= ResolveModeAndType(modeAndType
.mode(), op
.type
).operand();
1747 if (op
.type
== GlobalVar
|| op
.type
== GlobalVarWithVarInjectionChecks
)
1748 instructions
[i
+ 5].u
.watchpointSet
= op
.watchpointSet
;
1749 else if (op
.structure
)
1750 instructions
[i
+ 5].u
.structure
.set(*vm(), ownerExecutable
, op
.structure
);
1751 instructions
[i
+ 6].u
.pointer
= reinterpret_cast<void*>(op
.operand
);
1755 case op_put_to_scope
: {
1756 // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1757 const Identifier
& ident
= identifier(pc
[2].u
.operand
);
1758 ResolveModeAndType modeAndType
= ResolveModeAndType(pc
[4].u
.operand
);
1759 ResolveOp op
= JSScope::abstractResolve(m_globalObject
->globalExec(), scope
, ident
, Put
, modeAndType
.type());
1761 instructions
[i
+ 4].u
.operand
= ResolveModeAndType(modeAndType
.mode(), op
.type
).operand();
1762 if (op
.type
== GlobalVar
|| op
.type
== GlobalVarWithVarInjectionChecks
)
1763 instructions
[i
+ 5].u
.watchpointSet
= op
.watchpointSet
;
1764 else if (op
.type
== ClosureVar
|| op
.type
== ClosureVarWithVarInjectionChecks
) {
1765 if (op
.watchpointSet
)
1766 op
.watchpointSet
->invalidate();
1767 } else if (op
.structure
)
1768 instructions
[i
+ 5].u
.structure
.set(*vm(), ownerExecutable
, op
.structure
);
1769 instructions
[i
+ 6].u
.pointer
= reinterpret_cast<void*>(op
.operand
);
1773 case op_captured_mov
:
1774 case op_new_captured_func
: {
1775 if (pc
[3].u
.index
== UINT_MAX
) {
1776 instructions
[i
+ 3].u
.watchpointSet
= 0;
1779 StringImpl
* uid
= identifier(pc
[3].u
.index
).impl();
1780 RELEASE_ASSERT(didCloneSymbolTable
);
1781 ConcurrentJITLocker
locker(m_symbolTable
->m_lock
);
1782 SymbolTable::Map::iterator iter
= m_symbolTable
->find(locker
, uid
);
1783 ASSERT(iter
!= m_symbolTable
->end(locker
));
1784 iter
->value
.prepareToWatch(symbolTable());
1785 instructions
[i
+ 3].u
.watchpointSet
= iter
->value
.watchpointSet();
1790 if (pc
[1].u
.index
== DidReachBreakpoint
)
1791 m_hasDebuggerStatement
= true;
1800 m_instructions
= WTF::RefCountedArray
<Instruction
>(instructions
);
1802 // Set optimization thresholds only after m_instructions is initialized, since these
1803 // rely on the instruction count (and are in theory permitted to also inspect the
1804 // instruction stream to more accurate assess the cost of tier-up).
1805 optimizeAfterWarmUp();
1808 // If the concurrent thread will want the code block's hash, then compute it here
1810 if (Options::alwaysComputeHash())
1813 if (Options::dumpGeneratedBytecodes())
1816 m_heap
->m_codeBlocks
.add(this);
1817 m_heap
->reportExtraMemoryCost(sizeof(CodeBlock
) + m_instructions
.size() * sizeof(Instruction
));
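
// To make the translation above concrete: for a profiled opcode the unlinked stream
// stores a profile *index* in the trailing operand, while the linked stream stores a
// direct pointer into this block's profile arrays. A minimal standalone sketch of that
// rewrite (hypothetical types, not part of this file's build):
#if 0
#include <cstddef>
#include <cstdint>
#include <vector>

union Slot {
    uintptr_t opcode;
    int32_t operand;
    void* pointer;
};

struct Profile {
    int bytecodeOffset;
};

// Replace the profile index in an instruction's last slot with a pointer to the
// profile itself, so the interpreter can record values without an index lookup.
static void linkProfileSlot(std::vector<Slot>& insns, size_t i, size_t opLength, std::vector<Profile>& profiles)
{
    int index = insns[i + opLength - 1].operand; // unlinked form: an index
    profiles[index].bytecodeOffset = static_cast<int>(i);
    insns[i + opLength - 1].pointer = &profiles[index]; // linked form: a pointer
}
#endif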
CodeBlock::~CodeBlock()
{
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->remove();
#if ENABLE(JIT)
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->remove();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors in m_callLinkInfos.

    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
        (*iter)->deref();
#endif // ENABLE(JIT)
}
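
// Concretely, the ordering hazard above is the usual intrusive-list one. A minimal
// sketch of the idea (hypothetical types, not this file's linked-list classes):
#if 0
struct IncomingNode {
    IncomingNode* prev;
    IncomingNode* next;

    // Detach from whichever list we are on; must run while the neighbors are
    // still alive, which is why the dying callee removes its incoming nodes
    // eagerly rather than waiting for the callers' destructors.
    void remove()
    {
        if (prev)
            prev->next = next;
        if (next)
            next->prev = prev;
        prev = next = nullptr;
    }
};
#endif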
void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles.resizeToFit(newValue);
}

void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
{
    EvalCacheMap::iterator end = m_cacheMap.end();
    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
        visitor.append(&ptr->value);
}
CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITCode::DFGJIT)
        return 0;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock.get();
#else // ENABLE(FTL_JIT)
    return 0;
#endif // ENABLE(FTL_JIT)
}
void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
#if ENABLE(PARALLEL_GC)
    // I may be asked to scan myself more than once, and it may even happen concurrently.
    // To this end, use a CAS loop to check if I've been called already. Only one thread
    // may proceed past this point - whichever one wins the CAS race.
    unsigned oldValue;
    do {
        oldValue = m_visitAggregateHasBeenCalled;
        if (oldValue) {
            // Looks like someone else won! Return immediately to ensure that we don't
            // trace the same CodeBlock concurrently. Doing so is hazardous since we will
            // be mutating the state of ValueProfiles, which contain JSValues, which can
            // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
            // that are nearly impossible to track down.

            // Also note that it must be safe to return early as soon as we see the
            // value true (well, (unsigned)1), since once a GC thread is in this method
            // and has won the CAS race (i.e. was responsible for setting the value true)
            // it will definitely complete the rest of this method before declaring
            // victory.
            return;
        }
    } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
#endif // ENABLE(PARALLEL_GC)

    if (!!m_alternative)
        m_alternative->visitAggregate(visitor);

    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->visitAggregate(visitor);

    visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
    if (m_jitCode)
        visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
    if (m_instructions.size()) {
        // Divide by refCount() because m_instructions points to something that is shared
        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
        // Having each CodeBlock report only its proportional share of the size is one way
        // of accomplishing this.
        visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
    }

    visitor.append(&m_unlinkedCode);

    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
    // inline cache clearing, and jettisoning. The probability of us wanting to do at
    // least one of those things is probably quite close to 1. So we add one no matter what
    // and when it runs, it figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(this);

    m_allTransitionsHaveBeenMarked = false;

    if (shouldImmediatelyAssumeLivenessDuringScan()) {
        // This code block is live, so scan all references strongly and return.
        stronglyVisitStrongReferences(visitor);
        stronglyVisitWeakReferences(visitor);
        propagateTransitions(visitor);
        return;
    }

#if ENABLE(DFG_JIT)
    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(this);

    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().

    m_jitCode->dfgCommon()->livenessHasBeenProved = false;

    propagateTransitions(visitor);
    determineLiveness(visitor);
#else // ENABLE(DFG_JIT)
    RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
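
// The guard at the top of visitAggregate() is a standard claim-by-CAS idiom. A
// standalone sketch using std::atomic in place of WTF::weakCompareAndSwap
// (illustrative only, not part of this file's build):
#if 0
#include <atomic>

static std::atomic<unsigned> visitHasBeenCalled(0);

static bool tryClaimVisit()
{
    unsigned expected = 0;
    // Exactly one thread flips 0 -> 1 and proceeds; every other thread sees a
    // nonzero value and bails out early.
    return visitHasBeenCalled.compare_exchange_strong(expected, 1);
}
#endif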
bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
{
#if ENABLE(DFG_JIT)
    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that this means that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    // For simplicity, we don't attempt to jettison code blocks during GC if
    // they are executing. Instead we strongly mark their weak references to
    // allow them to continue to execute soundly.
    if (m_mayBeExecuting)
        return true;

    if (Options::forceDFGCodeBlockLiveness())
        return true;

    return false;
#else
    return true;
#endif
}
bool CodeBlock::isKnownToBeLiveDuringGC()
{
#if ENABLE(DFG_JIT)
    // This should return true for:
    // - Code blocks that behave like normal objects - i.e. if they are referenced then they
    //   are live.
    // - Code blocks that were running on the stack.
    // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
    //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
    //   would survive as true.
    // - Code blocks that don't have any dead weak references.

    return shouldImmediatelyAssumeLivenessDuringScan()
        || m_jitCode->dfgCommon()->livenessHasBeenProved;
#else
    return true;
#endif
}
void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;

    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line: {
                if (Heap::isMarked(instruction[4].u.structure.get()))
                    visitor.append(&instruction[6].u.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            switch (stubInfo.accessType) {
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct: {
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if ((!origin || Heap::isMarked(origin))
                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
                    visitor.append(&stubInfo.u.putByIdTransition.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }

            case access_put_by_id_list: {
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if (origin && !Heap::isMarked(origin)) {
                    allAreMarkedSoFar = false;
                    break;
                }
                for (unsigned j = list->size(); j--;) {
                    PutByIdAccess& access = list->m_list[j];
                    if (!access.isTransition())
                        continue;
                    if (Heap::isMarked(access.oldStructure()))
                        visitor.append(&access.m_newStructure);
                    else
                        allAreMarkedSoFar = false;
                }
                break;
            }

            default:
                break;
            }
        }
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if ((!dfgCommon->transitions[i].m_codeOrigin
                || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
                && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
                // If the following three things are live, then the target of the
                // transition is also live:
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                visitor.append(&dfgCommon->transitions[i].m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)

    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}
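
// The rule implemented above, stated as an inference: for each transition
// (codeOrigin, from -> to), if (codeOrigin is unset or marked) and from is marked,
// then mark to. The GC reruns this via the weak reference harvester until a
// fixpoint is reached, so exactly the transition targets reachable from live
// sources get marked, without the caches keeping dead structures alive.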
void CodeBlock::determineLiveness(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (shouldImmediatelyAssumeLivenessDuringScan())
        return;

#if ENABLE(DFG_JIT)
    // Check if we have any remaining work to do.
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->livenessHasBeenProved)
        return;

    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
            allAreLiveSoFar = false;
            break;
        }
    }

    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    dfgCommon->livenessHasBeenProved = true;
    stronglyVisitStrongReferences(visitor);
#endif // ENABLE(DFG_JIT)
}
void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
    propagateTransitions(visitor);
    determineLiveness(visitor);
}
void CodeBlock::finalizeUnconditionally()
{
    Interpreter* interpreter = m_vm->interpreter;
    if (JITCode::couldBeInterpreted(jitType())) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
            case op_get_by_id:
            case op_get_by_id_out_of_line:
            case op_put_by_id:
            case op_put_by_id_out_of_line:
                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
                curInstruction[4].u.structure.clear();
                curInstruction[5].u.operand = 0;
                break;
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line:
                if (Heap::isMarked(curInstruction[4].u.structure.get())
                    && Heap::isMarked(curInstruction[6].u.structure.get())
                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
                    break;
                if (Options::verboseOSR()) {
                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
                        curInstruction[4].u.structure.get(),
                        curInstruction[6].u.structure.get(),
                        curInstruction[7].u.structureChain.get());
                }
                curInstruction[4].u.structure.clear();
                curInstruction[6].u.structure.clear();
                curInstruction[7].u.structureChain.clear();
                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
                break;
            case op_get_array_length:
                break;
            case op_to_this:
                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
                curInstruction[2].u.structure.clear();
                break;
            case op_get_callee:
                if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
                curInstruction[2].u.jsCell.clear();
                break;
            case op_resolve_scope: {
                WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
                if (!activation || Heap::isMarked(activation.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing dead activation %p.\n", activation.get());
                activation.clear();
                break;
            }
            case op_get_from_scope:
            case op_put_to_scope: {
                ResolveModeAndType modeAndType =
                    ResolveModeAndType(curInstruction[4].u.operand);
                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
                    continue;
                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
                if (!structure || Heap::isMarked(structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
                structure.clear();
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
                if (Options::verboseOSR())
                    dataLog("Clearing LLInt call from ", *this, "\n");
                m_llintCallLinkInfos[i].unlink();
            }
            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
                m_llintCallLinkInfos[i].lastSeenCallee.clear();
        }
    }

#if ENABLE(DFG_JIT)
    // Check if we're not live. If we're not, then jettison.
    if (!isKnownToBeLiveDuringGC()) {
        if (Options::verboseOSR())
            dataLog(*this, " has dead weak references, jettisoning during GC.\n");

        if (DFG::shouldShowDisassembly()) {
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
                JSCell* origin = transition.m_codeOrigin.get();
                JSCell* from = transition.m_from.get();
                JSCell* to = transition.m_to.get();
                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
                    continue;
                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
            }
            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                JSCell* weak = dfgCommon->weakReferences[i].get();
                if (Heap::isMarked(weak))
                    continue;
                dataLog("    Weak reference ", RawPointer(weak), ".\n");
            }
        }

        jettison(Profiler::JettisonDueToWeakReference);
        return;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(JIT)
    // Handle inline caches.
    if (!!jitCode()) {
        RepatchBuffer repatchBuffer(this);
        for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
            (*iter)->visitWeak(repatchBuffer);

        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;

            if (stubInfo.visitWeakReferences(repatchBuffer))
                continue;

            resetStubDuringGCInternal(repatchBuffer, stubInfo);
        }
    }
#endif
}
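
// The pattern throughout the LLInt sweep above: a cache entry survives only if every
// cell it names survived marking; otherwise the entire entry reverts to its generic
// form. For example, a put_by_id transition caches (oldStructure, newStructure,
// chain), and all three are dropped together, since repatching the instruction back
// to op_put_by_id forgets the whole triple.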
void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}

void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getStubInfoMap(locker, result);
}

void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}

void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getCallLinkInfoMap(locker, result);
}

#if ENABLE(JIT)
StructureStubInfo* CodeBlock::addStubInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_stubInfos.add();
}

CallLinkInfo* CodeBlock::addCallLinkInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_callLinkInfos.add();
}

void CodeBlock::resetStub(StructureStubInfo& stubInfo)
{
    if (stubInfo.accessType == access_unset)
        return;

    ConcurrentJITLocker locker(m_lock);

    RepatchBuffer repatchBuffer(this);
    resetStubInternal(repatchBuffer, stubInfo);
}

void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);

    if (Options::verboseOSR()) {
        // This can be called from GC destructor calls, so we don't try to do a full dump
        // of the CodeBlock.
        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
    }

    RELEASE_ASSERT(JITCode::isJIT(jitType()));

    if (isGetByIdAccess(accessType))
        resetGetByID(repatchBuffer, stubInfo);
    else if (isPutByIdAccess(accessType))
        resetPutByID(repatchBuffer, stubInfo);
    else {
        RELEASE_ASSERT(isInAccess(accessType));
        resetIn(repatchBuffer, stubInfo);
    }

    stubInfo.reset();
}
void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    resetStubInternal(repatchBuffer, stubInfo);
    stubInfo.resetByGC = true;
}

CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
{
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        if ((*iter)->codeOrigin == CodeOrigin(index))
            return *iter;
    }
    return 0;
}
#endif // ENABLE(JIT)
void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
{
    visitor.append(&m_globalObject);
    visitor.append(&m_ownerExecutable);
    visitor.append(&m_symbolTable);
    visitor.append(&m_unlinkedCode);
    if (m_rareData)
        m_rareData->m_evalCodeCache.visitAggregate(visitor);
    visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
    for (size_t i = 0; i < m_functionExprs.size(); ++i)
        visitor.append(&m_functionExprs[i]);
    for (size_t i = 0; i < m_functionDecls.size(); ++i)
        visitor.append(&m_functionDecls[i]);
    for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
        m_objectAllocationProfiles[i].visitAggregate(visitor);

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        if (dfgCommon->inlineCallFrames.get())
            dfgCommon->inlineCallFrames->visitAggregate(visitor);
    }
#endif

    updateAllPredictions();
}
void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
        if (!!dfgCommon->transitions[i].m_codeOrigin)
            visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
        visitor.append(&dfgCommon->transitions[i].m_from);
        visitor.append(&dfgCommon->transitions[i].m_to);
    }

    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
        visitor.append(&dfgCommon->weakReferences[i]);
#endif
}
CodeBlock* CodeBlock::baselineAlternative()
{
#if ENABLE(JIT)
    CodeBlock* result = this;
    while (result->alternative())
        result = result->alternative();
    RELEASE_ASSERT(result);
    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
    return result;
#else
    return this;
#endif
}

CodeBlock* CodeBlock::baselineVersion()
{
#if ENABLE(JIT)
    if (JITCode::isBaselineCode(jitType()))
        return this;
    CodeBlock* result = replacement();
    if (!result) {
        // This can happen if we're creating the original CodeBlock for an executable.
        // Assume that we're the baseline CodeBlock.
        RELEASE_ASSERT(jitType() == JITCode::None);
        return this;
    }
    result = result->baselineAlternative();
    return result;
#else
    return this;
#endif
}

#if ENABLE(JIT)
bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
{
    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
}

bool CodeBlock::hasOptimizedReplacement()
{
    return hasOptimizedReplacement(jitType());
}
#endif
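
// Concretely: alternative() links each tier to the code block it replaced, so for a
// function that went Baseline -> DFG -> FTL the chain seen from the FTL block is
// FTL -> DFG -> Baseline, and baselineAlternative() walks to the end of that chain
// from wherever it starts.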
bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
{
    if (operand.isArgument())
        return operand.toArgument() && usesArguments();

    if (inlineCallFrame)
        return inlineCallFrame->capturedVars.get(operand.toLocal());

    // The activation object isn't in the captured region, but it's "captured"
    // in the sense that stores to its location can be observed indirectly.
    if (needsActivation() && operand == activationRegister())
        return true;

    // Ditto for the arguments object.
    if (usesArguments() && operand == argumentsRegister())
        return true;
    if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
        return true;

    // We're in global code so there are no locals to capture.
    if (!symbolTable())
        return false;

    return symbolTable()->isCaptured(operand.offset());
}
int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
{
    // We'll be adding this to the stack pointer to get a registers pointer that looks
    // like it would have looked in the baseline engine. For example, if bytecode would
    // have put the first captured variable at offset -5 but we put it at offset -1, then
    // we'll have an offset of 4.
    int offset = 0;

    // Compute where we put the captured variables. This offset will point the registers
    // pointer directly at the first captured var.
    offset += machineCaptureStart;

    // Now compute the offset needed to make the runtime see the captured variables at the
    // same offset that the bytecode would have used.
    offset -= symbolTable()->captureStart();

    return offset;
}

int CodeBlock::framePointerOffsetToGetActivationRegisters()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return 0;
#if ENABLE(DFG_JIT)
    return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
#else
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
#endif
}
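
// Worked example of the arithmetic above, reusing the numbers from the comment: if
// the optimizing JIT placed the first captured variable at machine offset -1
// (machineCaptureStart == -1) while bytecode expected it at -5 (captureStart() == -5),
// then offset == -1 - (-5) == 4, so adding 4 shifts the registers pointer to where
// the baseline layout would have put the captured variables.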
HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());

    if (!m_rareData)
        return 0;

    Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
    for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
        // Handlers are ordered innermost first, so the first handler we encounter
        // that contains the source address is the correct handler to use.
        if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
            return &exceptionHandlers[i];
    }

    return 0;
}
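
// Example of the innermost-first invariant: for
//
//     try { try { f(); } catch (e1) { ... } } catch (e2) { ... }
//
// the inner handler's [start, end) range nests inside the outer one's and is emitted
// first, so a linear scan that returns the first containing range always selects the
// innermost, i.e. correct, handler.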
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());
    return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}

unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    int divot;
    int startOffset;
    int endOffset;
    unsigned line;
    unsigned column;
    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    return column;
}

void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    divot += m_sourceOffset;
    column += line ? 1 : firstLineColumnOffset();
    line += m_ownerExecutable->lineNo();
}
bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
{
    Interpreter* interpreter = vm()->interpreter;
    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end;) {
        OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
        if (opcodeID == op_debug) {
            unsigned bytecodeOffset = it - begin;
            int unused;
            unsigned opDebugLine;
            unsigned opDebugColumn;
            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
            if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
                return true;
        }
        it += opcodeLengths[opcodeID];
    }
    return false;
}
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
    m_rareCaseProfiles.shrinkToFit();
    m_specialFastCaseProfiles.shrinkToFit();

    if (shrinkMode == EarlyShrink) {
        m_constantRegisters.shrinkToFit();

        if (m_rareData) {
            m_rareData->m_switchJumpTables.shrinkToFit();
            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
        }
    } // else don't shrink these, because we would have already pointed pointers into these tables.
}
unsigned CodeBlock::addOrFindConstant(JSValue v)
{
    unsigned result;
    if (findConstant(v, result))
        return result;
    return addConstant(v);
}

bool CodeBlock::findConstant(JSValue v, unsigned& index)
{
    unsigned numberOfConstants = numberOfConstantRegisters();
    for (unsigned i = 0; i < numberOfConstants; ++i) {
        if (getConstant(FirstConstantRegisterIndex + i) == v) {
            index = i;
            return true;
        }
    }
    index = numberOfConstants;
    return false;
}
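
// Usage sketch (illustrative only): repeated additions of an equal JSValue share a
// single constant register.
#if 0
unsigned first = codeBlock->addOrFindConstant(jsNumber(42)); // not found, so added
unsigned second = codeBlock->addOrFindConstant(jsNumber(42)); // found: second == first
#endif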
#if ENABLE(JIT)
void CodeBlock::unlinkCalls()
{
    if (!!m_alternative)
        m_alternative->unlinkCalls();
    for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
        if (m_llintCallLinkInfos[i].isLinked())
            m_llintCallLinkInfos[i].unlink();
    }
    if (m_callLinkInfos.isEmpty())
        return;
    if (!m_vm->canUseJIT())
        return;
    RepatchBuffer repatchBuffer(this);
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        if (!info.isLinked())
            continue;
        info.unlink(repatchBuffer);
    }
}

void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingCalls.push(incoming);
}
#endif // ENABLE(JIT)
void CodeBlock::unlinkIncomingCalls()
{
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->unlink();
#if ENABLE(JIT)
    if (m_incomingCalls.isEmpty())
        return;
    RepatchBuffer repatchBuffer(this);
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->unlink(repatchBuffer);
#endif // ENABLE(JIT)
}

void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingLLIntCalls.push(incoming);
}
void CodeBlock::clearEvalCache()
{
    if (!!m_alternative)
        m_alternative->clearEvalCache();
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->clearEvalCache();
    if (!m_rareData)
        return;
    m_rareData->m_evalCodeCache.clear();
}

void CodeBlock::install()
{
    ownerExecutable()->installCode(this);
}

PassRefPtr<CodeBlock> CodeBlock::newReplacement()
{
    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
}

const SlowArgument* CodeBlock::machineSlowArguments()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return symbolTable()->slowArguments();

#if ENABLE(DFG_JIT)
    return jitCode()->dfgCommon()->slowArguments.get();
#else // ENABLE(DFG_JIT)
    return 0;
#endif // ENABLE(DFG_JIT)
}
#if ENABLE(JIT)
CodeBlock* ProgramCodeBlock::replacement()
{
    return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
}

CodeBlock* EvalCodeBlock::replacement()
{
    return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
}

CodeBlock* FunctionCodeBlock::replacement()
{
    return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}

DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
{
    return DFG::programCapabilityLevel(this);
}

DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
{
    return DFG::evalCapabilityLevel(this);
}

DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
{
    if (m_isConstructor)
        return DFG::functionForConstructCapabilityLevel(this);
    return DFG::functionForCallCapabilityLevel(this);
}
#endif // ENABLE(JIT)
void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode)
{
    RELEASE_ASSERT(reason != Profiler::NotJettisoned);

#if ENABLE(DFG_JIT)
    if (DFG::shouldShowDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason, ".\n");
    }

    DeferGCForAWhile deferGC(*m_heap);
    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));

    if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
        compilation->setJettisonReason(reason);

    // We want to accomplish two things here:
    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
    //    we should OSR exit at the top of the next bytecode instruction after the return.
    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.

    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
    // whether the invalidation has already happened.
    if (!jitCode()->dfgCommon()->invalidate()) {
        // Nothing to do since we've already been invalidated. That means that we cannot be
        // the optimized replacement.
        RELEASE_ASSERT(this != replacement());
        return;
    }

    if (DFG::shouldShowDisassembly())
        dataLog("    Did invalidate ", *this, "\n");

    // Count the reoptimization if that's what the user wanted.
    if (mode == CountReoptimization) {
        // FIXME: Maybe this should call alternative().
        // https://bugs.webkit.org/show_bug.cgi?id=123677
        baselineAlternative()->countReoptimization();
        if (DFG::shouldShowDisassembly())
            dataLog("    Did count reoptimization for ", *this, "\n");
    }

    // Now take care of the entrypoint.
    if (this != replacement()) {
        // This means that we were never the entrypoint. This can happen for OSR entry code
        // blocks.
        return;
    }
    alternative()->optimizeAfterWarmUp();
    tallyFrequentExitSites();
    alternative()->install();
    if (DFG::shouldShowDisassembly())
        dataLog("    Did install baseline version of ", *this, "\n");
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(mode);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
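
// The net effect of jettison(): invalidation makes any on-stack activation of this
// block OSR-exit at its next return; the optional reoptimization count feeds the
// exponential backoff in adjustedCounterValue(); and installing alternative()
// redirects future calls to the baseline block. The early returns skip the later
// steps when we had already been invalidated or were never the entrypoint (e.g. OSR
// entry blocks).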
JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
    if (!codeOrigin.inlineCallFrame)
        return globalObject();
    return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
}
void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
    CodeBlock* callerCodeBlock = callerFrame->codeBlock();

    if (Options::verboseCallLink())
        dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");

    if (!m_shouldAlwaysBeInlined)
        return;

#if ENABLE(DFG_JIT)
    if (!hasBaselineJITProfiling())
        return;

    if (!DFG::mightInlineFunction(this))
        return;

    if (!canInline(m_capabilityLevelState))
        return;

    if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is too large.\n");
        return;
    }

    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
        // If the caller is still in the interpreter, then we can't expect inlining to
        // happen anytime soon. Assume it's profitable to optimize it separately. This
        // ensures that a function is SABI only if it is called no more frequently than
        // any of its callers.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is in LLInt.\n");
        return;
    }

    if (callerCodeBlock->codeType() != FunctionCode) {
        // If the caller is either eval or global code, assume that that won't be
        // optimized anytime soon. For eval code this is particularly true since we
        // delay eval optimization by a *lot*.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is not a function.\n");
        return;
    }

    ExecState* frame = callerFrame;
    for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
        if (frame->isVMEntrySentinel())
            break;
        if (frame->codeBlock() == this) {
            // Recursive calls won't be inlined.
            if (Options::verboseCallLink())
                dataLog("    Clearing SABI because recursion was detected.\n");
            m_shouldAlwaysBeInlined = false;
            return;
        }
    }

    RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);

    if (canCompile(callerCodeBlock->m_capabilityLevelState))
        return;

    if (Options::verboseCallLink())
        dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");

    m_shouldAlwaysBeInlined = false;
#endif
}
#if ENABLE(JIT)
unsigned CodeBlock::reoptimizationRetryCounter() const
{
    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
    return m_reoptimizationRetryCounter;
}
#endif // ENABLE(JIT)

void CodeBlock::countReoptimization()
{
    m_reoptimizationRetryCounter++;
    if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
        m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
}

unsigned CodeBlock::numberOfDFGCompiles()
{
    ASSERT(JITCode::isBaselineCode(jitType()));
    if (Options::testTheFTL()) {
        if (m_didFailFTLCompilation)
            return 1000000;
        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
    }
    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
int32_t CodeBlock::codeTypeThresholdMultiplier() const
{
    if (codeType() == EvalCode)
        return Options::evalThresholdMultiplier();
    return 1;
}

double CodeBlock::optimizationThresholdScalingFactor()
{
    // This expression arises from doing a least-squares fit of
    //
    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
    //
    // against the data points:
    //
    //     x       F[x_]
    //     10      0.9    (smallest reasonable code block)
    //     200     1.0    (typical small-ish code block)
    //     320     1.2    (something I saw in 3d-cube that I wanted to optimize)
    //     1268    5.0    (something I saw in 3d-cube that I didn't want to optimize)
    //     4000    5.5    (random large size, used to cause the function to converge to a shallow curve of some sort)
    //     10000   6.0    (similar to above)
    //
    // I achieve the minimization using the following Mathematica code:
    //
    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
    //
    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
    //
    // solution =
    //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
    //         {a, b, c, d}][[2]]
    //
    // And the code below (to initialize a, b, c, d) is generated by:
    //
    // Print["const double " <> ToString[#[[1]]] <> " = " <>
    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
    //
    // We've long known the following to be true:
    // - Small code blocks are cheap to optimize and so we should do it sooner rather
    //   than later.
    // - Large code blocks are expensive to optimize and so we should postpone doing so,
    //   and sometimes have a large enough threshold that we never optimize them.
    // - The difference in cost is not totally linear because (a) just invoking the
    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
    //   in the correlation between instruction count and the actual compilation cost
    //   that for those large blocks, the instruction count should not have a strong
    //   influence on our threshold.
    //
    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
    // example where the heuristics were right (code block in 3d-cube with instruction
    // count 320, which got compiled early as it should have been) and one where they were
    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
    // to compile and didn't run often enough to warrant compilation in my opinion), and
    // then threw in additional data points that represented my own guess of what our
    // heuristics should do for some round-numbered examples.
    //
    // The expression to which I decided to fit the data arose because I started with an
    // affine function, and then did two things: put the linear part in an Abs to ensure
    // that the fit didn't end up choosing a negative value of c (which would result in
    // the function turning over and going negative for large x) and I threw in a Sqrt
    // term because Sqrt represents my intuition that the function should be more sensitive
    // to small changes in small values of x, but less sensitive when x gets large.
    //
    // Note that the current fit essentially eliminates the linear portion of the
    // expression (c == 0.0).
    const double a = 0.061504;
    const double b = 1.02406;
    const double c = 0.0;
    const double d = 0.825914;

    double instructionCount = this->instructionCount();

    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.

    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;

    result *= codeTypeThresholdMultiplier();

    if (Options::verboseOSR()) {
        dataLog(
            *this, ": instruction count is ", instructionCount,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
    return result;
}
static int32_t clipThreshold(double threshold)
{
    if (threshold < 1.0)
        return 1;

    if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();

    return static_cast<int32_t>(threshold);
}

int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
{
    return clipThreshold(
        static_cast<double>(desiredThreshold) *
        optimizationThresholdScalingFactor() *
        (1 << reoptimizationRetryCounter()));
}
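
// Worked example, continuing the one above: with desiredThreshold == 1000, a scaling
// factor of ~2.77, and reoptimizationRetryCounter() == 2, this returns
// clipThreshold(1000 * 2.77 * (1 << 2)) ~= 11080; each recorded reoptimization
// doubles how long we wait before trying again.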
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
            == DFG::Worklist::Compiled) {
            optimizeNextInvocation();
            return true;
        }
    }
#endif

    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
void CodeBlock::optimizeNextInvocation()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing next invocation.\n");
    m_jitExecuteCounter.setNewThreshold(0, this);
}

void CodeBlock::dontOptimizeAnytimeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Not optimizing anytime soon.\n");
    m_jitExecuteCounter.deferIndefinitely();
}

void CodeBlock::optimizeAfterWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after warm-up.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
#endif
}

void CodeBlock::optimizeAfterLongWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after long warm-up.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
#endif
}

void CodeBlock::optimizeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing soon.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
#endif
}

void CodeBlock::forceOptimizationSlowPathConcurrently()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Forcing slow path concurrently.\n");
    m_jitExecuteCounter.forceSlowPathConcurrently();
}
#if ENABLE(DFG_JIT)
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
    JITCode::JITType type = jitType();
    if (type != JITCode::BaselineJIT) {
        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    CodeBlock* theReplacement = replacement();
    if ((result == CompilationSuccessful) != (theReplacement != this)) {
        dataLog(*this, ": we have result = ", result, " but ");
        if (theReplacement == this)
            dataLog("we are our own replacement.\n");
        else
            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    switch (result) {
    case CompilationSuccessful:
        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
        optimizeNextInvocation();
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon();
        return;
    case CompilationDeferred:
        // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
        // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
        // necessarily guarantee anything. So, we make sure that even if that
        // function ends up being a no-op, we still eventually retry and realize
        // that we have optimized code ready.
        optimizeAfterWarmUp();
        return;
    case CompilationInvalidated:
        // Retry with exponential backoff.
        countReoptimization();
        optimizeAfterWarmUp();
        return;
    }

    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
    RELEASE_ASSERT_NOT_REACHED();
}
uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    // Compute this the lame way so we don't saturate. This is called infrequently
    // enough that this loop won't hurt us.
    unsigned result = desiredThreshold;
    for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
        unsigned newResult = result << 1;
        if (newResult < result)
            return std::numeric_limits<uint32_t>::max();
        result = newResult;
    }
    return result;
}

uint32_t CodeBlock::exitCountThresholdForReoptimization()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
}

uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
}

bool CodeBlock::shouldReoptimizeNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimization();
}

bool CodeBlock::shouldReoptimizeFromLoopNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
}
#endif // ENABLE(DFG_JIT)
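
// Worked example: with a desired threshold of, say, 100 and two prior
// reoptimizations of the baseline block, the threshold is 100 << 2 == 400 exits;
// the explicit newResult < result check returns UINT32_MAX instead of wrapping
// once the repeated doubling would overflow.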
ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
            return &m_arrayProfiles[i];
    }
    return 0;
}

ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
{
    ArrayProfile* result = getArrayProfile(bytecodeOffset);
    if (result)
        return result;
    return addArrayProfile(bytecodeOffset);
}
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
    ConcurrentJITLocker locker(m_lock);

    numberOfLiveNonArgumentValueProfiles = 0;
    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        unsigned numSamples = profile->totalNumberOfSamples();
        if (numSamples > ValueProfile::numberOfBuckets)
            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
        numberOfSamplesInProfiles += numSamples;
        if (profile->m_bytecodeOffset < 0) {
            profile->computeUpdatedPrediction(locker);
            continue;
        }
        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
            numberOfLiveNonArgumentValueProfiles++;
        profile->computeUpdatedPrediction(locker);
    }

#if ENABLE(DFG_JIT)
    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
#endif
}

void CodeBlock::updateAllValueProfilePredictions()
{
    unsigned ignoredValue1, ignoredValue2;
    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
}

void CodeBlock::updateAllArrayPredictions()
{
    ConcurrentJITLocker locker(m_lock);

    for (unsigned i = m_arrayProfiles.size(); i--;)
        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);

    // Don't count these either, for similar reasons.
    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
        m_arrayAllocationProfiles[i].updateIndexingType();
}

void CodeBlock::updateAllPredictions()
{
    updateAllValueProfilePredictions();
    updateAllArrayPredictions();
}
bool CodeBlock::shouldOptimizeNow()
{
    if (Options::verboseOSR())
        dataLog("Considering optimizing ", *this, "...\n");

    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
        return true;

    updateAllArrayPredictions();

    unsigned numberOfLiveNonArgumentValueProfiles;
    unsigned numberOfSamplesInProfiles;
    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);

    if (Options::verboseOSR()) {
        dataLogF(
            "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
    }

    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
        return true;

    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
    m_optimizationDelayCounter++;
    optimizeAfterWarmUp();
    return false;
}
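
// Worked example of the gate above: if 40 of 50 value profiles are live, the
// liveness rate is 0.8; if 320 samples were collected against 50 profiles of, say,
// 8 buckets each, the fullness rate is 320.0 / 8 / 50 == 0.8. We optimize only when
// both rates meet their Options::desired* targets and the delay counter has matured;
// otherwise we bump the counter and re-arm the warm-up threshold.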
#if ENABLE(DFG_JIT)
void CodeBlock::tallyFrequentExitSites()
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);

    CodeBlock* profiledBlock = alternative();

    switch (jitType()) {
    case JITCode::DFGJIT: {
        DFG::JITCode* jitCode = m_jitCode->dfg();
        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
            DFG::OSRExit& exit = jitCode->osrExit[i];

            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
                continue;
        }
        break;
    }

#if ENABLE(FTL_JIT)
    case JITCode::FTLJIT: {
        // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
        // vector contains a totally different type, that just so happens to behave like
        // DFG::JITCode::osrExit.
        FTL::JITCode* jitCode = m_jitCode->ftl();
        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
            FTL::OSRExit& exit = jitCode->osrExit[i];

            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
                continue;
        }
        break;
    }
#endif

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
#endif // ENABLE(DFG_JIT)
#if ENABLE(VERBOSE_VALUE_PROFILE)
void CodeBlock::dumpValueProfiles()
{
    dataLog("ValueProfile for ", *this, ":\n");
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        if (profile->m_bytecodeOffset < 0) {
            ASSERT(profile->m_bytecodeOffset == -1);
            dataLogF("   arg = %u: ", i);
        } else
            dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
        if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
            dataLogF("<empty>\n");
            continue;
        }
        profile->dump(WTF::dataFile());
        dataLogF("\n");
    }
    dataLog("RareCaseProfile for ", *this, ":\n");
    for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
        RareCaseProfile* profile = rareCaseProfile(i);
        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
    }
    dataLog("SpecialFastCaseProfile for ", *this, ":\n");
    for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
        RareCaseProfile* profile = specialFastCaseProfile(i);
        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
    }
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
unsigned CodeBlock::frameRegisterCount()
{
    switch (jitType()) {
    case JITCode::InterpreterThunk:
        return LLInt::frameRegisterCountFor(this);

#if ENABLE(JIT)
    case JITCode::BaselineJIT:
        return JIT::frameRegisterCountFor(this);
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    case JITCode::DFGJIT:
    case JITCode::FTLJIT:
        return jitCode()->dfgCommon()->frameRegisterCount;
#endif // ENABLE(DFG_JIT)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    }
}

int CodeBlock::stackPointerOffset()
{
    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
}
size_t CodeBlock::predictedMachineCodeSize()
{
    // This will be called from CodeBlock::CodeBlock before either m_vm or the
    // instructions have been initialized. It's OK to return 0 because what will really
    // matter is the recomputation of this value when the slow path is triggered.
    if (!m_vm)
        return 0;

    if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
        return 0; // It's as good of a prediction as we'll get.

    // Be conservative: return a size that will be an overestimation 84% of the time.
    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();

    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
    // here is OK, since this whole method is just a heuristic.
    if (multiplier < 0 || multiplier > 1000)
        return 0;

    double doubleResult = multiplier * m_instructions.size();

    // Be even more paranoid: silently reject values that won't fit into a size_t. If
    // the function is so huge that we can't even fit it into virtual memory then we
    // should probably have some other guards in place to prevent us from even getting
    // to this point.
    if (doubleResult > std::numeric_limits<size_t>::max())
        return 0;

    return static_cast<size_t>(doubleResult);
}
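
// The "84% of the time" above is the one-sided normal bound: if bytes per bytecode
// word is roughly normally distributed, then mean + 1 * stddev sits at about the
// 84th percentile (Phi(1) ~= 0.8413), so this prediction overestimates the actual
// size about 84% of the time.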
bool CodeBlock::usesOpcode(OpcodeID opcodeID)
{
    Interpreter* interpreter = vm()->interpreter;
    Instruction* instructionsBegin = instructions().begin();
    unsigned instructionCount = instructions().size();

    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
        switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
#define DEFINE_OP(curOpcode, length) \
        case curOpcode: \
            if (curOpcode == opcodeID) \
                return true; \
            bytecodeOffset += length; \
            break;
            FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    return false;
}
CodeBlock::nameForRegister(VirtualRegister virtualRegister
)
3441 ConcurrentJITLocker
locker(symbolTable()->m_lock
);
3442 SymbolTable::Map::iterator end
= symbolTable()->end(locker
);
3443 for (SymbolTable::Map::iterator ptr
= symbolTable()->begin(locker
); ptr
!= end
; ++ptr
) {
3444 if (ptr
->value
.getIndex() == virtualRegister
.offset()) {
3445 // FIXME: This won't work from the compilation thread.
3446 // https://bugs.webkit.org/show_bug.cgi?id=115300
3447 return String(ptr
->key
);
3450 if (needsActivation() && virtualRegister
== activationRegister())
3451 return ASCIILiteral("activation");
3452 if (virtualRegister
== thisRegister())
3453 return ASCIILiteral("this");
3454 if (usesArguments()) {
3455 if (virtualRegister
== argumentsRegister())
3456 return ASCIILiteral("arguments");
3457 if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister
)
3458 return ASCIILiteral("real arguments");
3460 if (virtualRegister
.isArgument())
3461 return String::format("arguments[%3d]", virtualRegister
.toArgument()).impl();
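// Illustrative results (the register assignments are hypothetical): a variable "x"
// found in the symbol table comes back as "x"; an argument register comes back
// padded, e.g. "arguments[  2]", because of the %3d format; a temporary that
// matches none of the special registers falls through to the empty string.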
namespace {

struct VerifyCapturedDef {
    void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
    {
        unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();

        if (codeBlock->isConstantRegisterIndex(operand)) {
            codeBlock->beginValidationDidFail();
            dataLog("    At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
            codeBlock->endValidationDidFail();
            return;
        }

        switch (opcodeID) {
        case op_enter:
        case op_captured_mov:
        case op_init_lazy_reg:
        case op_create_arguments:
        case op_new_captured_func:
            // These opcodes are allowed to define captured locals.
            return;
        default:
            break;
        }

        VirtualRegister virtualReg(operand);
        if (!virtualReg.isLocal())
            return;

        if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
            codeBlock->beginValidationDidFail();
            dataLog("    At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
            codeBlock->endValidationDidFail();
            return;
        }
    }
};

} // anonymous namespace
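// VerifyCapturedDef is a callback functor: computeDefsForBytecodeOffset() calls
// operator() once for each operand defined by the instruction at the given offset.
// validate() (below) uses it to flag bytecode that writes directly to a captured
// local, which would bypass the variable's activation slot.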
void CodeBlock::validate()
{
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.

    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);

    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
        beginValidationDidFail();
        dataLog("    Wrong number of bits in result!\n");
        dataLog("    Result: ", liveAtHead, "\n");
        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
        endValidationDidFail();
    }

    for (unsigned i = m_numCalleeRegisters; i--;) {
        bool isCaptured = false;
        VirtualRegister reg = virtualRegisterForLocal(i);

        if (captureCount())
            isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();

        if (isCaptured) {
            if (!liveAtHead.get(i)) {
                beginValidationDidFail();
                dataLog("    Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
                dataLog("    Result: ", liveAtHead, "\n");
                endValidationDidFail();
            }
        } else {
            if (liveAtHead.get(i)) {
                beginValidationDidFail();
                dataLog("    Variable loc", i, " is expected to be dead.\n");
                dataLog("    Result: ", liveAtHead, "\n");
                endValidationDidFail();
            }
        }
    }

    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
        Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
        OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);

        VerifyCapturedDef verifyCapturedDef;
        computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);

        bytecodeOffset += opcodeLength(opcodeID);
    }
}
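// A worked example of the capture-range test above, with hypothetical offsets:
// captured locals occupy the half-open range (captureEnd(), captureStart()],
// counted downward. If captureStart() == -3 and captureEnd() == -6, then locals
// at offsets -3, -4, and -5 are captured and must be live at bytecode offset 0,
// while every other local must be dead there.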
void CodeBlock::beginValidationDidFail()
{
    dataLog("Validation failure in ", *this, ":\n");
}
void CodeBlock::endValidationDidFail()
{
    dataLog("Validation failure.\n");
    RELEASE_ASSERT_NOT_REACHED();
}
void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    m_steppingMode = mode;
    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerStepping);
}
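// Both debugger hooks above discard optimized code on purpose: DFG/FTL code may
// have inlined, hoisted, or dead-stripped exactly the statements the debugger
// needs to observe, so execution drops to a lower tier that can honor
// breakpoints and per-statement stepping.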
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
{
    return tryBinarySearch<RareCaseProfile, int>(
        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
        getRareCaseProfileBytecodeOffset);
}
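// tryBinarySearch relies on m_rareCaseProfiles being sorted by bytecode offset
// (profiles are appended in bytecode order as code is generated), and uses
// getRareCaseProfileBytecodeOffset to extract each element's key. It returns
// null when the offset has no profile, so callers must tolerate a null result.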
DFG::CapabilityLevel CodeBlock::capabilityLevel()
{
    DFG::CapabilityLevel result = capabilityLevelInternal();
    m_capabilityLevelState = result;
    return result;
}