/*
 * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "CodeBlock.h"

#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "FunctionExecutableDump.h"
#include "Interpreter.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSNameScope.h"
#include "LLIntEntrypoint.h"
#include "LowLevelInterpreter.h"
#include "JSCInlines.h"
#include "PolymorphicGetByIdList.h"
#include "PolymorphicPutByIdList.h"
#include "ProfilerDatabase.h"
#include "ReduceWhitespace.h"
#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {
CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    default:
        CRASH();
        return CString("", 0);
    }
}
bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}
CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
    }
    return m_hash;
}
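// Why the assertion above exists: CodeBlockHash digests the executable's
// source together with the specialization kind (call vs. construct), so the
// same function hashes differently as a constructor. The hash is computed
// lazily, and computing it on a concurrent compilation thread would be
// unsafe, hence isSafeToComputeHash() gates the slow path.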
CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}
CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}
CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
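// For orientation, a line produced by dumpAssumingJITType() resembles the
// following (all names, pointers, and counts hypothetical):
//   foo#Az3bqx:[0x10d5f8000->0x10d5c0000, BaselineFunctionCall, 102] (StrictMode)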
void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}
static CString idName(int id0, const Identifier& ident)
{
    return toCString(ident.impl(), "(@id", id0, ")");
}
CString CodeBlock::registerName(int r) const
{
    if (isConstantRegisterIndex(r))
        return constantName(r);

    return toCString(VirtualRegister(r));
}
CString CodeBlock::constantName(int index) const
{
    JSValue value = getConstant(index);
    return toCString(value, "(", VirtualRegister(index), ")");
}
static CString regexpToSourceString(RegExp* regExp)
{
    char postfix[5] = { '/', 0, 0, 0, 0 };
    int index = 1;
    if (regExp->global())
        postfix[index++] = 'g';
    if (regExp->ignoreCase())
        postfix[index++] = 'i';
    if (regExp->multiline())
        postfix[index] = 'm';

    return toCString("/", regExp->pattern().impl(), postfix);
}
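// For example, a RegExp with pattern a+b and the global and ignoreCase flags
// set is rendered by the function above as "/a+b/gi".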
static CString regexpName(int re, RegExp* regexp)
{
    return toCString(regexpToSourceString(regexp), "(@re", re, ")");
}
NEVER_INLINE static const char* debugHookName(int debugHookID)
{
    switch (static_cast<DebugHookID>(debugHookID)) {
    case DidEnterCallFrame:
        return "didEnterCallFrame";
    case WillLeaveCallFrame:
        return "willLeaveCallFrame";
    case WillExecuteStatement:
        return "willExecuteStatement";
    case WillExecuteProgram:
        return "willExecuteProgram";
    case DidExecuteProgram:
        return "didExecuteProgram";
    case DidReachBreakpoint:
        return "didReachBreakpoint";
    }

    RELEASE_ASSERT_NOT_REACHED();
    return "";
}
void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;

    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
}
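// Note the convention shared by the print helpers in this file: on entry `it`
// points at the opcode word, each (++it)->u.operand read consumes the next
// operand slot in the instruction stream, and `it` is left on the last slot
// consumed so a caller (or a profiling dumper) can continue from there.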
void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int r2 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
}
void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
{
    int r0 = (++it)->u.operand;
    int offset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
}
void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        op = 0;
#endif
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}
static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
{
    if (!structure)
        return;

    out.printf("%s = %p", name, structure);

    PropertyOffset offset = structure->getConcurrently(ident.impl());
    if (offset != invalidOffset)
        out.printf(" (offset = %d)", offset);
}
static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
{
    out.printf("chain = %p: [", chain);
    bool first = true;
    for (WriteBarrier<Structure>* currentStructure = chain->head();
        *currentStructure;
        ++currentStructure) {
        if (first)
            first = false;
        else
            out.printf(", ");
        dumpStructure(out, "struct", currentStructure->get(), ident);
    }
    out.printf("]");
}
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[3].u.operand);

    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.

    if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
        out.printf(" llint(array_length)");
    else if (Structure* structure = instruction[4].u.structure.get()) {
        out.printf(" llint(");
        dumpStructure(out, "struct", structure, ident);
        out.printf(")");
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");

        if (stubInfo.seen) {
            out.printf(" jit(");

            Structure* baseStructure = 0;
            Structure* prototypeStructure = 0;
            StructureChain* chain = 0;
            PolymorphicGetByIdList* list = 0;

            switch (stubInfo.accessType) {
            case access_get_by_id_self:
                out.printf("self");
                baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
                break;
            case access_get_by_id_list:
                out.printf("list");
                list = stubInfo.u.getByIdList.list;
                break;
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            if (baseStructure) {
                out.printf(", ");
                dumpStructure(out, "struct", baseStructure, ident);
            }

            if (prototypeStructure) {
                out.printf(", ");
                dumpStructure(out, "prototypeStruct", prototypeStructure, ident);
            }

            if (chain) {
                out.printf(", ");
                dumpChain(out, chain, ident);
            }

            if (list) {
                out.printf(", list = %p: [", list);
                for (unsigned i = 0; i < list->size(); ++i) {
                    if (i)
                        out.printf(", ");
                    out.printf("(");
                    dumpStructure(out, "base", list->at(i).structure(), ident);
                    if (list->at(i).chain()) {
                        out.printf(", ");
                        dumpChain(out, list->at(i).chain(), ident);
                    }
                    out.printf(")");
                }
                out.printf("]");
            }
            out.printf(")");
        }
    }
#endif
}
void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[2].u.operand);

    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.

    if (Structure* structure = instruction[4].u.structure.get()) {
        switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
        case op_put_by_id:
        case op_put_by_id_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "struct", structure, ident);
            out.print(")");
            break;

        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "prev", structure, ident);
            out.print(", ");
            dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
                out.print(", ");
                dumpChain(out, chain, ident);
            }
            out.print(")");
            break;

        default:
            out.print(" llint(unknown)");
            break;
        }
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");

        if (stubInfo.seen) {
            out.printf(" jit(");

            switch (stubInfo.accessType) {
            case access_put_by_id_replace:
                out.print("replace, ");
                dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
                break;
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct:
                out.print("transition, ");
                dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
                out.print(", ");
                dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
                if (StructureChain* chain = stubInfo.u.putByIdTransition.chain.get()) {
                    out.print(", ");
                    dumpChain(out, chain, ident);
                }
                break;
            case access_put_by_id_list: {
                out.printf("list = [");
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                CommaPrinter comma;
                for (unsigned i = 0; i < list->size(); ++i) {
                    out.print(comma, "(");
                    const PutByIdAccess& access = list->at(i);

                    if (access.isReplace()) {
                        out.print("replace, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isSetter()) {
                        out.print("setter, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isCustom()) {
                        out.print("custom, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isTransition()) {
                        out.print("transition, ");
                        dumpStructure(out, "prev", access.oldStructure(), ident);
                        out.print(", ");
                        dumpStructure(out, "next", access.newStructure(), ident);
                        if (access.chain()) {
                            out.print(", ");
                            dumpChain(out, access.chain(), ident);
                        }
                    } else
                        out.print("unknown");

                    out.print(")");
                }
                out.print("]");
                break;
            }
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            out.printf(")");
        }
    }
#endif
}
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }

        if (jitType() != JITCode::FTLJIT)
            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#endif
    }
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}
void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
    it += 5;
}
void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        String source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().toString());
}
void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

void CodeBlock::dumpBytecode(PrintStream& out)
{
    // We only use the ExecState* for things that don't actually lead to JS execution,
    // like converting a JSString to a String. Hence the globalExec is appropriate.
    ExecState* exec = m_globalObject->globalExec();

    size_t instructionCount = 0;

    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
        ++instructionCount;

    out.print(*this);
    out.printf(
        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
        static_cast<unsigned long>(instructions().size()),
        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
        m_numParameters, m_numCalleeRegisters, m_numVars);
    if (needsActivation() && codeType() == FunctionCode)
        out.printf("; lexical environment in r%d", activationRegister().offset());
    out.printf("\n");

    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);

    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end; ++it)
        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);

    if (numberOfIdentifiers()) {
        out.printf("\nIdentifiers:\n");
        size_t i = 0;
        do {
            out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
            ++i;
        } while (i != numberOfIdentifiers());
    }

    if (!m_constantRegisters.isEmpty()) {
        out.printf("\nConstants:\n");
        size_t i = 0;
        do {
            const char* sourceCodeRepresentationDescription = nullptr;
            switch (m_constantsSourceCodeRepresentation[i]) {
            case SourceCodeRepresentation::Double:
                sourceCodeRepresentationDescription = ": in source as double";
                break;
            case SourceCodeRepresentation::Integer:
                sourceCodeRepresentationDescription = ": in source as integer";
                break;
            case SourceCodeRepresentation::Other:
                sourceCodeRepresentationDescription = "";
                break;
            }
            out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
            ++i;
        } while (i < m_constantRegisters.size());
    }

    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
        out.printf("\nm_regexps:\n");
        size_t i = 0;
        do {
            out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
            ++i;
        } while (i < count);
    }

    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
        out.printf("\nException Handlers:\n");
        unsigned i = 0;
        do {
            HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] } %s\n",
                i + 1, handler.start, handler.end, handler.target, handler.scopeDepth, handler.typeName());
            ++i;
        } while (i < m_rareData->m_exceptionHandlers.size());
    }

    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
        out.printf("Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            int entry = 0;
            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
                if (!*iter)
                    continue;
                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
            }
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_switchJumpTables.size());
    }

    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
        out.printf("\nString Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_stringSwitchJumpTables.size());
    }

    out.printf("\n");
}
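// How these dumps usually get triggered: either a developer calls
// dumpBytecode() directly from a debugger, or the VM dumps each CodeBlock as
// it links when bytecode dumping is switched on via a JSC option (in builds
// of this era that option is believed to be Options::dumpGeneratedBytecodes();
// the exact name is an assumption, not something this file shows).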
void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
    if (hasPrintedProfiling) {
        out.print("; ");
        return;
    }

    out.print("    ");
    hasPrintedProfiling = true;
}
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    CString description = it->u.profile->briefDescription(locker);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    if (!it->u.arrayProfile)
        return;
    CString description = it->u.arrayProfile->briefDescription(locker, this);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
{
    if (!profile || !profile->m_counter)
        return;

    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(name, profile->m_counter);
}
void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
{
    out.printf("[%4d] %-17s ", location, op);
}
void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
{
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s", registerName(operand).data());
}
void CodeBlock::dumpBytecode(
    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    int location = it - begin;
    bool hasPrintedProfiling = false;
    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
    switch (opcode) {
    case op_enter: {
        printLocationAndOp(out, exec, location, it, "enter");
        break;
    }
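    // Each case below prints one line in the format established by
    // printLocationAndOp(): a right-aligned bytecode index in brackets, the
    // opcode name padded to 17 columns, then the operands. For example:
    //   [  12] get_by_id         loc3, loc1, length(@id0)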
    case op_create_lexical_environment: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_lexical_environment");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_get_scope: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
        break;
    }
    case op_create_direct_arguments: {
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_direct_arguments");
        out.printf("%s", registerName(r0).data());
        break;
    }
    case op_create_scoped_arguments: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_create_out_of_band_arguments: {
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
        out.printf("%s", registerName(r0).data());
        break;
    }
    case op_create_this: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        unsigned cachedFunction = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_this");
        out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
        break;
    }
    case op_to_this: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
        Structure* structure = (++it)->u.structure.get();
        if (structure)
            out.print(", cache(struct = ", RawPointer(structure), ")");
        out.print(", ", (++it)->u.toThisStatus);
        break;
    }
    case op_check_tdz: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
        break;
    }
    case op_new_object: {
        int r0 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_object");
        out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
        ++it; // Skip object allocation profile.
        break;
    }
    case op_new_array: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array");
        out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_with_size: {
        int dst = (++it)->u.operand;
        int length = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_with_size");
        out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_buffer: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_buffer");
        out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_regexp: {
        int r0 = (++it)->u.operand;
        int re0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_regexp");
        out.printf("%s, ", registerName(r0).data());
        if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
            out.printf("%s", regexpName(re0, regexp(re0)).data());
        else
            out.printf("bad_regexp(%d)", re0);
        break;
    }
    case op_mov: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "mov");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_profile_type: {
        int r0 = (++it)->u.operand;
        ++it;
        ++it;
        ++it;
        ++it;
        printLocationAndOp(out, exec, location, it, "op_profile_type");
        out.printf("%s", registerName(r0).data());
        break;
    }
    case op_profile_control_flow: {
        BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
        printLocationAndOp(out, exec, location, it, "profile_control_flow");
        out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
        break;
    }
    case op_not: {
        printUnaryOp(out, exec, location, it, "not");
        break;
    }
    case op_eq: {
        printBinaryOp(out, exec, location, it, "eq");
        break;
    }
    case op_eq_null: {
        printUnaryOp(out, exec, location, it, "eq_null");
        break;
    }
    case op_neq: {
        printBinaryOp(out, exec, location, it, "neq");
        break;
    }
    case op_neq_null: {
        printUnaryOp(out, exec, location, it, "neq_null");
        break;
    }
    case op_stricteq: {
        printBinaryOp(out, exec, location, it, "stricteq");
        break;
    }
    case op_nstricteq: {
        printBinaryOp(out, exec, location, it, "nstricteq");
        break;
    }
    case op_less: {
        printBinaryOp(out, exec, location, it, "less");
        break;
    }
    case op_lesseq: {
        printBinaryOp(out, exec, location, it, "lesseq");
        break;
    }
    case op_greater: {
        printBinaryOp(out, exec, location, it, "greater");
        break;
    }
    case op_greatereq: {
        printBinaryOp(out, exec, location, it, "greatereq");
        break;
    }
    case op_inc: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
        break;
    }
    case op_dec: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
        break;
    }
    case op_to_number: {
        printUnaryOp(out, exec, location, it, "to_number");
        break;
    }
    case op_to_string: {
        printUnaryOp(out, exec, location, it, "to_string");
        break;
    }
    case op_negate: {
        printUnaryOp(out, exec, location, it, "negate");
        break;
    }
    case op_add: {
        printBinaryOp(out, exec, location, it, "add");
        ++it;
        break;
    }
    case op_mul: {
        printBinaryOp(out, exec, location, it, "mul");
        ++it;
        break;
    }
    case op_div: {
        printBinaryOp(out, exec, location, it, "div");
        ++it;
        break;
    }
    case op_mod: {
        printBinaryOp(out, exec, location, it, "mod");
        break;
    }
    case op_sub: {
        printBinaryOp(out, exec, location, it, "sub");
        ++it;
        break;
    }
    case op_lshift: {
        printBinaryOp(out, exec, location, it, "lshift");
        break;
    }
    case op_rshift: {
        printBinaryOp(out, exec, location, it, "rshift");
        break;
    }
    case op_urshift: {
        printBinaryOp(out, exec, location, it, "urshift");
        break;
    }
    case op_bitand: {
        printBinaryOp(out, exec, location, it, "bitand");
        ++it;
        break;
    }
    case op_bitxor: {
        printBinaryOp(out, exec, location, it, "bitxor");
        ++it;
        break;
    }
    case op_bitor: {
        printBinaryOp(out, exec, location, it, "bitor");
        ++it;
        break;
    }
    case op_check_has_instance: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "check_has_instance");
        out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
        break;
    }
    case op_instanceof: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "instanceof");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_unsigned: {
        printUnaryOp(out, exec, location, it, "unsigned");
        break;
    }
    case op_typeof: {
        printUnaryOp(out, exec, location, it, "typeof");
        break;
    }
    case op_is_undefined: {
        printUnaryOp(out, exec, location, it, "is_undefined");
        break;
    }
    case op_is_boolean: {
        printUnaryOp(out, exec, location, it, "is_boolean");
        break;
    }
    case op_is_number: {
        printUnaryOp(out, exec, location, it, "is_number");
        break;
    }
    case op_is_string: {
        printUnaryOp(out, exec, location, it, "is_string");
        break;
    }
    case op_is_object: {
        printUnaryOp(out, exec, location, it, "is_object");
        break;
    }
    case op_is_object_or_null: {
        printUnaryOp(out, exec, location, it, "is_object_or_null");
        break;
    }
    case op_is_function: {
        printUnaryOp(out, exec, location, it, "is_function");
        break;
    }
    case op_in: {
        printBinaryOp(out, exec, location, it, "in");
        break;
    }
    case op_init_global_const_nop: {
        printLocationAndOp(out, exec, location, it, "init_global_const_nop");
        ++it;
        ++it;
        ++it;
        ++it;
        break;
    }
    case op_init_global_const: {
        WriteBarrier<Unknown>* variablePointer = (++it)->u.variablePointer;
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "init_global_const");
        out.printf("g%d(%p), %s", m_globalObject->findVariableIndex(variablePointer).offset(), variablePointer, registerName(r0).data());
        break;
    }
    case op_get_by_id:
    case op_get_by_id_out_of_line:
    case op_get_array_length: {
        printGetByIdOp(out, exec, location, it);
        printGetByIdCacheStatus(out, exec, location, stubInfos);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_by_id: {
        printPutByIdOp(out, exec, location, it, "put_by_id");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_by_id_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_by_id_transition_direct: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_by_id_transition_direct_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_by_id_transition_normal: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_by_id_transition_normal_out_of_line: {
        printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
        printPutByIdCacheStatus(out, exec, location, stubInfos);
        break;
    }
    case op_put_getter_by_id: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_getter_by_id");
        out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
        break;
    }
    case op_put_setter_by_id: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_setter_by_id");
        out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
        break;
    }
    case op_put_getter_setter: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_getter_setter");
        out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_del_by_id: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "del_by_id");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
        break;
    }
    case op_get_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "get_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_by_val_direct: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_val_direct");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        dumpArrayProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_del_by_val: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int r2 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "del_by_val");
        out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
        break;
    }
    case op_put_by_index: {
        int r0 = (++it)->u.operand;
        unsigned n0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_by_index");
        out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
        break;
    }
    case op_jmp: {
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jmp");
        out.printf("%d(->%d)", offset, location + offset);
        break;
    }
    case op_jtrue: {
        printConditionalJump(out, exec, begin, it, location, "jtrue");
        break;
    }
    case op_jfalse: {
        printConditionalJump(out, exec, begin, it, location, "jfalse");
        break;
    }
    case op_jeq_null: {
        printConditionalJump(out, exec, begin, it, location, "jeq_null");
        break;
    }
    case op_jneq_null: {
        printConditionalJump(out, exec, begin, it, location, "jneq_null");
        break;
    }
    case op_jneq_ptr: {
        int r0 = (++it)->u.operand;
        Special::Pointer pointer = (++it)->u.specialPointer;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jneq_ptr");
        out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
        break;
    }
    case op_jless: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jless");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jlesseq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jlesseq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jgreater: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jgreater");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jgreatereq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jgreatereq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jnless: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jnless");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jnlesseq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jnlesseq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jngreater: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jngreater");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_jngreatereq: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "jngreatereq");
        out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
        break;
    }
    case op_loop_hint: {
        printLocationAndOp(out, exec, location, it, "loop_hint");
        break;
    }
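    // loop_hint marks a loop back-edge. Beyond being named here, it is the
    // point where the lower execution tiers count iterations and consider
    // tiering up (OSR entry into optimized code); that machinery lives
    // elsewhere, this dumper only labels the spot.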
    case op_switch_imm: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_imm");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_switch_char: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_char");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_switch_string: {
        int tableIndex = (++it)->u.operand;
        int defaultTarget = (++it)->u.operand;
        int scrutineeRegister = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "switch_string");
        out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
        break;
    }
    case op_new_func: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int f0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_func");
        out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
        break;
    }
    case op_new_func_exp: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int f0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_func_exp");
        out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
        break;
    }
    case op_call: {
        printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_call_eval: {
        printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_construct_varargs:
    case op_call_varargs: {
        int result = (++it)->u.operand;
        int callee = (++it)->u.operand;
        int thisValue = (++it)->u.operand;
        int arguments = (++it)->u.operand;
        int firstFreeRegister = (++it)->u.operand;
        int varArgOffset = (++it)->u.operand;
        ++it;
        printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
        out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_ret: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
        break;
    }
    case op_construct: {
        printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
        break;
    }
    case op_strcat: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int count = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "strcat");
        out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
        break;
    }
    case op_to_primitive: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "to_primitive");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_get_enumerable_length: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
        out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
        it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
        break;
    }
    case op_has_indexed_property: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        int propertyName = it[3].u.operand;
        ArrayProfile* arrayProfile = it[4].u.arrayProfile;
        printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
        out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
        it += OPCODE_LENGTH(op_has_indexed_property) - 1;
        break;
    }
    case op_has_structure_property: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        int propertyName = it[3].u.operand;
        int enumerator = it[4].u.operand;
        printLocationAndOp(out, exec, location, it, "op_has_structure_property");
        out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
        it += OPCODE_LENGTH(op_has_structure_property) - 1;
        break;
    }
    case op_has_generic_property: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        int propertyName = it[3].u.operand;
        printLocationAndOp(out, exec, location, it, "op_has_generic_property");
        out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
        it += OPCODE_LENGTH(op_has_generic_property) - 1;
        break;
    }
    case op_get_direct_pname: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        int propertyName = it[3].u.operand;
        int index = it[4].u.operand;
        int enumerator = it[5].u.operand;
        ValueProfile* profile = it[6].u.profile;
        printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
        out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
        it += OPCODE_LENGTH(op_get_direct_pname) - 1;
        break;
    }
    case op_get_property_enumerator: {
        int dst = it[1].u.operand;
        int base = it[2].u.operand;
        printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
        out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
        it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
        break;
    }
    case op_enumerator_structure_pname: {
        int dst = it[1].u.operand;
        int enumerator = it[2].u.operand;
        int index = it[3].u.operand;
        printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
        out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
        it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
        break;
    }
    case op_enumerator_generic_pname: {
        int dst = it[1].u.operand;
        int enumerator = it[2].u.operand;
        int index = it[3].u.operand;
        printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
        out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
        it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
        break;
    }
    case op_to_index_string: {
        int dst = it[1].u.operand;
        int index = it[2].u.operand;
        printLocationAndOp(out, exec, location, it, "op_to_index_string");
        out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
        it += OPCODE_LENGTH(op_to_index_string) - 1;
        break;
    }
    case op_push_with_scope: {
        int dst = (++it)->u.operand;
        int newScope = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "push_with_scope");
        out.printf("%s, %s", registerName(dst).data(), registerName(newScope).data());
        break;
    }
    case op_pop_scope: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "pop_scope", r0);
        break;
    }
    case op_push_name_scope: {
        int dst = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int k0 = (++it)->u.operand;
        JSNameScope::Type scopeType = (JSNameScope::Type)(++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "push_name_scope");
        out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(r1).data(), constantName(k0).data(), (scopeType == JSNameScope::FunctionNameScope) ? "functionScope" : ((scopeType == JSNameScope::CatchScope) ? "catchScope" : "unknownScopeType"));
        break;
    }
    case op_catch: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "catch");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_throw: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
        break;
    }
    case op_throw_static_error: {
        int k0 = (++it)->u.operand;
        int k1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "throw_static_error");
        out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
        break;
    }
    case op_debug: {
        int debugHookID = (++it)->u.operand;
        int hasBreakpointFlag = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "debug");
        out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
        break;
    }
    case op_profile_will_call: {
        int function = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
        break;
    }
    case op_profile_did_call: {
        int function = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
        break;
    }
    case op_end: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
        break;
    }
    case op_resolve_scope: {
        int r0 = (++it)->u.operand;
        int scope = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        int depth = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "resolve_scope");
        out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
            modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
            depth);
        break;
    }
    case op_get_from_scope: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        ++it; // Structure
        int operand = (++it)->u.operand; // Operand
        printLocationAndOp(out, exec, location, it, "get_from_scope");
        out.print(registerName(r0), ", ", registerName(r1));
        if (static_cast<unsigned>(id0) == UINT_MAX)
            out.print(", anonymous");
        else
            out.print(", ", idName(id0, identifier(id0)));
        out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_to_scope: {
        int r0 = (++it)->u.operand;
        int id0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
        ++it; // Structure
        int operand = (++it)->u.operand; // Operand
        printLocationAndOp(out, exec, location, it, "put_to_scope");
        out.print(registerName(r0));
        if (static_cast<unsigned>(id0) == UINT_MAX)
            out.print(", anonymous");
        else
            out.print(", ", idName(id0, identifier(id0)));
        out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand);
        break;
    }
    case op_get_from_arguments: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "get_from_arguments");
        out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
        dumpValueProfiling(out, it, hasPrintedProfiling);
        break;
    }
    case op_put_to_arguments: {
        int r0 = (++it)->u.operand;
        int offset = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "put_to_arguments");
        out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
    dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);

#if ENABLE(DFG_JIT)
    Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
    if (!exitSites.isEmpty()) {
        out.print(" !! frequent exits: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < exitSites.size(); ++i)
            out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
    }
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(location);
#endif // ENABLE(DFG_JIT)
    out.print("\n");
}
void CodeBlock::dumpBytecode(
    PrintStream& out, unsigned bytecodeOffset,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    ExecState* exec = m_globalObject->globalExec();
    const Instruction* it = instructions().begin() + bytecodeOffset;
    dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
}
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
    macro(exceptionHandlers) \
    macro(switchJumpTables) \
    macro(stringSwitchJumpTables) \
    macro(evalCodeCache) \
    macro(expressionInfo) \
    macro(callReturnIndexVector)
template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
    return vector.capacity() * sizeof(T);
}
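// Note that sizeInBytes() reports vector.capacity(), not vector.size(): it is
// meant to approximate the memory footprint of the backing store, including
// slack, rather than the number of live elements.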
namespace {

class PutToScopeFireDetail : public FireDetail {
public:
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)
        , m_ident(ident)
    {
    }

    virtual void dump(PrintStream& out) const override
    {
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
    }

private:
    CodeBlock* m_codeBlock;
    const Identifier& m_ident;
};

} // anonymous namespace
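// A FireDetail subclass like PutToScopeFireDetail exists purely for
// diagnostics: when a watchpoint set fires, the detail object can describe,
// via dump(), why the invalidation happened and on whose behalf.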
CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
    : m_globalObject(other.m_globalObject)
    , m_heap(other.m_heap)
    , m_numCalleeRegisters(other.m_numCalleeRegisters)
    , m_numVars(other.m_numVars)
    , m_isConstructor(other.m_isConstructor)
    , m_shouldAlwaysBeInlined(true)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_scopeRegister(other.m_scopeRegister)
    , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
    , m_isStrictMode(other.m_isStrictMode)
    , m_needsActivation(other.m_needsActivation)
    , m_mayBeExecuting(false)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_codeType(other.m_codeType)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_hash(other.m_hash)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);

    ASSERT(m_heap->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    if (SymbolTable* symbolTable = other.symbolTable())
        m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);

    setNumParameters(other.numParameters());
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }

    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
}
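// Reading the initializer list above: the CopyParsedBlockTag constructor is a
// sketch of "share the parse, reset the execution state". Everything derived
// from parsing (instructions, constants, function declarations and
// expressions, rare data) is shared with `other`, while counters such as
// m_osrExitCounter and m_reoptimizationRetryCounter restart at zero and
// optimizeAfterWarmUp() re-arms the tier-up thresholds, so the copy begins
// executing cold.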
1729 CodeBlock::CodeBlock(ScriptExecutable
* ownerExecutable
, UnlinkedCodeBlock
* unlinkedCodeBlock
, JSScope
* scope
, PassRefPtr
<SourceProvider
> sourceProvider
, unsigned sourceOffset
, unsigned firstLineColumnOffset
)
1730 : m_globalObject(scope
->globalObject()->vm(), ownerExecutable
, scope
->globalObject())
1731 , m_heap(&m_globalObject
->vm().heap
)
1732 , m_numCalleeRegisters(unlinkedCodeBlock
->m_numCalleeRegisters
)
1733 , m_numVars(unlinkedCodeBlock
->m_numVars
)
1734 , m_isConstructor(unlinkedCodeBlock
->isConstructor())
1735 , m_shouldAlwaysBeInlined(true)
1736 , m_didFailFTLCompilation(false)
1737 , m_hasBeenCompiledWithFTL(false)
1738 , m_unlinkedCode(m_globalObject
->vm(), ownerExecutable
, unlinkedCodeBlock
)
1739 , m_hasDebuggerStatement(false)
1740 , m_steppingMode(SteppingModeDisabled
)
1741 , m_numBreakpoints(0)
1742 , m_ownerExecutable(m_globalObject
->vm(), ownerExecutable
, ownerExecutable
)
1743 , m_vm(unlinkedCodeBlock
->vm())
1744 , m_thisRegister(unlinkedCodeBlock
->thisRegister())
1745 , m_scopeRegister(unlinkedCodeBlock
->scopeRegister())
1746 , m_lexicalEnvironmentRegister(unlinkedCodeBlock
->activationRegister())
1747 , m_isStrictMode(unlinkedCodeBlock
->isStrictMode())
1748 , m_needsActivation(unlinkedCodeBlock
->hasActivationRegister() && unlinkedCodeBlock
->codeType() == FunctionCode
)
1749 , m_mayBeExecuting(false)
1750 , m_source(sourceProvider
)
1751 , m_sourceOffset(sourceOffset
)
1752 , m_firstLineColumnOffset(firstLineColumnOffset
)
1753 , m_codeType(unlinkedCodeBlock
->codeType())
1754 , m_osrExitCounter(0)
1755 , m_optimizationDelayCounter(0)
1756 , m_reoptimizationRetryCounter(0)
1758 , m_capabilityLevelState(DFG::CapabilityLevelNotSet
)
{
    m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);

    ASSERT(m_heap->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    bool didCloneSymbolTable = false;

    if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
        if (m_vm->typeProfiler()) {
            ConcurrentJITLocker locker(symbolTable->m_lock);
            symbolTable->prepareForTypeProfiling(locker);
        }

        if (codeType() == FunctionCode && symbolTable->scopeSize()) {
            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneScopePart(*m_vm));
            didCloneSymbolTable = true;
        } else
            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
    }

    setNumParameters(unlinkedCodeBlock->numParameters());

    if (vm()->typeProfiler() || vm()->controlFlowProfiler())
        vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());

    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
    if (unlinkedCodeBlock->usesGlobalObject())
        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
    }

    m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (vm()->typeProfiler() || vm()->controlFlowProfiler())
            vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (vm()->typeProfiler() || vm()->controlFlowProfiler())
            vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();
        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
            m_rareData->m_constantBuffers.grow(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
                m_rareData->m_constantBuffers[i] = buffer;
            }
        }
        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            size_t nonLocalScopeDepth = scope->depth();
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
                handler.initialize(unlinkedHandler, nonLocalScopeDepth,
                    CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
#else
                handler.initialize(unlinkedHandler, nonLocalScopeDepth);
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }

    // Allocate metadata buffers for the bytecode.
    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
        m_llintCallLinkInfos.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
        m_arrayProfiles.grow(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
        m_arrayAllocationProfiles.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
        m_valueProfiles.resizeToFit(size);
    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
        m_objectAllocationProfiles.resizeToFit(size);

    // Copy and translate the UnlinkedInstructions.
    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());

    Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
    for (unsigned i = 0; !instructionReader.atEnd(); ) {
        const UnlinkedInstruction* pc = instructionReader.next();

        unsigned opLength = opcodeLength(pc[0].u.opcode);

        instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
        for (size_t j = 1; j < opLength; ++j) {
            if (sizeof(int32_t) != sizeof(intptr_t))
                instructions[i + j].u.pointer = 0;
            instructions[i + j].u.operand = pc[j].u.operand;
        }
        switch (pc[0].u.opcode) {
        case op_has_indexed_property: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_call_varargs:
        case op_construct_varargs:
        case op_get_by_val: {
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            FALLTHROUGH;
        }
        case op_get_direct_pname:
        case op_get_by_id:
        case op_get_from_arguments: {
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            break;
        }
        case op_put_by_val: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_put_by_val_direct: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }

        case op_new_array:
        case op_new_array_buffer:
        case op_new_array_with_size: {
            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
            break;
        }
        case op_new_object: {
            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
            int inferredInlineCapacity = pc[opLength - 2].u.operand;

            instructions[i + opLength - 1] = objectAllocationProfile;
            objectAllocationProfile->initialize(*vm(),
                m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
            break;
        }

        case op_call:
        case op_call_eval: {
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            break;
        }
        case op_construct: {
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;
            break;
        }
        case op_get_by_id_out_of_line:
        case op_get_array_length:
            CRASH();

        case op_init_global_const_nop: {
            ASSERT(codeType() == GlobalCode);
            Identifier ident = identifier(pc[4].u.operand);
            SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
            if (entry.isNull())
                break;

            instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
            instructions[i + 1] = &m_globalObject->variableAt(entry.varOffset().scopeOffset());
            break;
        }

        case op_resolve_scope: {
            const Identifier& ident = identifier(pc[3].u.operand);
            ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
            RELEASE_ASSERT(type != LocalClosureVar);

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, type);
            instructions[i + 4].u.operand = op.type;
            instructions[i + 5].u.operand = op.depth;
            if (op.lexicalEnvironment)
                instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
            break;
        }

        case op_get_from_scope: {
            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
            ASSERT(profile->m_bytecodeOffset == -1);
            profile->m_bytecodeOffset = i;
            instructions[i + opLength - 1] = profile;

            // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand

            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
            if (modeAndType.type() == LocalClosureVar) {
                instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
                break;
            }

            const Identifier& ident = identifier(pc[3].u.operand);

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, modeAndType.type());

            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.structure)
                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
            break;
        }

        case op_put_to_scope: {
            // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
            if (modeAndType.type() == LocalClosureVar) {
                // Only do watching if the property we're putting to is not anonymous.
                if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
                    RELEASE_ASSERT(didCloneSymbolTable);
                    const Identifier& ident = identifier(pc[2].u.operand);
                    ConcurrentJITLocker locker(m_symbolTable->m_lock);
                    SymbolTable::Map::iterator iter = m_symbolTable->find(locker, ident.impl());
                    ASSERT(iter != m_symbolTable->end(locker));
                    iter->value.prepareToWatch();
                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
                } else
                    instructions[i + 5].u.watchpointSet = nullptr;
                break;
            }

            const Identifier& ident = identifier(pc[2].u.operand);

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Put, modeAndType.type());

            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                if (op.watchpointSet)
                    op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
            } else if (op.structure)
                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);

            break;
        }

        case op_profile_type: {
            RELEASE_ASSERT(vm()->typeProfiler());
            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
            size_t instructionOffset = i + opLength - 1;
            unsigned divotStart, divotEnd;
            GlobalVariableID globalVariableID = 0;
            RefPtr<TypeSet> globalTypeSet;
            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
            VirtualRegister profileRegister(pc[1].u.operand);
            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
            SymbolTable* symbolTable = nullptr;

            switch (flag) {
            case ProfileTypeBytecodePutToScope:
            case ProfileTypeBytecodeGetFromScope: {
                const Identifier& ident = identifier(pc[4].u.operand);
                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);

                // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
                // https://bugs.webkit.org/show_bug.cgi?id=135184
                if (op.type == ClosureVar)
                    symbolTable = op.lexicalEnvironment->symbolTable();
                else if (op.type == GlobalVar)
                    symbolTable = m_globalObject.get()->symbolTable();

                if (symbolTable) {
                    ConcurrentJITLocker locker(symbolTable->m_lock);
                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                    symbolTable->prepareForTypeProfiling(locker);
                    globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
                } else
                    globalVariableID = TypeProfilerNoGlobalIDExists;

                break;
            }
            case ProfileTypeBytecodePutToLocalScope:
            case ProfileTypeBytecodeGetFromLocalScope: {
                const Identifier& ident = identifier(pc[4].u.operand);
                symbolTable = m_symbolTable.get();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                symbolTable->prepareForTypeProfiling(locker);
                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());

                break;
            }

            case ProfileTypeBytecodeHasGlobalID: {
                symbolTable = m_symbolTable.get();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
                globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
                break;
            }
            case ProfileTypeBytecodeDoesNotHaveGlobalID:
            case ProfileTypeBytecodeFunctionArgument: {
                globalVariableID = TypeProfilerNoGlobalIDExists;
                break;
            }
            case ProfileTypeBytecodeFunctionReturnStatement: {
                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
                globalVariableID = TypeProfilerReturnStatement;
                if (!shouldAnalyze) {
                    // Because a return statement can be added implicitly to return undefined at the end of a function,
                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
                    // the user's program, give the type profiler some range to identify these return statements.
                    // Currently, the text offset that is used as identification is on the open brace of the function
                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
                    divotStart = divotEnd = m_sourceOffset;
                    shouldAnalyze = true;
                }
                break;
            }
            }

            std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
                m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
            TypeLocation* location = locationPair.first;
            bool isNewLocation = locationPair.second;

            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
                location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;

            if (shouldAnalyze && isNewLocation)
                vm()->typeProfiler()->insertNewLocation(location);

            instructions[i + 2].u.location = location;
            break;
        }

        case op_debug: {
            if (pc[1].u.index == DidReachBreakpoint)
                m_hasDebuggerStatement = true;
            break;
        }

        default:
            break;
        }

        i += opLength;
    }

    if (vm()->controlFlowProfiler())
        insertBasicBlockBoundariesForControlFlowProfiler(instructions);

    m_instructions = WTF::RefCountedArray<Instruction>(instructions);

    // Set optimization thresholds only after m_instructions is initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    // If the concurrent thread will want the code block's hash, then compute it here
    // synchronously.
    if (Options::alwaysComputeHash())
        hash();

    if (Options::dumpGeneratedBytecodes())
        dumpBytecode();

    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
}

CodeBlock::~CodeBlock()
{
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->remove();
#if ENABLE(JIT)
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->remove();
    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
        m_incomingPolymorphicCalls.begin()->remove();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
        (*iter)->deref();
#endif // ENABLE(JIT)
}

void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles.resizeToFit(newValue);
}
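
// Each parameter gets its own ValueProfile, so the resize above keeps
// m_argumentValueProfiles in step with the parameter count -- presumably so
// the optimizing tiers can speculate on the types of incoming arguments.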

void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
{
    EvalCacheMap::iterator end = m_cacheMap.end();
    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
        visitor.append(&ptr->value);
}

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITCode::DFGJIT)
        return 0;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock.get();
#else // ENABLE(FTL_JIT)
    return 0;
#endif // ENABLE(FTL_JIT)
}
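
// A rough map of how the GC drives the methods below: visitAggregate() is the
// scanning entry point. Blocks that are obviously live scan everything
// strongly right away; optimized blocks instead register a weak-reference
// harvester, so visitWeakReferences() re-runs propagateTransitions() and
// determineLiveness() as marking progresses, and finalizeUnconditionally()
// later jettisons blocks whose weak references turned out to be dead.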

void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
#if ENABLE(PARALLEL_GC)
    // I may be asked to scan myself more than once, and it may even happen concurrently.
    // To this end, use an atomic operation to check (and set) if I've been called already.
    // Only one thread may proceed past this point - whichever one wins the atomic set race.
    bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
    if (!setByMe)
        return;
#endif // ENABLE(PARALLEL_GC)

    if (!!m_alternative)
        m_alternative->visitAggregate(visitor);

    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->visitAggregate(visitor);

    visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
    if (m_jitCode)
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
    if (m_instructions.size()) {
        // Divide by refCount() because m_instructions points to something that is shared
        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
        // Having each CodeBlock report only its proportional share of the size is one way
        // of accomplishing this.
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
    }

    visitor.append(&m_unlinkedCode);

    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
    // inline cache clearing, and jettisoning. The probability of us wanting to do at
    // least one of those things is probably quite close to 1. So we add one no matter what
    // and when it runs, it figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(this);

    m_allTransitionsHaveBeenMarked = false;

    if (shouldImmediatelyAssumeLivenessDuringScan()) {
        // This code block is live, so scan all references strongly and return.
        stronglyVisitStrongReferences(visitor);
        stronglyVisitWeakReferences(visitor);
        propagateTransitions(visitor);
        return;
    }

#if ENABLE(DFG_JIT)
    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(this);

    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().

    m_jitCode->dfgCommon()->livenessHasBeenProved = false;

    propagateTransitions(visitor);
    determineLiveness(visitor);
#else // ENABLE(DFG_JIT)
    RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}

bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
{
#if ENABLE(DFG_JIT)
    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that this means that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    // For simplicity, we don't attempt to jettison code blocks during GC if
    // they are executing. Instead we strongly mark their weak references to
    // allow them to continue to execute soundly.
    if (m_mayBeExecuting)
        return true;

    if (Options::forceDFGCodeBlockLiveness())
        return true;

    return false;
#else
    return true;
#endif
}

bool CodeBlock::isKnownToBeLiveDuringGC()
{
#if ENABLE(DFG_JIT)
    // This should return true for:
    // - Code blocks that behave like normal objects - i.e. if they are referenced then they
    //   are live.
    // - Code blocks that were running on the stack.
    // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
    //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
    //   would survive as true.
    // - Code blocks that don't have any dead weak references.

    return shouldImmediatelyAssumeLivenessDuringScan()
        || m_jitCode->dfgCommon()->livenessHasBeenProved;
#else
    return true;
#endif
}

#if ENABLE(DFG_JIT)
static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
{
    if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
        return false;

    if (!Heap::isMarked(transition.m_from.get()))
        return false;

    return true;
}
#endif // ENABLE(DFG_JIT)

void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;

    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line: {
                if (Heap::isMarked(instruction[4].u.structure.get()))
                    visitor.append(&instruction[6].u.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            switch (stubInfo.accessType) {
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct: {
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if ((!origin || Heap::isMarked(origin))
                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
                    visitor.append(&stubInfo.u.putByIdTransition.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }

            case access_put_by_id_list: {
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if (origin && !Heap::isMarked(origin)) {
                    allAreMarkedSoFar = false;
                    break;
                }
                for (unsigned j = list->size(); j--;) {
                    PutByIdAccess& access = list->m_list[j];
                    if (!access.isTransition())
                        continue;
                    if (Heap::isMarked(access.oldStructure()))
                        visitor.append(&access.m_newStructure);
                    else
                        allAreMarkedSoFar = false;
                }
                break;
            }

            default:
                break;
            }
        }
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if (shouldMarkTransition(dfgCommon->transitions[i])) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).

                visitor.append(&dfgCommon->transitions[i].m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)

    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}
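
// Illustrative example (not from the original comments): a cached "o.f = v"
// that adds a new property is recorded as a transition whose m_from is the
// object's old Structure and whose m_to is the new one. m_to is only marked
// once m_from (and the code origin, if any) are marked, so a single pass can
// leave work behind; allAreMarkedSoFar tracks whether another pass is needed.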

void CodeBlock::determineLiveness(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (shouldImmediatelyAssumeLivenessDuringScan())
        return;

#if ENABLE(DFG_JIT)
    // Check if we have any remaining work to do.
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->livenessHasBeenProved)
        return;

    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
            allAreLiveSoFar = false;
            break;
        }
    }
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
                break;
            }
        }
    }

    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    dfgCommon->livenessHasBeenProved = true;
    stronglyVisitStrongReferences(visitor);
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
    propagateTransitions(visitor);
    determineLiveness(visitor);
}
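
// This is the WeakReferenceHarvester callback registered in visitAggregate().
// The collector may call it several times per GC as the marking fixpoint
// advances; both steps above bail out cheaply when there is no new work (see
// m_allTransitionsHaveBeenMarked and livenessHasBeenProved).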

void CodeBlock::finalizeUnconditionally()
{
    Interpreter* interpreter = m_vm->interpreter;
    if (JITCode::couldBeInterpreted(jitType())) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
            case op_get_by_id:
            case op_get_by_id_out_of_line:
            case op_put_by_id:
            case op_put_by_id_out_of_line:
                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
                curInstruction[4].u.structure.clear();
                curInstruction[5].u.operand = 0;
                break;
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line:
                if (Heap::isMarked(curInstruction[4].u.structure.get())
                    && Heap::isMarked(curInstruction[6].u.structure.get())
                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
                    break;
                if (Options::verboseOSR()) {
                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
                        curInstruction[4].u.structure.get(),
                        curInstruction[6].u.structure.get(),
                        curInstruction[7].u.structureChain.get());
                }
                curInstruction[4].u.structure.clear();
                curInstruction[6].u.structure.clear();
                curInstruction[7].u.structureChain.clear();
                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
                break;
            case op_get_array_length:
                break;
            case op_to_this:
                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
                curInstruction[2].u.structure.clear();
                curInstruction[3].u.toThisStatus = merge(
                    curInstruction[3].u.toThisStatus, ToThisClearedByGC);
                break;
            case op_create_this: {
                auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
                if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                    break;
                JSCell* cachedFunction = cacheWriteBarrier.get();
                if (Heap::isMarked(cachedFunction))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
                cacheWriteBarrier.clear();
                break;
            }
            case op_resolve_scope: {
                // Right now this isn't strictly necessary. Any symbol tables that this will refer to
                // are for outer functions, and we refer to those functions strongly, and they refer
                // to the symbol table strongly. But it's nice to be on the safe side.
                WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
                if (!symbolTable || Heap::isMarked(symbolTable.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
                symbolTable.clear();
                break;
            }
            case op_get_from_scope:
            case op_put_to_scope: {
                ResolveModeAndType modeAndType = ResolveModeAndType(curInstruction[4].u.operand);
                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
                    continue;
                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
                if (!structure || Heap::isMarked(structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
                structure.clear();
                break;
            }
            default:
                OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
                ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
            }
        }

        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
                if (Options::verboseOSR())
                    dataLog("Clearing LLInt call from ", *this, "\n");
                m_llintCallLinkInfos[i].unlink();
            }
            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
                m_llintCallLinkInfos[i].lastSeenCallee.clear();
        }
    }

#if ENABLE(DFG_JIT)
    // Check if we're not live. If we are, then jettison.
    if (!isKnownToBeLiveDuringGC()) {
        if (Options::verboseOSR())
            dataLog(*this, " has dead weak references, jettisoning during GC.\n");

        if (DFG::shouldShowDisassembly()) {
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
                JSCell* origin = transition.m_codeOrigin.get();
                JSCell* from = transition.m_from.get();
                JSCell* to = transition.m_to.get();
                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
                    continue;
                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
            }
            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                JSCell* weak = dfgCommon->weakReferences[i].get();
                if (Heap::isMarked(weak))
                    continue;
                dataLog("    Weak reference ", RawPointer(weak), ".\n");
            }
        }

        jettison(Profiler::JettisonDueToWeakReference);
        return;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(JIT)
    // Handle inline caches.
    if (!!jitCode()) {
        RepatchBuffer repatchBuffer(this);

        for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
            (*iter)->visitWeak(repatchBuffer);

        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;

            if (stubInfo.visitWeakReferences(repatchBuffer))
                continue;

            resetStubDuringGCInternal(repatchBuffer, stubInfo);
        }
    }
#endif
}
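
// finalizeUnconditionally() is the unconditional finalizer registered in
// visitAggregate(). It runs late in GC: it clears LLInt inline caches whose
// Structures died, unlinks LLInt calls to dead callees, jettisons optimized
// code with dead weak references, and resets JIT stubs that refer to dead
// cells.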

void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}

void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getStubInfoMap(locker, result);
}

void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}

void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getCallLinkInfoMap(locker, result);
}

#if ENABLE(JIT)
StructureStubInfo* CodeBlock::addStubInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_stubInfos.add();
}

StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
{
    for (StructureStubInfo* stubInfo : m_stubInfos) {
        if (stubInfo->codeOrigin == codeOrigin)
            return stubInfo;
    }
    return nullptr;
}

CallLinkInfo* CodeBlock::addCallLinkInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_callLinkInfos.add();
}

void CodeBlock::resetStub(StructureStubInfo& stubInfo)
{
    if (stubInfo.accessType == access_unset)
        return;

    ConcurrentJITLocker locker(m_lock);

    RepatchBuffer repatchBuffer(this);
    resetStubInternal(repatchBuffer, stubInfo);
}

void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);

    if (Options::verboseOSR()) {
        // This can be called from GC destructor calls, so we don't try to do a full dump
        // of the CodeBlock.
        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
    }

    RELEASE_ASSERT(JITCode::isJIT(jitType()));

    if (isGetByIdAccess(accessType))
        resetGetByID(repatchBuffer, stubInfo);
    else if (isPutByIdAccess(accessType))
        resetPutByID(repatchBuffer, stubInfo);
    else {
        RELEASE_ASSERT(isInAccess(accessType));
        resetIn(repatchBuffer, stubInfo);
    }

    stubInfo.reset();
}

void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    resetStubInternal(repatchBuffer, stubInfo);
    stubInfo.resetByGC = true;
}

CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
{
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        if ((*iter)->codeOrigin() == CodeOrigin(index))
            return *iter;
    }
    return nullptr;
}
#endif

void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
{
    visitor.append(&m_globalObject);
    visitor.append(&m_ownerExecutable);
    visitor.append(&m_symbolTable);
    visitor.append(&m_unlinkedCode);
    if (m_rareData)
        m_rareData->m_evalCodeCache.visitAggregate(visitor);
    visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
    for (size_t i = 0; i < m_functionExprs.size(); ++i)
        visitor.append(&m_functionExprs[i]);
    for (size_t i = 0; i < m_functionDecls.size(); ++i)
        visitor.append(&m_functionDecls[i]);
    for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
        m_objectAllocationProfiles[i].visitAggregate(visitor);

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        // FIXME: This is an antipattern for two reasons. References introduced by the DFG
        // that aren't in the original CodeBlock being compiled should be weakly referenced.
        // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also,
        // those weak references should already be tracked in the DFG as weak FrozenValues. So,
        // there is probably no need for this. We already have assertions that this should be
        // unnecessary.
        // https://bugs.webkit.org/show_bug.cgi?id=146613
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        if (dfgCommon->inlineCallFrames.get())
            dfgCommon->inlineCallFrames->visitAggregate(visitor);
    }
#endif

    updateAllPredictions();
}

void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
        if (!!dfgCommon->transitions[i].m_codeOrigin)
            visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
        visitor.append(&dfgCommon->transitions[i].m_from);
        visitor.append(&dfgCommon->transitions[i].m_to);
    }

    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
        visitor.append(&dfgCommon->weakReferences[i]);

    for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
        visitor.append(&dfgCommon->weakStructureReferences[i]);
#endif
}

CodeBlock* CodeBlock::baselineAlternative()
{
#if ENABLE(JIT)
    CodeBlock* result = this;
    while (result->alternative())
        result = result->alternative();
    RELEASE_ASSERT(result);
    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
    return result;
#else
    return this;
#endif
}

CodeBlock* CodeBlock::baselineVersion()
{
#if ENABLE(JIT)
    if (JITCode::isBaselineCode(jitType()))
        return this;
    CodeBlock* result = replacement();
    if (!result) {
        // This can happen if we're creating the original CodeBlock for an executable.
        // Assume that we're the baseline CodeBlock.
        RELEASE_ASSERT(jitType() == JITCode::None);
        return this;
    }
    result = result->baselineAlternative();
    return result;
#else
    return this;
#endif
}
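
// Roughly, alternative() links each optimizing tier back to the code it
// replaced, so walking the chain from any CodeBlock eventually reaches the
// baseline (or not-yet-compiled) version. baselineAlternative() does exactly
// that walk, while baselineVersion() also copes with being called on a block
// that is not the currently installed replacement.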

#if ENABLE(JIT)
bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
{
    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
}

bool CodeBlock::hasOptimizedReplacement()
{
    return hasOptimizedReplacement(jitType());
}
#endif

HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());

    if (!m_rareData)
        return 0;

    Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
    for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
        HandlerInfo& handler = exceptionHandlers[i];
        if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
            continue;

        // Handlers are ordered innermost first, so the first handler we encounter
        // that contains the source address is the correct handler to use.
        if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
            return &handler;
    }

    return 0;
}
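
// For example (illustrative, not from the original comments): for a try
// nested within another try, the inner handler appears earlier in
// m_exceptionHandlers, so an offset covered by both ranges resolves to the
// inner catch first, as JavaScript semantics require.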

unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());
    return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}

unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    int divot;
    int startOffset;
    int endOffset;
    unsigned line;
    unsigned column;
    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    return column;
}

void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    divot += m_sourceOffset;
    column += line ? 1 : firstLineColumnOffset();
    line += m_ownerExecutable->firstLine();
}

bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
{
    Interpreter* interpreter = vm()->interpreter;
    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end;) {
        OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
        if (opcodeID == op_debug) {
            unsigned bytecodeOffset = it - begin;
            int unused;
            unsigned opDebugLine;
            unsigned opDebugColumn;
            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
            if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
                return true;
        }
        it += opcodeLengths[opcodeID];
    }
    return false;
}

void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
    m_rareCaseProfiles.shrinkToFit();
    m_specialFastCaseProfiles.shrinkToFit();

    if (shrinkMode == EarlyShrink) {
        m_constantRegisters.shrinkToFit();
        m_constantsSourceCodeRepresentation.shrinkToFit();

        if (m_rareData) {
            m_rareData->m_switchJumpTables.shrinkToFit();
            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
        }
    } // else don't shrink these, because we would have already pointed pointers into these tables.
}

#if ENABLE(JIT)
void CodeBlock::unlinkCalls()
{
    if (!!m_alternative)
        m_alternative->unlinkCalls();
    for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
        if (m_llintCallLinkInfos[i].isLinked())
            m_llintCallLinkInfos[i].unlink();
    }
    if (m_callLinkInfos.isEmpty())
        return;
    if (!m_vm->canUseJIT())
        return;
    RepatchBuffer repatchBuffer(this);
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        if (!info.isLinked())
            continue;
        info.unlink(repatchBuffer);
    }
}

void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingCalls.push(incoming);
}

void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingPolymorphicCalls.push(incoming);
}
#endif // ENABLE(JIT)

void CodeBlock::unlinkIncomingCalls()
{
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->unlink();
#if ENABLE(JIT)
    if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
        return;
    RepatchBuffer repatchBuffer(this);
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->unlink(repatchBuffer);
    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
        m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
#endif // ENABLE(JIT)
}

void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingLLIntCalls.push(incoming);
}

void CodeBlock::clearEvalCache()
{
    if (!!m_alternative)
        m_alternative->clearEvalCache();
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->clearEvalCache();
    if (!m_rareData)
        return;
    m_rareData->m_evalCodeCache.clear();
}

void CodeBlock::install()
{
    ownerExecutable()->installCode(this);
}

PassRefPtr<CodeBlock> CodeBlock::newReplacement()
{
    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
}

#if ENABLE(JIT)
CodeBlock* ProgramCodeBlock::replacement()
{
    return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
}

CodeBlock* EvalCodeBlock::replacement()
{
    return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
}

CodeBlock* FunctionCodeBlock::replacement()
{
    return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}

DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
{
    return DFG::programCapabilityLevel(this);
}

DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
{
    return DFG::evalCapabilityLevel(this);
}

DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
{
    if (m_isConstructor)
        return DFG::functionForConstructCapabilityLevel(this);
    return DFG::functionForCallCapabilityLevel(this);
}
#endif

void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
    RELEASE_ASSERT(reason != Profiler::NotJettisoned);

#if ENABLE(DFG_JIT)
    if (DFG::shouldShowDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason);
        if (detail)
            dataLog(", ", *detail);
        dataLog(".\n");
    }

    DeferGCForAWhile deferGC(*m_heap);
    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));

    if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
        compilation->setJettisonReason(reason, detail);

    // We want to accomplish two things here:
    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
    //    we should OSR exit at the top of the next bytecode instruction after the return.
    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.

    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
    // whether the invalidation has already happened.
    if (!jitCode()->dfgCommon()->invalidate()) {
        // Nothing to do since we've already been invalidated. That means that we cannot be
        // the optimized replacement.
        RELEASE_ASSERT(this != replacement());
        return;
    }

    if (DFG::shouldShowDisassembly())
        dataLog("    Did invalidate ", *this, "\n");

    // Count the reoptimization if that's what the user wanted.
    if (mode == CountReoptimization) {
        // FIXME: Maybe this should call alternative().
        // https://bugs.webkit.org/show_bug.cgi?id=123677
        baselineAlternative()->countReoptimization();
        if (DFG::shouldShowDisassembly())
            dataLog("    Did count reoptimization for ", *this, "\n");
    }

    // Now take care of the entrypoint.
    if (this != replacement()) {
        // This means that we were never the entrypoint. This can happen for OSR entry code
        // blocks.
        return;
    }
    alternative()->optimizeAfterWarmUp();
    tallyFrequentExitSites();
    alternative()->install();
    if (DFG::shouldShowDisassembly())
        dataLog("    Did install baseline version of ", *this, "\n");
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(mode);
    UNUSED_PARAM(detail);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}

JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
    if (!codeOrigin.inlineCallFrame)
        return globalObject();
    return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
}

class RecursionCheckFunctor {
public:
    RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
        : m_startCallFrame(startCallFrame)
        , m_codeBlock(codeBlock)
        , m_depthToCheck(depthToCheck)
        , m_foundStartCallFrame(false)
        , m_didRecurse(false)
    { }

    StackVisitor::Status operator()(StackVisitor& visitor)
    {
        CallFrame* currentCallFrame = visitor->callFrame();

        if (currentCallFrame == m_startCallFrame)
            m_foundStartCallFrame = true;

        if (m_foundStartCallFrame) {
            if (visitor->callFrame()->codeBlock() == m_codeBlock) {
                m_didRecurse = true;
                return StackVisitor::Done;
            }

            if (!m_depthToCheck--)
                return StackVisitor::Done;
        }

        return StackVisitor::Continue;
    }

    bool didRecurse() const { return m_didRecurse; }

private:
    CallFrame* m_startCallFrame;
    CodeBlock* m_codeBlock;
    unsigned m_depthToCheck;
    bool m_foundStartCallFrame;
    bool m_didRecurse;
};
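
// "SABI" in the logging below stands for m_shouldAlwaysBeInlined. It starts
// out true, and noticeIncomingCall() clears it the moment we observe a caller
// for which inlining is impossible or unprofitable, so a function remains
// SABI only while every observed caller could plausibly inline it.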

void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
    CodeBlock* callerCodeBlock = callerFrame->codeBlock();

    if (Options::verboseCallLink())
        dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");

#if ENABLE(DFG_JIT)
    if (!m_shouldAlwaysBeInlined)
        return;

    if (!callerCodeBlock) {
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is native.\n");
        return;
    }

    if (!hasBaselineJITProfiling())
        return;

    if (!DFG::mightInlineFunction(this))
        return;

    if (!canInline(m_capabilityLevelState))
        return;

    if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is too large.\n");
        return;
    }

    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
        // If the caller is still in the interpreter, then we can't expect inlining to
        // happen anytime soon. Assume it's profitable to optimize it separately. This
        // ensures that a function is SABI only if it is called no more frequently than
        // any of its callers.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is in LLInt.\n");
        return;
    }

    if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller was already optimized.\n");
        return;
    }

    if (callerCodeBlock->codeType() != FunctionCode) {
        // If the caller is either eval or global code, assume that that won't be
        // optimized anytime soon. For eval code this is particularly true since we
        // delay eval optimization by a *lot*.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is not a function.\n");
        return;
    }

    // Recursive calls won't be inlined.
    RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
    vm()->topCallFrame->iterate(functor);

    if (functor.didRecurse()) {
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because recursion was detected.\n");
        m_shouldAlwaysBeInlined = false;
        return;
    }

    if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) {
        dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
        CRASH();
    }

    if (canCompile(callerCodeBlock->m_capabilityLevelState))
        return;

    if (Options::verboseCallLink())
        dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");

    m_shouldAlwaysBeInlined = false;
#endif
}

unsigned CodeBlock::reoptimizationRetryCounter() const
{
#if ENABLE(JIT)
    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
    return m_reoptimizationRetryCounter;
#else
    return 0;
#endif // ENABLE(JIT)
}

#if ENABLE(JIT)
void CodeBlock::countReoptimization()
{
    m_reoptimizationRetryCounter++;
    if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
        m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
}

unsigned CodeBlock::numberOfDFGCompiles()
{
    ASSERT(JITCode::isBaselineCode(jitType()));
    if (Options::testTheFTL()) {
        if (m_didFailFTLCompilation)
            return 1000000;
        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
    }
    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}

int32_t CodeBlock::codeTypeThresholdMultiplier() const
{
    if (codeType() == EvalCode)
        return Options::evalThresholdMultiplier();

    return 1;
}
double CodeBlock::optimizationThresholdScalingFactor()
{
    // This expression arises from doing a least-squares fit of
    //
    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
    //
    // against the data points:
    //
    //     x       F[x_]
    //     10      0.9    (smallest reasonable code block)
    //     200     1.0    (typical small-ish code block)
    //     320     1.2    (something I saw in 3d-cube that I wanted to optimize)
    //     1268    5.0    (something I saw in 3d-cube that I didn't want to optimize)
    //     4000    5.5    (random large size, used to cause the function to converge to a shallow curve of some sort)
    //     10000   6.0    (similar to above)
    //
    // I achieve the minimization using the following Mathematica code:
    //
    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
    //
    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
    //
    // solution = Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
    //     {a, b, c, d}][[2]]
    //
    // And the code below (to initialize a, b, c, d) is generated by:
    //
    // Print["const double " <> ToString[#[[1]]] <> " = " <>
    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
    //
    // We've long known the following to be true:
    // - Small code blocks are cheap to optimize and so we should do it sooner rather
    //   than later.
    // - Large code blocks are expensive to optimize and so we should postpone doing so,
    //   and sometimes have a large enough threshold that we never optimize them.
    // - The difference in cost is not totally linear because (a) just invoking the
    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
    //   in the correlation between instruction count and the actual compilation cost
    //   that for those large blocks, the instruction count should not have a strong
    //   influence on our threshold.
    //
    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
    // example where the heuristics were right (code block in 3d-cube with instruction
    // count 320, which got compiled early as it should have been) and one where they were
    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
    // to compile and didn't run often enough to warrant compilation in my opinion), and
    // then threw in additional data points that represented my own guess of what our
    // heuristics should do for some round-numbered examples.
    //
    // The expression to which I decided to fit the data arose because I started with an
    // affine function, and then did two things: I put the linear part in an Abs to ensure
    // that the fit didn't end up choosing a negative value of c (which would result in
    // the function turning over and going negative for large x), and I threw in a Sqrt
    // term because Sqrt represents my intuition that the function should be more sensitive
    // to small changes in small values of x, but less sensitive when x gets large.
    //
    // Note that the current fit essentially eliminates the linear portion of the
    // expression (c == 0.0).
    const double a = 0.061504;
    const double b = 1.02406;
    const double c = 0.0;
    const double d = 0.825914;

    double instructionCount = this->instructionCount();

    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.

    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;

    result *= codeTypeThresholdMultiplier();

    if (Options::verboseOSR()) {
        dataLog(
            *this, ": instruction count is ", instructionCount,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
    return result;
}
static int32_t clipThreshold(double threshold)
{
    if (threshold < 1.0)
        return 1;

    if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();

    return static_cast<int32_t>(threshold);
}
int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
{
    return clipThreshold(
        static_cast<double>(desiredThreshold) *
        optimizationThresholdScalingFactor() *
        (1 << reoptimizationRetryCounter()));
}
bool CodeBlock::checkIfOptimizationThresholdReached()
{
    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
            == DFG::Worklist::Compiled) {
            optimizeNextInvocation();
            return true;
        }
    }

    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
void CodeBlock::optimizeNextInvocation()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing next invocation.\n");
    m_jitExecuteCounter.setNewThreshold(0, this);
}

void CodeBlock::dontOptimizeAnytimeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Not optimizing anytime soon.\n");
    m_jitExecuteCounter.deferIndefinitely();
}

void CodeBlock::optimizeAfterWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after warm-up.\n");
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
}

void CodeBlock::optimizeAfterLongWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after long warm-up.\n");
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
}

void CodeBlock::optimizeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing soon.\n");
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
}
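
// Taken together, the entry points above form a ladder: optimizeNextInvocation()
// sets the execution-counter threshold to 0 so the very next execution tiers up;
// optimizeSoon() and the two warm-up variants set progressively later thresholds
// (each scaled through adjustedCounterValue(), judging by their Options names);
// and dontOptimizeAnytimeSoon() defers indefinitely.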
void CodeBlock::forceOptimizationSlowPathConcurrently()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Forcing slow path concurrently.\n");
    m_jitExecuteCounter.forceSlowPathConcurrently();
}
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
    JITCode::JITType type = jitType();
    if (type != JITCode::BaselineJIT) {
        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    CodeBlock* theReplacement = replacement();
    if ((result == CompilationSuccessful) != (theReplacement != this)) {
        dataLog(*this, ": we have result = ", result, " but ");
        if (theReplacement == this)
            dataLog("we are our own replacement.\n");
        else
            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    switch (result) {
    case CompilationSuccessful:
        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
        optimizeNextInvocation();
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon();
        return;
    case CompilationDeferred:
        // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
        // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
        // necessarily guarantee anything. So, we make sure that even if that
        // function ends up being a no-op, we still eventually retry and realize
        // that we have optimized code ready.
        optimizeAfterWarmUp();
        return;
    case CompilationInvalidated:
        // Retry with exponential backoff.
        countReoptimization();
        optimizeAfterWarmUp();
        return;
    }

    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
    RELEASE_ASSERT_NOT_REACHED();
}
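
// In short: Successful means the replacement is ready, so run it on the next
// invocation; Failed means give up on optimizing this block; Deferred retries
// after warm-up, since forceOptimizationSlowPathConcurrently() may have been a
// no-op; Invalidated counts toward exponential backoff and also retries after
// warm-up.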
uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    // Compute this the lame way so we don't saturate. This is called infrequently
    // enough that this loop won't hurt us.
    unsigned result = desiredThreshold;
    for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
        unsigned newResult = result << 1;
        if (newResult < result)
            return std::numeric_limits<uint32_t>::max();
        result = newResult;
    }
    return result;
}
uint32_t CodeBlock::exitCountThresholdForReoptimization()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
}

uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
}

bool CodeBlock::shouldReoptimizeNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimization();
}

bool CodeBlock::shouldReoptimizeFromLoopNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
}
ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
            return &m_arrayProfiles[i];
    }
    return 0;
}
ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
{
    ArrayProfile* result = getArrayProfile(bytecodeOffset);
    if (result)
        return result;
    return addArrayProfile(bytecodeOffset);
}
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
    ConcurrentJITLocker locker(m_lock);

    numberOfLiveNonArgumentValueProfiles = 0;
    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        unsigned numSamples = profile->totalNumberOfSamples();
        if (numSamples > ValueProfile::numberOfBuckets)
            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
        numberOfSamplesInProfiles += numSamples;
        if (profile->m_bytecodeOffset < 0) {
            // Argument profiles are not counted toward liveness.
            profile->computeUpdatedPrediction(locker);
            continue;
        }
        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
            numberOfLiveNonArgumentValueProfiles++;
        profile->computeUpdatedPrediction(locker);
    }

    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
}
void CodeBlock::updateAllValueProfilePredictions()
{
    unsigned ignoredValue1, ignoredValue2;
    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
}
void CodeBlock::updateAllArrayPredictions()
{
    ConcurrentJITLocker locker(m_lock);

    for (unsigned i = m_arrayProfiles.size(); i--;)
        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);

    // Don't count these either, for similar reasons.
    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
        m_arrayAllocationProfiles[i].updateIndexingType();
}
void CodeBlock::updateAllPredictions()
{
    updateAllValueProfilePredictions();
    updateAllArrayPredictions();
}
bool CodeBlock::shouldOptimizeNow()
{
    if (Options::verboseOSR())
        dataLog("Considering optimizing ", *this, "...\n");

    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
        return true;

    updateAllArrayPredictions();

    unsigned numberOfLiveNonArgumentValueProfiles;
    unsigned numberOfSamplesInProfiles;
    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);

    if (Options::verboseOSR()) {
        dataLogF(
            "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
    }

    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
        return true;

    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
    m_optimizationDelayCounter++;
    optimizeAfterWarmUp();
    return false;
}
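
// To make the gate concrete with hypothetical rates: at a liveness rate of 0.75
// and a fullness rate of 0.35, a block with 100 non-argument value profiles
// would need at least 75 of them live, its samples to fill at least 35% of the
// total bucket capacity, and the minimum delay to have elapsed before this
// returns true; otherwise it bumps the delay counter and waits for re-warm-up.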
void CodeBlock::tallyFrequentExitSites()
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);

    CodeBlock* profiledBlock = alternative();

    switch (jitType()) {
    case JITCode::DFGJIT: {
        DFG::JITCode* jitCode = m_jitCode->dfg();
        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
            DFG::OSRExit& exit = jitCode->osrExit[i];
            exit.considerAddingAsFrequentExitSite(profiledBlock);
        }
        break;
    }

#if ENABLE(FTL_JIT)
    case JITCode::FTLJIT: {
        // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
        // vector contains a totally different type that just so happens to behave like
        // DFG::JITCode::osrExit.
        FTL::JITCode* jitCode = m_jitCode->ftl();
        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
            FTL::OSRExit& exit = jitCode->osrExit[i];
            exit.considerAddingAsFrequentExitSite(profiledBlock);
        }
        break;
    }
#endif

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
#endif // ENABLE(DFG_JIT)
#if ENABLE(VERBOSE_VALUE_PROFILE)
void CodeBlock::dumpValueProfiles()
{
    dataLog("ValueProfile for ", *this, ":\n");
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        if (profile->m_bytecodeOffset < 0) {
            ASSERT(profile->m_bytecodeOffset == -1);
            dataLogF("   arg = %u: ", i);
        } else
            dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
        if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
            dataLogF("<empty>\n");
            continue;
        }
        profile->dump(WTF::dataFile());
        dataLogF("\n");
    }
    dataLog("RareCaseProfile for ", *this, ":\n");
    for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
        RareCaseProfile* profile = rareCaseProfile(i);
        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
    }
    dataLog("SpecialFastCaseProfile for ", *this, ":\n");
    for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
        RareCaseProfile* profile = specialFastCaseProfile(i);
        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
    }
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
unsigned CodeBlock::frameRegisterCount()
{
    switch (jitType()) {
    case JITCode::InterpreterThunk:
        return LLInt::frameRegisterCountFor(this);

#if ENABLE(JIT)
    case JITCode::BaselineJIT:
        return JIT::frameRegisterCountFor(this);
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    case JITCode::DFGJIT:
    case JITCode::FTLJIT:
        return jitCode()->dfgCommon()->frameRegisterCount;
#endif // ENABLE(DFG_JIT)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    }
}
int CodeBlock::stackPointerOffset()
{
    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
}
size_t CodeBlock::predictedMachineCodeSize()
{
    // This will be called from CodeBlock::CodeBlock before either m_vm or the
    // instructions have been initialized. It's OK to return 0 because what will really
    // matter is the recomputation of this value when the slow path is triggered.
    if (!m_vm)
        return 0;

    if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
        return 0; // It's as good of a prediction as we'll get.

    // Be conservative: return a size that will be an overestimation 84% of the time.
    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();

    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
    // here is OK, since this whole method is just a heuristic.
    if (multiplier < 0 || multiplier > 1000)
        return 0;

    double doubleResult = multiplier * m_instructions.size();

    // Be even more paranoid: silently reject values that won't fit into a size_t. If
    // the function is so huge that we can't even fit it into virtual memory then we
    // should probably have some other guards in place to prevent us from even getting
    // to this point.
    if (doubleResult > std::numeric_limits<size_t>::max())
        return 0;

    return static_cast<size_t>(doubleResult);
}
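
// (The 84% in the comment above assumes the bytes-per-bytecode-word samples are
// roughly normally distributed: the mean plus one standard deviation lands at
// about the 84th percentile, since Phi(1) is approximately 0.8413.)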
bool CodeBlock::usesOpcode(OpcodeID opcodeID)
{
    Interpreter* interpreter = vm()->interpreter;
    Instruction* instructionsBegin = instructions().begin();
    unsigned instructionCount = instructions().size();

    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
        switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
#define DEFINE_OP(curOpcode, length)    \
        case curOpcode:                 \
            if (curOpcode == opcodeID)  \
                return true;            \
            bytecodeOffset += length;   \
            break;
            FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    return false;
}
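
// (The walk above advances by each opcode's length rather than by one slot,
// since the bytecode stream is variable-length; FOR_EACH_OPCODE_ID expands
// DEFINE_OP into one case per opcode, supplying that opcode's length.)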
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
    ConcurrentJITLocker locker(symbolTable()->m_lock);
    SymbolTable::Map::iterator end = symbolTable()->end(locker);
    for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
        if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
            // FIXME: This won't work from the compilation thread.
            // https://bugs.webkit.org/show_bug.cgi?id=115300
            return ptr->key.get();
        }
    }
    if (virtualRegister == thisRegister())
        return ASCIILiteral("this");
    if (virtualRegister.isArgument())
        return String::format("arguments[%3d]", virtualRegister.toArgument());
    return "";
}
ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
{
    ValueProfile* result = binarySearch<ValueProfile, int>(
        m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
        getValueProfileBytecodeOffset<ValueProfile>);
    ASSERT(result->m_bytecodeOffset != -1);
    ASSERT(instructions()[bytecodeOffset + opcodeLength(
        m_vm->interpreter->getOpcodeID(
            instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
    return result;
}
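
// (The second ASSERT above encodes the layout convention that, for profiled
// opcodes, the ValueProfile pointer occupies the instruction's last operand
// slot, hence the "opcodeLength(...) - 1" indexing.)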
void CodeBlock::validate()
{
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.

    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);

    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
        beginValidationDidFail();
        dataLog("    Wrong number of bits in result!\n");
        dataLog("    Result: ", liveAtHead, "\n");
        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
        endValidationDidFail();
    }

    for (unsigned i = m_numCalleeRegisters; i--;) {
        VirtualRegister reg = virtualRegisterForLocal(i);

        if (liveAtHead.get(i)) {
            beginValidationDidFail();
            dataLog("    Variable ", reg, " is expected to be dead.\n");
            dataLog("    Result: ", liveAtHead, "\n");
            endValidationDidFail();
        }
    }
}
void CodeBlock::beginValidationDidFail()
{
    dataLog("Validation failure in ", *this, ":\n");
}

void CodeBlock::endValidationDidFail()
{
    dataLog("Validation failure.\n");
    RELEASE_ASSERT_NOT_REACHED();
}
void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    m_steppingMode = mode;
    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerStepping);
}
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
{
    return tryBinarySearch<RareCaseProfile, int>(
        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
        getRareCaseProfileBytecodeOffset);
}
DFG::CapabilityLevel CodeBlock::capabilityLevel()
{
    DFG::CapabilityLevel result = capabilityLevelInternal();
    m_capabilityLevelState = result;
    return result;
}
void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(Vector<Instruction, 0, UnsafeVectorOverflow>& instructions)
{
    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
        // the next op_profile_control_flow will give us the text range of a single basic block.
        size_t startIdx = bytecodeOffsets[i];
        RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
        int basicBlockEndOffset;
        if (i + 1 < offsetsLength) {
            size_t endIdx = bytecodeOffsets[i + 1];
            RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
        } else {
            basicBlockEndOffset = m_sourceOffset + m_ownerExecutable->source().length() - 1; // Offset before the closing brace.
            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
        }

        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
        // program. The condition:
        //     (basicBlockEndOffset < basicBlockStartOffset)
        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
        // internal data structure, so if any of them execute, it will record the same textual basic block in the
        // JavaScript program as executing.
        // At the bytecode level, this situation looks like:
        //     j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
        //     ...
        //     k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
        //     ...
        //     m: op_profile_control_flow
        if (basicBlockEndOffset < basicBlockStartOffset) {
            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
            continue;
        }

        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(m_ownerExecutable->sourceID(), basicBlockStartOffset, basicBlockEndOffset);

        // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
        // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
        // This is necessary because in the original source text of a JavaScript program,
        // function literals form new basic block boundaries, but they aren't represented
        // inside the CodeBlock's instruction stream.
        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
            int functionStart = executable->typeProfilingStartOffset();
            int functionEnd = executable->typeProfilingEndOffset();
            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
                basicBlockLocation->insertGap(functionStart, functionEnd);
        };

        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
            insertFunctionGaps(executable);
        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
            insertFunctionGaps(executable);

        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
    }
}
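
// As an illustration of the gap mechanism above: for a basic block whose source
// text is
//     foo(); function bar() { /* ... */ } baz();
// the range from the start of foo() to the end of baz() would get a gap spanning
// bar's text, because bar's body runs only when bar is called, not when this
// basic block executes.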