/*
 * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CodeBlock.h"

#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
#include "FunctionExecutableDump.h"
#include "Interpreter.h"
#include "JIT.h"
#include "JITStubs.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSNameScope.h"
#include "LLIntEntrypoint.h"
#include "LowLevelInterpreter.h"
#include "JSCInlines.h"
#include "PolymorphicGetByIdList.h"
#include "PolymorphicPutByIdList.h"
#include "ProfilerDatabase.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {

CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    default:
        CRASH();
        return CString("", 0);
    }
}

bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}

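// The hash is computed lazily from the owner executable's source and this
// block's specialization kind. Computing it on a compilation thread is not
// safe, hence the assertion below.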
CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
    }
    return m_hash;
}

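// For function code, the unlinked executable's offsets are relative to the
// unlinked source, so we translate them into the linked source's coordinate
// system using the delta between the two start offsets before slicing out
// the text of the function.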
CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}

CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}

CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}

void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}

void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}

static CString idName(int id0, const Identifier& ident)
{
    return toCString(ident.impl(), "(@id", id0, ")");
}

CString CodeBlock::registerName(int r) const
{
    if (isConstantRegisterIndex(r))
        return constantName(r);

    return toCString(VirtualRegister(r));
}

CString CodeBlock::constantName(int index) const
{
    JSValue value = getConstant(index);
    return toCString(value, "(", VirtualRegister(index), ")");
}

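// Renders a RegExp as "/pattern/flags". The postfix buffer holds the closing
// slash, up to three flag characters ('g', 'i', 'm'), and a terminating NUL.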
static CString regexpToSourceString(RegExp* regExp)
{
    char postfix[5] = { '/', 0, 0, 0, 0 };
    int index = 1;
    if (regExp->global())
        postfix[index++] = 'g';
    if (regExp->ignoreCase())
        postfix[index++] = 'i';
    if (regExp->multiline())
        postfix[index] = 'm';

    return toCString("/", regExp->pattern().impl(), postfix);
}

static CString regexpName(int re, RegExp* regexp)
{
    return toCString(regexpToSourceString(regexp), "(@re", re, ")");
}

NEVER_INLINE static const char* debugHookName(int debugHookID)
{
    switch (static_cast<DebugHookID>(debugHookID)) {
    case DidEnterCallFrame:
        return "didEnterCallFrame";
    case WillLeaveCallFrame:
        return "willLeaveCallFrame";
    case WillExecuteStatement:
        return "willExecuteStatement";
    case WillExecuteProgram:
        return "willExecuteProgram";
    case DidExecuteProgram:
        return "didExecuteProgram";
    case DidReachBreakpoint:
        return "didReachBreakpoint";
    }

    RELEASE_ASSERT_NOT_REACHED();
    return "";
}

void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;

    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
}

void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int r2 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
}

void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
{
    int r0 = (++it)->u.operand;
    int offset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
}

void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        op = 0;
#endif
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}

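// Prints a structure pointer and, if the property is present on it, the
// property's offset. getConcurrently() is the variant of the lookup that is
// safe to call while compilation threads may be inspecting the same structure.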
static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
{
    if (!structure)
        return;

    out.printf("%s = %p", name, structure);

    PropertyOffset offset = structure->getConcurrently(ident.impl());
    if (offset != invalidOffset)
        out.printf(" (offset = %d)", offset);
}

static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
{
    out.printf("chain = %p: [", chain);
    bool first = true;
    for (WriteBarrier<Structure>* currentStructure = chain->head();
        *currentStructure;
        ++currentStructure) {
        if (first)
            first = false;
        else
            out.printf(", ");
        dumpStructure(out, "struct", currentStructure->get(), ident);
    }
    out.printf("]");
}

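// Dumps the state of a get_by_id cache: first the LLInt inline cache stored
// in the instruction stream, then (when the JIT is enabled) whatever the
// StructureStubInfo recorded for this bytecode offset.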
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[3].u.operand);

    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.

    if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
        out.printf(" llint(array_length)");
    else if (Structure* structure = instruction[4].u.structure.get()) {
        out.printf(" llint(");
        dumpStructure(out, "struct", structure, ident);
        out.printf(")");
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");

        if (stubInfo.seen) {
            out.printf(" jit(");

            Structure* baseStructure = 0;
            Structure* prototypeStructure = 0;
            StructureChain* chain = 0;
            PolymorphicGetByIdList* list = 0;

            switch (stubInfo.accessType) {
            case access_get_by_id_self:
                out.printf("self");
                baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
                break;
            case access_get_by_id_list:
                out.printf("list");
                list = stubInfo.u.getByIdList.list;
                break;
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            if (baseStructure) {
                out.printf(", ");
                dumpStructure(out, "struct", baseStructure, ident);
            }

            if (prototypeStructure) {
                out.printf(", ");
                dumpStructure(out, "prototypeStruct", prototypeStructure, ident);
            }

            if (chain) {
                out.printf(", ");
                dumpChain(out, chain, ident);
            }

            if (list) {
                out.printf(", list = %p: [", list);
                for (unsigned i = 0; i < list->size(); ++i) {
                    if (i)
                        out.printf(", ");
                    out.printf("(");
                    dumpStructure(out, "base", list->at(i).structure(), ident);
                    if (list->at(i).chain()) {
                        out.printf(", ");
                        dumpChain(out, list->at(i).chain(), ident);
                    }
                    out.printf(")");
                }
                out.printf("]");
            }
            out.printf(")");
        }
    }
#else
    UNUSED_PARAM(map);
#endif
}

void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[2].u.operand);

    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.

    if (Structure* structure = instruction[4].u.structure.get()) {
        switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
        case op_put_by_id:
        case op_put_by_id_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "struct", structure, ident);
            out.print(")");
            break;

        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "prev", structure, ident);
            out.print(", ");
            dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
                out.print(", ");
                dumpChain(out, chain, ident);
            }
            out.print(")");
            break;

        default:
            out.print(" llint(unknown)");
            break;
        }
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");

        if (stubInfo.seen) {
            out.printf(" jit(");

            switch (stubInfo.accessType) {
            case access_put_by_id_replace:
                out.print("replace, ");
                dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
                break;
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct:
                out.print("transition, ");
                dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
                out.print(", ");
                dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
                if (StructureChain* chain = stubInfo.u.putByIdTransition.chain.get()) {
                    out.print(", ");
                    dumpChain(out, chain, ident);
                }
                break;
            case access_put_by_id_list: {
                out.printf("list = [");
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                CommaPrinter comma;
                for (unsigned i = 0; i < list->size(); ++i) {
                    out.print(comma, "(");
                    const PutByIdAccess& access = list->at(i);

                    if (access.isReplace()) {
                        out.print("replace, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isSetter()) {
                        out.print("setter, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isCustom()) {
                        out.print("custom, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isTransition()) {
                        out.print("transition, ");
                        dumpStructure(out, "prev", access.oldStructure(), ident);
                        out.print(", ");
                        dumpStructure(out, "next", access.newStructure(), ident);
                        if (access.chain()) {
                            out.print(", ");
                            dumpChain(out, access.chain(), ident);
                        }
                    } else
                        out.print("unknown");

                    out.print(")");
                }
                out.print("]");
                break;
            }
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            out.printf(")");
        }
    }
#else
    UNUSED_PARAM(map);
#endif
}

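// Call instructions carry dst, callee, argument count, and register offset
// operands, followed by call link info and profiling slots. When DumpCaches
// is requested we also print the last-seen callee from both the LLInt and
// JIT caches, plus the computed call link status.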
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }

        if (jitType() != JITCode::FTLJIT)
            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#else
        UNUSED_PARAM(map);
#endif
    }
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}

void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
    it += 5;
}

void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        String source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().toString());
}

void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

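// Instructions are variable-length, so we count them by stepping through the
// stream with opcodeLengths before printing the per-opcode disassembly and
// the identifier/constant/regexp/jump-table appendices.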
void CodeBlock::dumpBytecode(PrintStream& out)
{
    // We only use the ExecState* for things that don't actually lead to JS execution,
    // like converting a JSString to a String. Hence the globalExec is appropriate.
    ExecState* exec = m_globalObject->globalExec();

    size_t instructionCount = 0;

    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
        ++instructionCount;

    out.print(*this);
    out.printf(
        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
        static_cast<unsigned long>(instructions().size()),
        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
        m_numParameters, m_numCalleeRegisters, m_numVars);
    if (needsActivation() && codeType() == FunctionCode)
        out.printf("; lexical environment in r%d", activationRegister().offset());
    out.printf("\n");

    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);

    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end; ++it)
        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);

    if (numberOfIdentifiers()) {
        out.printf("\nIdentifiers:\n");
        size_t i = 0;
        do {
            out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
            ++i;
        } while (i != numberOfIdentifiers());
    }

    if (!m_constantRegisters.isEmpty()) {
        out.printf("\nConstants:\n");
        size_t i = 0;
        do {
            const char* sourceCodeRepresentationDescription = nullptr;
            switch (m_constantsSourceCodeRepresentation[i]) {
            case SourceCodeRepresentation::Double:
                sourceCodeRepresentationDescription = ": in source as double";
                break;
            case SourceCodeRepresentation::Integer:
                sourceCodeRepresentationDescription = ": in source as integer";
                break;
            case SourceCodeRepresentation::Other:
                sourceCodeRepresentationDescription = "";
                break;
            }
            out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
            ++i;
        } while (i < m_constantRegisters.size());
    }

    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
        out.printf("\nm_regexps:\n");
        size_t i = 0;
        do {
            out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
            ++i;
        } while (i < count);
    }

    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
        out.printf("\nException Handlers:\n");
        unsigned i = 0;
        do {
            HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] } %s\n",
                i + 1, handler.start, handler.end, handler.target, handler.scopeDepth, handler.typeName());
            ++i;
        } while (i < m_rareData->m_exceptionHandlers.size());
    }

    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
        out.printf("Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf(" %1d = {\n", i);
            int entry = 0;
            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
                if (!*iter)
                    continue;
                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
            }
            out.printf(" }\n");
            ++i;
        } while (i < m_rareData->m_switchJumpTables.size());
    }

    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
        out.printf("\nString Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf(" %1d = {\n", i);
            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
            out.printf(" }\n");
            ++i;
        } while (i < m_rareData->m_stringSwitchJumpTables.size());
    }

    out.printf("\n");
}

void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
    if (hasPrintedProfiling) {
        out.print("; ");
        return;
    }

    out.print(" ");
    hasPrintedProfiling = true;
}

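// Profiles may be read concurrently by compiler threads, so both dumpers
// below take the ConcurrentJITLocker before asking a profile to describe
// itself.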
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    CString description = it->u.profile->briefDescription(locker);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}

void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    ConcurrentJITLocker locker(m_lock);

    ++it;
    if (!it->u.arrayProfile)
        return;
    CString description = it->u.arrayProfile->briefDescription(locker, this);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}

void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
{
    if (!profile || !profile->m_counter)
        return;

    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(name, profile->m_counter);
}

void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
{
    out.printf("[%4d] %-17s ", location, op);
}

void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
{
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s", registerName(operand).data());
}

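// Disassembles a single instruction. Each case advances `it` past its
// operands; `location` is the instruction's offset from the start of the
// stream, and jump targets are printed relative to it.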
void CodeBlock::dumpBytecode(
    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    int location = it - begin;
    bool hasPrintedProfiling = false;
    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
    switch (opcode) {
    case op_enter: {
        printLocationAndOp(out, exec, location, it, "enter");
        break;
    }
    case op_create_lexical_environment: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_lexical_environment");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_get_scope: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
        break;
    }
    case op_create_direct_arguments: {
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_direct_arguments");
        out.printf("%s", registerName(r0).data());
        break;
    }
    case op_create_scoped_arguments: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
        out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
        break;
    }
    case op_create_out_of_band_arguments: {
        int r0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
        out.printf("%s", registerName(r0).data());
        break;
    }
    case op_create_this: {
        int r0 = (++it)->u.operand;
        int r1 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        unsigned cachedFunction = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "create_this");
        out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
        break;
    }
    case op_to_this: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
        Structure* structure = (++it)->u.structure.get();
        if (structure)
            out.print(", cache(struct = ", RawPointer(structure), ")");
        out.print(", ", (++it)->u.toThisStatus);
        break;
    }
    case op_check_tdz: {
        int r0 = (++it)->u.operand;
        printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
        break;
    }
    case op_new_object: {
        int r0 = (++it)->u.operand;
        unsigned inferredInlineCapacity = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_object");
        out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
        ++it; // Skip object allocation profile.
        break;
    }
    case op_new_array: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array");
        out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_with_size: {
        int dst = (++it)->u.operand;
        int length = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_with_size");
        out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_array_buffer: {
        int dst = (++it)->u.operand;
        int argv = (++it)->u.operand;
        int argc = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_array_buffer");
        out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
        ++it; // Skip array allocation profile.
        break;
    }
    case op_new_regexp: {
        int r0 = (++it)->u.operand;
        int re0 = (++it)->u.operand;
        printLocationAndOp(out, exec, location, it, "new_regexp");
        out.printf("%s, ", registerName(r0).data());
        if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
860 out.printf("%s", regexpName(re0, regexp(re0)).data());
861 else
862 out.printf("bad_regexp(%d)", re0);
863 break;
864 }
865 case op_mov: {
866 int r0 = (++it)->u.operand;
867 int r1 = (++it)->u.operand;
868 printLocationAndOp(out, exec, location, it, "mov");
869 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
870 break;
871 }
872 case op_profile_type: {
873 int r0 = (++it)->u.operand;
874 ++it;
875 ++it;
876 ++it;
877 ++it;
878 printLocationAndOp(out, exec, location, it, "op_profile_type");
879 out.printf("%s", registerName(r0).data());
880 break;
881 }
882 case op_profile_control_flow: {
883 BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
884 printLocationAndOp(out, exec, location, it, "profile_control_flow");
885 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
886 break;
887 }
888 case op_not: {
889 printUnaryOp(out, exec, location, it, "not");
890 break;
891 }
892 case op_eq: {
893 printBinaryOp(out, exec, location, it, "eq");
894 break;
895 }
896 case op_eq_null: {
897 printUnaryOp(out, exec, location, it, "eq_null");
898 break;
899 }
900 case op_neq: {
901 printBinaryOp(out, exec, location, it, "neq");
902 break;
903 }
904 case op_neq_null: {
905 printUnaryOp(out, exec, location, it, "neq_null");
906 break;
907 }
908 case op_stricteq: {
909 printBinaryOp(out, exec, location, it, "stricteq");
910 break;
911 }
912 case op_nstricteq: {
913 printBinaryOp(out, exec, location, it, "nstricteq");
914 break;
915 }
916 case op_less: {
917 printBinaryOp(out, exec, location, it, "less");
918 break;
919 }
920 case op_lesseq: {
921 printBinaryOp(out, exec, location, it, "lesseq");
922 break;
923 }
924 case op_greater: {
925 printBinaryOp(out, exec, location, it, "greater");
926 break;
927 }
928 case op_greatereq: {
929 printBinaryOp(out, exec, location, it, "greatereq");
930 break;
931 }
932 case op_inc: {
933 int r0 = (++it)->u.operand;
934 printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
935 break;
936 }
937 case op_dec: {
938 int r0 = (++it)->u.operand;
939 printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
940 break;
941 }
942 case op_to_number: {
943 printUnaryOp(out, exec, location, it, "to_number");
944 break;
945 }
946 case op_to_string: {
947 printUnaryOp(out, exec, location, it, "to_string");
948 break;
949 }
950 case op_negate: {
951 printUnaryOp(out, exec, location, it, "negate");
952 break;
953 }
954 case op_add: {
955 printBinaryOp(out, exec, location, it, "add");
956 ++it;
957 break;
958 }
959 case op_mul: {
960 printBinaryOp(out, exec, location, it, "mul");
961 ++it;
962 break;
963 }
964 case op_div: {
965 printBinaryOp(out, exec, location, it, "div");
966 ++it;
967 break;
968 }
969 case op_mod: {
970 printBinaryOp(out, exec, location, it, "mod");
971 break;
972 }
973 case op_sub: {
974 printBinaryOp(out, exec, location, it, "sub");
975 ++it;
976 break;
977 }
978 case op_lshift: {
979 printBinaryOp(out, exec, location, it, "lshift");
980 break;
981 }
982 case op_rshift: {
983 printBinaryOp(out, exec, location, it, "rshift");
984 break;
985 }
986 case op_urshift: {
987 printBinaryOp(out, exec, location, it, "urshift");
988 break;
989 }
990 case op_bitand: {
991 printBinaryOp(out, exec, location, it, "bitand");
992 ++it;
993 break;
994 }
995 case op_bitxor: {
996 printBinaryOp(out, exec, location, it, "bitxor");
997 ++it;
998 break;
999 }
1000 case op_bitor: {
1001 printBinaryOp(out, exec, location, it, "bitor");
1002 ++it;
1003 break;
1004 }
1005 case op_check_has_instance: {
1006 int r0 = (++it)->u.operand;
1007 int r1 = (++it)->u.operand;
1008 int r2 = (++it)->u.operand;
1009 int offset = (++it)->u.operand;
1010 printLocationAndOp(out, exec, location, it, "check_has_instance");
1011 out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
1012 break;
1013 }
1014 case op_instanceof: {
1015 int r0 = (++it)->u.operand;
1016 int r1 = (++it)->u.operand;
1017 int r2 = (++it)->u.operand;
1018 printLocationAndOp(out, exec, location, it, "instanceof");
1019 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1020 break;
1021 }
1022 case op_unsigned: {
1023 printUnaryOp(out, exec, location, it, "unsigned");
1024 break;
1025 }
1026 case op_typeof: {
1027 printUnaryOp(out, exec, location, it, "typeof");
1028 break;
1029 }
1030 case op_is_undefined: {
1031 printUnaryOp(out, exec, location, it, "is_undefined");
1032 break;
1033 }
1034 case op_is_boolean: {
1035 printUnaryOp(out, exec, location, it, "is_boolean");
1036 break;
1037 }
1038 case op_is_number: {
1039 printUnaryOp(out, exec, location, it, "is_number");
1040 break;
1041 }
1042 case op_is_string: {
1043 printUnaryOp(out, exec, location, it, "is_string");
1044 break;
1045 }
1046 case op_is_object: {
1047 printUnaryOp(out, exec, location, it, "is_object");
1048 break;
1049 }
1050 case op_is_object_or_null: {
1051 printUnaryOp(out, exec, location, it, "is_object_or_null");
1052 break;
1053 }
1054 case op_is_function: {
1055 printUnaryOp(out, exec, location, it, "is_function");
1056 break;
1057 }
1058 case op_in: {
1059 printBinaryOp(out, exec, location, it, "in");
1060 break;
1061 }
1062 case op_init_global_const_nop: {
1063 printLocationAndOp(out, exec, location, it, "init_global_const_nop");
1064 it++;
1065 it++;
1066 it++;
1067 it++;
1068 break;
1069 }
1070 case op_init_global_const: {
1071 WriteBarrier<Unknown>* variablePointer = (++it)->u.variablePointer;
1072 int r0 = (++it)->u.operand;
1073 printLocationAndOp(out, exec, location, it, "init_global_const");
1074 out.printf("g%d(%p), %s", m_globalObject->findVariableIndex(variablePointer).offset(), variablePointer, registerName(r0).data());
1075 it++;
1076 it++;
1077 break;
1078 }
1079 case op_get_by_id:
1080 case op_get_by_id_out_of_line:
1081 case op_get_array_length: {
1082 printGetByIdOp(out, exec, location, it);
1083 printGetByIdCacheStatus(out, exec, location, stubInfos);
1084 dumpValueProfiling(out, it, hasPrintedProfiling);
1085 break;
1086 }
1087 case op_put_by_id: {
1088 printPutByIdOp(out, exec, location, it, "put_by_id");
1089 printPutByIdCacheStatus(out, exec, location, stubInfos);
1090 break;
1091 }
1092 case op_put_by_id_out_of_line: {
1093 printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
1094 printPutByIdCacheStatus(out, exec, location, stubInfos);
1095 break;
1096 }
1097 case op_put_by_id_transition_direct: {
1098 printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
1099 printPutByIdCacheStatus(out, exec, location, stubInfos);
1100 break;
1101 }
1102 case op_put_by_id_transition_direct_out_of_line: {
1103 printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
1104 printPutByIdCacheStatus(out, exec, location, stubInfos);
1105 break;
1106 }
1107 case op_put_by_id_transition_normal: {
1108 printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
1109 printPutByIdCacheStatus(out, exec, location, stubInfos);
1110 break;
1111 }
1112 case op_put_by_id_transition_normal_out_of_line: {
1113 printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1114 printPutByIdCacheStatus(out, exec, location, stubInfos);
1115 break;
1116 }
1117 case op_put_getter_by_id: {
1118 int r0 = (++it)->u.operand;
1119 int id0 = (++it)->u.operand;
1120 int r1 = (++it)->u.operand;
1121 printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1122 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1123 break;
1124 }
1125 case op_put_setter_by_id: {
1126 int r0 = (++it)->u.operand;
1127 int id0 = (++it)->u.operand;
1128 int r1 = (++it)->u.operand;
1129 printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1130 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1131 break;
1132 }
1133 case op_put_getter_setter: {
1134 int r0 = (++it)->u.operand;
1135 int id0 = (++it)->u.operand;
1136 int r1 = (++it)->u.operand;
1137 int r2 = (++it)->u.operand;
1138 printLocationAndOp(out, exec, location, it, "put_getter_setter");
1139 out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1140 break;
1141 }
1142 case op_del_by_id: {
1143 int r0 = (++it)->u.operand;
1144 int r1 = (++it)->u.operand;
1145 int id0 = (++it)->u.operand;
1146 printLocationAndOp(out, exec, location, it, "del_by_id");
1147 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1148 break;
1149 }
1150 case op_get_by_val: {
1151 int r0 = (++it)->u.operand;
1152 int r1 = (++it)->u.operand;
1153 int r2 = (++it)->u.operand;
1154 printLocationAndOp(out, exec, location, it, "get_by_val");
1155 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1156 dumpArrayProfiling(out, it, hasPrintedProfiling);
1157 dumpValueProfiling(out, it, hasPrintedProfiling);
1158 break;
1159 }
1160 case op_put_by_val: {
1161 int r0 = (++it)->u.operand;
1162 int r1 = (++it)->u.operand;
1163 int r2 = (++it)->u.operand;
1164 printLocationAndOp(out, exec, location, it, "put_by_val");
1165 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1166 dumpArrayProfiling(out, it, hasPrintedProfiling);
1167 break;
1168 }
1169 case op_put_by_val_direct: {
1170 int r0 = (++it)->u.operand;
1171 int r1 = (++it)->u.operand;
1172 int r2 = (++it)->u.operand;
1173 printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1174 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1175 dumpArrayProfiling(out, it, hasPrintedProfiling);
1176 break;
1177 }
1178 case op_del_by_val: {
1179 int r0 = (++it)->u.operand;
1180 int r1 = (++it)->u.operand;
1181 int r2 = (++it)->u.operand;
1182 printLocationAndOp(out, exec, location, it, "del_by_val");
1183 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1184 break;
1185 }
1186 case op_put_by_index: {
1187 int r0 = (++it)->u.operand;
1188 unsigned n0 = (++it)->u.operand;
1189 int r1 = (++it)->u.operand;
1190 printLocationAndOp(out, exec, location, it, "put_by_index");
1191 out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1192 break;
1193 }
1194 case op_jmp: {
1195 int offset = (++it)->u.operand;
1196 printLocationAndOp(out, exec, location, it, "jmp");
1197 out.printf("%d(->%d)", offset, location + offset);
1198 break;
1199 }
1200 case op_jtrue: {
1201 printConditionalJump(out, exec, begin, it, location, "jtrue");
1202 break;
1203 }
1204 case op_jfalse: {
1205 printConditionalJump(out, exec, begin, it, location, "jfalse");
1206 break;
1207 }
1208 case op_jeq_null: {
1209 printConditionalJump(out, exec, begin, it, location, "jeq_null");
1210 break;
1211 }
1212 case op_jneq_null: {
1213 printConditionalJump(out, exec, begin, it, location, "jneq_null");
1214 break;
1215 }
1216 case op_jneq_ptr: {
1217 int r0 = (++it)->u.operand;
1218 Special::Pointer pointer = (++it)->u.specialPointer;
1219 int offset = (++it)->u.operand;
1220 printLocationAndOp(out, exec, location, it, "jneq_ptr");
1221 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1222 break;
1223 }
1224 case op_jless: {
1225 int r0 = (++it)->u.operand;
1226 int r1 = (++it)->u.operand;
1227 int offset = (++it)->u.operand;
1228 printLocationAndOp(out, exec, location, it, "jless");
1229 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1230 break;
1231 }
1232 case op_jlesseq: {
1233 int r0 = (++it)->u.operand;
1234 int r1 = (++it)->u.operand;
1235 int offset = (++it)->u.operand;
1236 printLocationAndOp(out, exec, location, it, "jlesseq");
1237 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1238 break;
1239 }
1240 case op_jgreater: {
1241 int r0 = (++it)->u.operand;
1242 int r1 = (++it)->u.operand;
1243 int offset = (++it)->u.operand;
1244 printLocationAndOp(out, exec, location, it, "jgreater");
1245 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1246 break;
1247 }
1248 case op_jgreatereq: {
1249 int r0 = (++it)->u.operand;
1250 int r1 = (++it)->u.operand;
1251 int offset = (++it)->u.operand;
1252 printLocationAndOp(out, exec, location, it, "jgreatereq");
1253 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1254 break;
1255 }
1256 case op_jnless: {
1257 int r0 = (++it)->u.operand;
1258 int r1 = (++it)->u.operand;
1259 int offset = (++it)->u.operand;
1260 printLocationAndOp(out, exec, location, it, "jnless");
1261 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1262 break;
1263 }
1264 case op_jnlesseq: {
1265 int r0 = (++it)->u.operand;
1266 int r1 = (++it)->u.operand;
1267 int offset = (++it)->u.operand;
1268 printLocationAndOp(out, exec, location, it, "jnlesseq");
1269 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1270 break;
1271 }
1272 case op_jngreater: {
1273 int r0 = (++it)->u.operand;
1274 int r1 = (++it)->u.operand;
1275 int offset = (++it)->u.operand;
1276 printLocationAndOp(out, exec, location, it, "jngreater");
1277 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1278 break;
1279 }
1280 case op_jngreatereq: {
1281 int r0 = (++it)->u.operand;
1282 int r1 = (++it)->u.operand;
1283 int offset = (++it)->u.operand;
1284 printLocationAndOp(out, exec, location, it, "jngreatereq");
1285 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1286 break;
1287 }
1288 case op_loop_hint: {
1289 printLocationAndOp(out, exec, location, it, "loop_hint");
1290 break;
1291 }
1292 case op_switch_imm: {
1293 int tableIndex = (++it)->u.operand;
1294 int defaultTarget = (++it)->u.operand;
1295 int scrutineeRegister = (++it)->u.operand;
1296 printLocationAndOp(out, exec, location, it, "switch_imm");
1297 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1298 break;
1299 }
1300 case op_switch_char: {
1301 int tableIndex = (++it)->u.operand;
1302 int defaultTarget = (++it)->u.operand;
1303 int scrutineeRegister = (++it)->u.operand;
1304 printLocationAndOp(out, exec, location, it, "switch_char");
1305 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1306 break;
1307 }
1308 case op_switch_string: {
1309 int tableIndex = (++it)->u.operand;
1310 int defaultTarget = (++it)->u.operand;
1311 int scrutineeRegister = (++it)->u.operand;
1312 printLocationAndOp(out, exec, location, it, "switch_string");
1313 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1314 break;
1315 }
1316 case op_new_func: {
1317 int r0 = (++it)->u.operand;
1318 int r1 = (++it)->u.operand;
1319 int f0 = (++it)->u.operand;
1320 printLocationAndOp(out, exec, location, it, "new_func");
1321 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1322 break;
1323 }
1324 case op_new_func_exp: {
1325 int r0 = (++it)->u.operand;
1326 int r1 = (++it)->u.operand;
1327 int f0 = (++it)->u.operand;
1328 printLocationAndOp(out, exec, location, it, "new_func_exp");
1329 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1330 break;
1331 }
1332 case op_call: {
1333 printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1334 break;
1335 }
1336 case op_call_eval: {
1337 printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1338 break;
1339 }
1340
1341 case op_construct_varargs:
1342 case op_call_varargs: {
1343 int result = (++it)->u.operand;
1344 int callee = (++it)->u.operand;
1345 int thisValue = (++it)->u.operand;
1346 int arguments = (++it)->u.operand;
1347 int firstFreeRegister = (++it)->u.operand;
1348 int varArgOffset = (++it)->u.operand;
1349 ++it;
1350 printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
1351 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1352 dumpValueProfiling(out, it, hasPrintedProfiling);
1353 break;
1354 }
1355
1356 case op_ret: {
1357 int r0 = (++it)->u.operand;
1358 printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1359 break;
1360 }
1361 case op_construct: {
1362 printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1363 break;
1364 }
1365 case op_strcat: {
1366 int r0 = (++it)->u.operand;
1367 int r1 = (++it)->u.operand;
1368 int count = (++it)->u.operand;
1369 printLocationAndOp(out, exec, location, it, "strcat");
1370 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1371 break;
1372 }
1373 case op_to_primitive: {
1374 int r0 = (++it)->u.operand;
1375 int r1 = (++it)->u.operand;
1376 printLocationAndOp(out, exec, location, it, "to_primitive");
1377 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1378 break;
1379 }
1380 case op_get_enumerable_length: {
1381 int dst = it[1].u.operand;
1382 int base = it[2].u.operand;
1383 printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1384 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1385 it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1386 break;
1387 }
1388 case op_has_indexed_property: {
1389 int dst = it[1].u.operand;
1390 int base = it[2].u.operand;
1391 int propertyName = it[3].u.operand;
1392 ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1393 printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1394 out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1395 it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1396 break;
1397 }
1398 case op_has_structure_property: {
1399 int dst = it[1].u.operand;
1400 int base = it[2].u.operand;
1401 int propertyName = it[3].u.operand;
1402 int enumerator = it[4].u.operand;
1403 printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1404 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1405 it += OPCODE_LENGTH(op_has_structure_property) - 1;
1406 break;
1407 }
1408 case op_has_generic_property: {
1409 int dst = it[1].u.operand;
1410 int base = it[2].u.operand;
1411 int propertyName = it[3].u.operand;
1412 printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1413 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1414 it += OPCODE_LENGTH(op_has_generic_property) - 1;
1415 break;
1416 }
1417 case op_get_direct_pname: {
1418 int dst = it[1].u.operand;
1419 int base = it[2].u.operand;
1420 int propertyName = it[3].u.operand;
1421 int index = it[4].u.operand;
1422 int enumerator = it[5].u.operand;
1423 ValueProfile* profile = it[6].u.profile;
1424 printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1425 out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1426 it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1427 break;
1428
1429 }
1430 case op_get_property_enumerator: {
1431 int dst = it[1].u.operand;
1432 int base = it[2].u.operand;
1433 printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1434 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1435 it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1436 break;
1437 }
1438 case op_enumerator_structure_pname: {
1439 int dst = it[1].u.operand;
1440 int enumerator = it[2].u.operand;
1441 int index = it[3].u.operand;
1442 printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1443 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1444 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1445 break;
1446 }
1447 case op_enumerator_generic_pname: {
1448 int dst = it[1].u.operand;
1449 int enumerator = it[2].u.operand;
1450 int index = it[3].u.operand;
1451 printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1452 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1453 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1454 break;
1455 }
1456 case op_to_index_string: {
1457 int dst = it[1].u.operand;
1458 int index = it[2].u.operand;
1459 printLocationAndOp(out, exec, location, it, "op_to_index_string");
1460 out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1461 it += OPCODE_LENGTH(op_to_index_string) - 1;
1462 break;
1463 }
1464 case op_push_with_scope: {
1465 int dst = (++it)->u.operand;
1466 int newScope = (++it)->u.operand;
1467 printLocationAndOp(out, exec, location, it, "push_with_scope");
1468 out.printf("%s, %s", registerName(dst).data(), registerName(newScope).data());
1469 break;
1470 }
1471 case op_pop_scope: {
1472 int r0 = (++it)->u.operand;
1473 printLocationOpAndRegisterOperand(out, exec, location, it, "pop_scope", r0);
1474 break;
1475 }
1476 case op_push_name_scope: {
1477 int dst = (++it)->u.operand;
1478 int r1 = (++it)->u.operand;
1479 int k0 = (++it)->u.operand;
1480 JSNameScope::Type scopeType = (JSNameScope::Type)(++it)->u.operand;
1481 printLocationAndOp(out, exec, location, it, "push_name_scope");
1482 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(r1).data(), constantName(k0).data(), (scopeType == JSNameScope::FunctionNameScope) ? "functionScope" : ((scopeType == JSNameScope::CatchScope) ? "catchScope" : "unknownScopeType"));
1483 break;
1484 }
1485 case op_catch: {
1486 int r0 = (++it)->u.operand;
1487 int r1 = (++it)->u.operand;
1488 printLocationAndOp(out, exec, location, it, "catch");
1489 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1490 break;
1491 }
1492 case op_throw: {
1493 int r0 = (++it)->u.operand;
1494 printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1495 break;
1496 }
1497 case op_throw_static_error: {
1498 int k0 = (++it)->u.operand;
1499 int k1 = (++it)->u.operand;
1500 printLocationAndOp(out, exec, location, it, "throw_static_error");
1501 out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1502 break;
1503 }
1504 case op_debug: {
1505 int debugHookID = (++it)->u.operand;
1506 int hasBreakpointFlag = (++it)->u.operand;
1507 printLocationAndOp(out, exec, location, it, "debug");
1508 out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1509 break;
1510 }
1511 case op_profile_will_call: {
1512 int function = (++it)->u.operand;
1513 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1514 break;
1515 }
1516 case op_profile_did_call: {
1517 int function = (++it)->u.operand;
1518 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1519 break;
1520 }
1521 case op_end: {
1522 int r0 = (++it)->u.operand;
1523 printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1524 break;
1525 }
1526 case op_resolve_scope: {
1527 int r0 = (++it)->u.operand;
1528 int scope = (++it)->u.operand;
1529 int id0 = (++it)->u.operand;
1530 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1531 int depth = (++it)->u.operand;
1532 printLocationAndOp(out, exec, location, it, "resolve_scope");
1533 out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
1534 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1535 depth);
1536 ++it;
1537 break;
1538 }
1539 case op_get_from_scope: {
1540 int r0 = (++it)->u.operand;
1541 int r1 = (++it)->u.operand;
1542 int id0 = (++it)->u.operand;
1543 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1544 ++it; // Structure
1545 int operand = (++it)->u.operand; // Operand
1546 printLocationAndOp(out, exec, location, it, "get_from_scope");
1547 out.print(registerName(r0), ", ", registerName(r1));
1548 if (static_cast<unsigned>(id0) == UINT_MAX)
1549 out.print(", anonymous");
1550 else
1551 out.print(", ", idName(id0, identifier(id0)));
1552 out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand);
1553 dumpValueProfiling(out, it, hasPrintedProfiling);
1554 break;
1555 }
1556 case op_put_to_scope: {
1557 int r0 = (++it)->u.operand;
1558 int id0 = (++it)->u.operand;
1559 int r1 = (++it)->u.operand;
1560 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1561 ++it; // Structure
1562 int operand = (++it)->u.operand; // Operand
1563 printLocationAndOp(out, exec, location, it, "put_to_scope");
1564 out.print(registerName(r0));
1565 if (static_cast<unsigned>(id0) == UINT_MAX)
1566 out.print(", anonymous");
1567 else
1568 out.print(", ", idName(id0, identifier(id0)));
1569 out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand);
1570 break;
1571 }
1572 case op_get_from_arguments: {
1573 int r0 = (++it)->u.operand;
1574 int r1 = (++it)->u.operand;
1575 int offset = (++it)->u.operand;
1576 printLocationAndOp(out, exec, location, it, "get_from_arguments");
1577 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1578 dumpValueProfiling(out, it, hasPrintedProfiling);
1579 break;
1580 }
1581 case op_put_to_arguments: {
1582 int r0 = (++it)->u.operand;
1583 int offset = (++it)->u.operand;
1584 int r1 = (++it)->u.operand;
1585 printLocationAndOp(out, exec, location, it, "put_to_arguments");
1586 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1587 break;
1588 }
1589 default:
1590 RELEASE_ASSERT_NOT_REACHED();
1591 }
1592
1593 dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1594 dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1595
1596 #if ENABLE(DFG_JIT)
1597 Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1598 if (!exitSites.isEmpty()) {
1599 out.print(" !! frequent exits: ");
1600 CommaPrinter comma;
1601 for (unsigned i = 0; i < exitSites.size(); ++i)
1602 out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1603 }
1604 #else // ENABLE(DFG_JIT)
1605 UNUSED_PARAM(location);
1606 #endif // ENABLE(DFG_JIT)
1607 out.print("\n");
1608 }
1609
1610 void CodeBlock::dumpBytecode(
1611 PrintStream& out, unsigned bytecodeOffset,
1612 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1613 {
1614 ExecState* exec = m_globalObject->globalExec();
1615 const Instruction* it = instructions().begin() + bytecodeOffset;
1616 dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1617 }
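// A minimal usage sketch, assuming a live CodeBlock* codeBlock and a valid
// bytecodeOffset into its instruction stream; the maps come from the
// accessors defined further below, and WTF::dataFile() supplies the stream:
//
//     StubInfoMap stubInfos;
//     CallLinkInfoMap callLinkInfos;
//     codeBlock->getStubInfoMap(stubInfos);
//     codeBlock->getCallLinkInfoMap(callLinkInfos);
//     codeBlock->dumpBytecode(WTF::dataFile(), bytecodeOffset, stubInfos, callLinkInfos);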
1618
1619 #define FOR_EACH_MEMBER_VECTOR(macro) \
1620 macro(instructions) \
1621 macro(callLinkInfos) \
1622 macro(linkedCallerList) \
1623 macro(identifiers) \
1624 macro(functionExpressions) \
1625 macro(constantRegisters)
1626
1627 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1628 macro(regexps) \
1629 macro(functions) \
1630 macro(exceptionHandlers) \
1631 macro(switchJumpTables) \
1632 macro(stringSwitchJumpTables) \
1633 macro(evalCodeCache) \
1634 macro(expressionInfo) \
1635 macro(lineInfo) \
1636 macro(callReturnIndexVector)
1637
1638 template<typename T>
1639 static size_t sizeInBytes(const Vector<T>& vector)
1640 {
1641 return vector.capacity() * sizeof(T);
1642 }
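// Note: this measures each vector's allocated capacity rather than its in-use
// size, so it reflects the true memory footprint of the members listed above.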
1643
1644 namespace {
1645
1646 class PutToScopeFireDetail : public FireDetail {
1647 public:
1648 PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1649 : m_codeBlock(codeBlock)
1650 , m_ident(ident)
1651 {
1652 }
1653
1654 virtual void dump(PrintStream& out) const override
1655 {
1656 out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1657 }
1658
1659 private:
1660 CodeBlock* m_codeBlock;
1661 const Identifier& m_ident;
1662 };
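// Instances of this detail are passed to WatchpointSet::invalidate() during
// op_put_to_scope linking below, so that a watchpoint firing can be traced
// back to the specific scoped write that invalidated it.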
1663
1664 } // anonymous namespace
1665
1666 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1667 : m_globalObject(other.m_globalObject)
1668 , m_heap(other.m_heap)
1669 , m_numCalleeRegisters(other.m_numCalleeRegisters)
1670 , m_numVars(other.m_numVars)
1671 , m_isConstructor(other.m_isConstructor)
1672 , m_shouldAlwaysBeInlined(true)
1673 , m_didFailFTLCompilation(false)
1674 , m_hasBeenCompiledWithFTL(false)
1675 , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1676 , m_hasDebuggerStatement(false)
1677 , m_steppingMode(SteppingModeDisabled)
1678 , m_numBreakpoints(0)
1679 , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1680 , m_vm(other.m_vm)
1681 , m_instructions(other.m_instructions)
1682 , m_thisRegister(other.m_thisRegister)
1683 , m_scopeRegister(other.m_scopeRegister)
1684 , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
1685 , m_isStrictMode(other.m_isStrictMode)
1686 , m_needsActivation(other.m_needsActivation)
1687 , m_mayBeExecuting(false)
1688 , m_source(other.m_source)
1689 , m_sourceOffset(other.m_sourceOffset)
1690 , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1691 , m_codeType(other.m_codeType)
1692 , m_constantRegisters(other.m_constantRegisters)
1693 , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
1694 , m_functionDecls(other.m_functionDecls)
1695 , m_functionExprs(other.m_functionExprs)
1696 , m_osrExitCounter(0)
1697 , m_optimizationDelayCounter(0)
1698 , m_reoptimizationRetryCounter(0)
1699 , m_hash(other.m_hash)
1700 #if ENABLE(JIT)
1701 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1702 #endif
1703 {
1704 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1705
1706 ASSERT(m_heap->isDeferred());
1707 ASSERT(m_scopeRegister.isLocal());
1708
1709 if (SymbolTable* symbolTable = other.symbolTable())
1710 m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1711
1712 setNumParameters(other.numParameters());
1713 optimizeAfterWarmUp();
1714 jitAfterWarmUp();
1715
1716 if (other.m_rareData) {
1717 createRareDataIfNecessary();
1718
1719 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1720 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1721 m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1722 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1723 }
1724
1725 m_heap->m_codeBlocks.add(this);
1726 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
1727 }
1728
1729 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1730 : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1731 , m_heap(&m_globalObject->vm().heap)
1732 , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1733 , m_numVars(unlinkedCodeBlock->m_numVars)
1734 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1735 , m_shouldAlwaysBeInlined(true)
1736 , m_didFailFTLCompilation(false)
1737 , m_hasBeenCompiledWithFTL(false)
1738 , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1739 , m_hasDebuggerStatement(false)
1740 , m_steppingMode(SteppingModeDisabled)
1741 , m_numBreakpoints(0)
1742 , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1743 , m_vm(unlinkedCodeBlock->vm())
1744 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1745 , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1746 , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1747 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1748 , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1749 , m_mayBeExecuting(false)
1750 , m_source(sourceProvider)
1751 , m_sourceOffset(sourceOffset)
1752 , m_firstLineColumnOffset(firstLineColumnOffset)
1753 , m_codeType(unlinkedCodeBlock->codeType())
1754 , m_osrExitCounter(0)
1755 , m_optimizationDelayCounter(0)
1756 , m_reoptimizationRetryCounter(0)
1757 #if ENABLE(JIT)
1758 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1759 #endif
1760 {
1761 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1762
1763 ASSERT(m_heap->isDeferred());
1764 ASSERT(m_scopeRegister.isLocal());
1765
1766 bool didCloneSymbolTable = false;
1767
1768 if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1769 if (m_vm->typeProfiler()) {
1770 ConcurrentJITLocker locker(symbolTable->m_lock);
1771 symbolTable->prepareForTypeProfiling(locker);
1772 }
1773
1774 if (codeType() == FunctionCode && symbolTable->scopeSize()) {
1775 m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneScopePart(*m_vm));
1776 didCloneSymbolTable = true;
1777 } else
1778 m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1779 }
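// Cloning the scope part gives this CodeBlock a private copy of the closure
// variables' entries; op_put_to_scope linking below relies on this when it
// installs per-variable watchpoints (see the didCloneSymbolTable assertion).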
1780
1781 ASSERT(m_source);
1782 setNumParameters(unlinkedCodeBlock->numParameters());
1783
1784 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1785 vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
1786
1787 setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1788 if (unlinkedCodeBlock->usesGlobalObject())
1789 m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1790
1791 for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1792 LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1793 if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1794 m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
1795 }
1796
1797 m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1798 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1799 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1800 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1801 vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1802 m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1803 }
1804
1805 m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1806 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1807 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1808 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1809 vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1810 m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1811 }
1812
1813 if (unlinkedCodeBlock->hasRareData()) {
1814 createRareDataIfNecessary();
1815 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1816 m_rareData->m_constantBuffers.grow(count);
1817 for (size_t i = 0; i < count; i++) {
1818 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1819 m_rareData->m_constantBuffers[i] = buffer;
1820 }
1821 }
1822 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1823 m_rareData->m_exceptionHandlers.resizeToFit(count);
1824 size_t nonLocalScopeDepth = scope->depth();
1825 for (size_t i = 0; i < count; i++) {
1826 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1827 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1828 #if ENABLE(JIT)
1829 handler.initialize(unlinkedHandler, nonLocalScopeDepth,
1830 CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1831 #else
1832 handler.initialize(unlinkedHandler, nonLocalScopeDepth);
1833 #endif
1834 }
1835 }
1836
1837 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1838 m_rareData->m_stringSwitchJumpTables.grow(count);
1839 for (size_t i = 0; i < count; i++) {
1840 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1841 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1842 for (; ptr != end; ++ptr) {
1843 OffsetLocation offset;
1844 offset.branchOffset = ptr->value;
1845 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1846 }
1847 }
1848 }
1849
1850 if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1851 m_rareData->m_switchJumpTables.grow(count);
1852 for (size_t i = 0; i < count; i++) {
1853 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1854 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1855 destTable.branchOffsets = sourceTable.branchOffsets;
1856 destTable.min = sourceTable.min;
1857 }
1858 }
1859 }
1860
1861 // Allocate metadata buffers for the bytecode
1862 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1863 m_llintCallLinkInfos.resizeToFit(size);
1864 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1865 m_arrayProfiles.grow(size);
1866 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1867 m_arrayAllocationProfiles.resizeToFit(size);
1868 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1869 m_valueProfiles.resizeToFit(size);
1870 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1871 m_objectAllocationProfiles.resizeToFit(size);
1872
1873 // Copy and translate the UnlinkedInstructions
1874 unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1875 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1876
1877 Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1878 for (unsigned i = 0; !instructionReader.atEnd(); ) {
1879 const UnlinkedInstruction* pc = instructionReader.next();
1880
1881 unsigned opLength = opcodeLength(pc[0].u.opcode);
1882
1883 instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1884 for (size_t j = 1; j < opLength; ++j) {
1885 if (sizeof(int32_t) != sizeof(intptr_t))
1886 instructions[i + j].u.pointer = 0; // On 64-bit targets, zero the full pointer-sized slot first so the 32-bit operand write below leaves no stale upper bits.
1887 instructions[i + j].u.operand = pc[j].u.operand;
1888 }
1889 switch (pc[0].u.opcode) {
1890 case op_has_indexed_property: {
1891 int arrayProfileIndex = pc[opLength - 1].u.operand;
1892 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1893
1894 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1895 break;
1896 }
1897 case op_call_varargs:
1898 case op_construct_varargs:
1899 case op_get_by_val: {
1900 int arrayProfileIndex = pc[opLength - 2].u.operand;
1901 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1902
1903 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1904 FALLTHROUGH;
1905 }
1906 case op_get_direct_pname:
1907 case op_get_by_id:
1908 case op_get_from_arguments: {
1909 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1910 ASSERT(profile->m_bytecodeOffset == -1);
1911 profile->m_bytecodeOffset = i;
1912 instructions[i + opLength - 1] = profile;
1913 break;
1914 }
1915 case op_put_by_val: {
1916 int arrayProfileIndex = pc[opLength - 1].u.operand;
1917 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1918 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1919 break;
1920 }
1921 case op_put_by_val_direct: {
1922 int arrayProfileIndex = pc[opLength - 1].u.operand;
1923 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1924 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1925 break;
1926 }
1927
1928 case op_new_array:
1929 case op_new_array_buffer:
1930 case op_new_array_with_size: {
1931 int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1932 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1933 break;
1934 }
1935 case op_new_object: {
1936 int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1937 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1938 int inferredInlineCapacity = pc[opLength - 2].u.operand;
1939
1940 instructions[i + opLength - 1] = objectAllocationProfile;
1941 objectAllocationProfile->initialize(*vm(),
1942 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1943 break;
1944 }
1945
1946 case op_call:
1947 case op_call_eval: {
1948 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1949 ASSERT(profile->m_bytecodeOffset == -1);
1950 profile->m_bytecodeOffset = i;
1951 instructions[i + opLength - 1] = profile;
1952 int arrayProfileIndex = pc[opLength - 2].u.operand;
1953 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1954 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1955 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1956 break;
1957 }
1958 case op_construct: {
1959 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1960 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1961 ASSERT(profile->m_bytecodeOffset == -1);
1962 profile->m_bytecodeOffset = i;
1963 instructions[i + opLength - 1] = profile;
1964 break;
1965 }
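// The two forms below are specializations installed later by patching; a
// freshly unlinked instruction stream should never contain them, so reaching
// the CRASH() would indicate a corrupted stream.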
1966 case op_get_by_id_out_of_line:
1967 case op_get_array_length:
1968 CRASH();
1969
1970 case op_init_global_const_nop: {
1971 ASSERT(codeType() == GlobalCode);
1972 Identifier ident = identifier(pc[4].u.operand);
1973 SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1974 if (entry.isNull())
1975 break;
1976
1977 instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1978 instructions[i + 1] = &m_globalObject->variableAt(entry.varOffset().scopeOffset());
1979 break;
1980 }
1981
1982 case op_resolve_scope: {
1983 const Identifier& ident = identifier(pc[3].u.operand);
1984 ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1985 RELEASE_ASSERT(type != LocalClosureVar);
1986
1987 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, type);
1988 instructions[i + 4].u.operand = op.type;
1989 instructions[i + 5].u.operand = op.depth;
1990 if (op.lexicalEnvironment)
1991 instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
1992 break;
1993 }
1994
1995 case op_get_from_scope: {
1996 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1997 ASSERT(profile->m_bytecodeOffset == -1);
1998 profile->m_bytecodeOffset = i;
1999 instructions[i + opLength - 1] = profile;
2000
2001 // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
2002
2003 ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2004 if (modeAndType.type() == LocalClosureVar) {
2005 instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
2006 break;
2007 }
2008
2009 const Identifier& ident = identifier(pc[3].u.operand);
2010
2011 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, modeAndType.type());
2012
2013 instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2014 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2015 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2016 else if (op.structure)
2017 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2018 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2019 break;
2020 }
2021
2022 case op_put_to_scope: {
2023 // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
2024 ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2025 if (modeAndType.type() == LocalClosureVar) {
2026 // Only do watching if the property we're putting to is not anonymous.
2027 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
2028 RELEASE_ASSERT(didCloneSymbolTable);
2029 const Identifier& ident = identifier(pc[2].u.operand);
2030 ConcurrentJITLocker locker(m_symbolTable->m_lock);
2031 SymbolTable::Map::iterator iter = m_symbolTable->find(locker, ident.impl());
2032 ASSERT(iter != m_symbolTable->end(locker));
2033 iter->value.prepareToWatch();
2034 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2035 } else
2036 instructions[i + 5].u.watchpointSet = nullptr;
2037 break;
2038 }
2039
2040 const Identifier& ident = identifier(pc[2].u.operand);
2041
2042 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Put, modeAndType.type());
2043
2044 instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2045 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2046 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2047 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2048 if (op.watchpointSet)
2049 op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2050 } else if (op.structure)
2051 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2052 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2053
2054 break;
2055 }
2056
2057 case op_profile_type: {
2058 RELEASE_ASSERT(vm()->typeProfiler());
2059 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2060 size_t instructionOffset = i + opLength - 1;
2061 unsigned divotStart, divotEnd;
2062 GlobalVariableID globalVariableID = 0;
2063 RefPtr<TypeSet> globalTypeSet;
2064 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2065 VirtualRegister profileRegister(pc[1].u.operand);
2066 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2067 SymbolTable* symbolTable = nullptr;
2068
2069 switch (flag) {
2070 case ProfileTypeBytecodePutToScope:
2071 case ProfileTypeBytecodeGetFromScope: {
2072 const Identifier& ident = identifier(pc[4].u.operand);
2073 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2074 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);
2075
2076 // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
2077 // https://bugs.webkit.org/show_bug.cgi?id=135184
2078 if (op.type == ClosureVar)
2079 symbolTable = op.lexicalEnvironment->symbolTable();
2080 else if (op.type == GlobalVar)
2081 symbolTable = m_globalObject.get()->symbolTable();
2082
2083 if (symbolTable) {
2084 ConcurrentJITLocker locker(symbolTable->m_lock);
2085 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2086 symbolTable->prepareForTypeProfiling(locker);
2087 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2088 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2089 } else
2090 globalVariableID = TypeProfilerNoGlobalIDExists;
2091
2092 break;
2093 }
2094 case ProfileTypeBytecodePutToLocalScope:
2095 case ProfileTypeBytecodeGetFromLocalScope: {
2096 const Identifier& ident = identifier(pc[4].u.operand);
2097 symbolTable = m_symbolTable.get();
2098 ConcurrentJITLocker locker(symbolTable->m_lock);
2099 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2100 symbolTable->prepareForTypeProfiling(locker);
2101 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2102 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2103
2104 break;
2105 }
2106
2107 case ProfileTypeBytecodeHasGlobalID: {
2108 symbolTable = m_symbolTable.get();
2109 ConcurrentJITLocker locker(symbolTable->m_lock);
2110 globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
2111 globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
2112 break;
2113 }
2114 case ProfileTypeBytecodeDoesNotHaveGlobalID:
2115 case ProfileTypeBytecodeFunctionArgument: {
2116 globalVariableID = TypeProfilerNoGlobalIDExists;
2117 break;
2118 }
2119 case ProfileTypeBytecodeFunctionReturnStatement: {
2120 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2121 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2122 globalVariableID = TypeProfilerReturnStatement;
2123 if (!shouldAnalyze) {
2124 // Because a return statement can be added implicitly to return undefined at the end of a function,
2125 // and these nodes don't emit expression ranges because they aren't in the actual source text of
2126 // the user's program, give the type profiler some range to identify these return statements.
2127 // Currently, the text offset used for identification is that of the function's opening brace,
2128 // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2129 divotStart = divotEnd = m_sourceOffset;
2130 shouldAnalyze = true;
2131 }
2132 break;
2133 }
2134 }
2135
2136 std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2137 m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2138 TypeLocation* location = locationPair.first;
2139 bool isNewLocation = locationPair.second;
2140
2141 if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2142 location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
2143
2144 if (shouldAnalyze && isNewLocation)
2145 vm()->typeProfiler()->insertNewLocation(location);
2146
2147 instructions[i + 2].u.location = location;
2148 break;
2149 }
2150
2151 case op_debug: {
2152 if (pc[1].u.index == DidReachBreakpoint)
2153 m_hasDebuggerStatement = true;
2154 break;
2155 }
2156
2157 default:
2158 break;
2159 }
2160 i += opLength;
2161 }
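// At this point every instruction has been translated to its machine opcode
// and its profiling and linking slots filled in; i advanced in lockstep with
// the unlinked stream, so bytecode offsets match one-to-one.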
2162
2163 if (vm()->controlFlowProfiler())
2164 insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2165
2166 m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2167
2168 // Set optimization thresholds only after m_instructions is initialized, since these
2169 // rely on the instruction count (and are in theory permitted to also inspect the
2170 // instruction stream to more accurately assess the cost of tier-up).
2171 optimizeAfterWarmUp();
2172 jitAfterWarmUp();
2173
2174 // If the concurrent thread will want the code block's hash, then compute it here
2175 // synchronously.
2176 if (Options::alwaysComputeHash())
2177 hash();
2178
2179 if (Options::dumpGeneratedBytecodes())
2180 dumpBytecode();
2181
2182 m_heap->m_codeBlocks.add(this);
2183 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
2184 }
2185
2186 CodeBlock::~CodeBlock()
2187 {
2188 if (m_vm->m_perBytecodeProfiler)
2189 m_vm->m_perBytecodeProfiler->notifyDestruction(this);
2190
2191 #if ENABLE(VERBOSE_VALUE_PROFILE)
2192 dumpValueProfiles();
2193 #endif
2194 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2195 m_incomingLLIntCalls.begin()->remove();
2196 #if ENABLE(JIT)
2197 // We may be destroyed before any CodeBlocks that refer to us are destroyed.
2198 // Consider that two CodeBlocks become unreachable at the same time. There
2199 // is no guarantee about the order in which the CodeBlocks are destroyed.
2200 // So, if we don't remove incoming calls, and get destroyed before the
2201 // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
2202 // destructor will try to remove nodes from our (no longer valid) linked list.
2203 while (m_incomingCalls.begin() != m_incomingCalls.end())
2204 m_incomingCalls.begin()->remove();
2205 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
2206 m_incomingPolymorphicCalls.begin()->remove();
2207
2208 // Note that our outgoing calls will be removed from other CodeBlocks'
2209 // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
2210 // destructors.
2211
2212 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
2213 (*iter)->deref();
2214 #endif // ENABLE(JIT)
2215 }
2216
2217 void CodeBlock::setNumParameters(int newValue)
2218 {
2219 m_numParameters = newValue;
2220
2221 m_argumentValueProfiles.resizeToFit(newValue);
2222 }
2223
2224 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2225 {
2226 EvalCacheMap::iterator end = m_cacheMap.end();
2227 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2228 visitor.append(&ptr->value);
2229 }
2230
2231 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2232 {
2233 #if ENABLE(FTL_JIT)
2234 if (jitType() != JITCode::DFGJIT)
2235 return nullptr;
2236 DFG::JITCode* jitCode = m_jitCode->dfg();
2237 return jitCode->osrEntryBlock.get();
2238 #else // ENABLE(FTL_JIT)
2239 return nullptr;
2240 #endif // ENABLE(FTL_JIT)
2241 }
2242
2243 void CodeBlock::visitAggregate(SlotVisitor& visitor)
2244 {
2245 #if ENABLE(PARALLEL_GC)
2246 // I may be asked to scan myself more than once, and it may even happen concurrently.
2247 // To this end, use an atomic operation to check (and set) if I've been called already.
2248 // Only one thread may proceed past this point - whichever one wins the atomic set race.
2249 bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
2250 if (!setByMe)
2251 return;
2252 #endif // ENABLE(PARALLEL_GC)
2253
2254 if (!!m_alternative)
2255 m_alternative->visitAggregate(visitor);
2256
2257 if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2258 otherBlock->visitAggregate(visitor);
2259
2260 visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
2261 if (m_jitCode)
2262 visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
2263 if (m_instructions.size()) {
2264 // Divide by refCount() because m_instructions points to something that is shared
2265 // by multiple CodeBlocks, and we only want to count it towards the heap size once.
2266 // Having each CodeBlock report only its proportional share of the size is one way
2267 // of accomplishing this.
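// (For example, three CodeBlocks sharing one instruction stream each report a
// third of its size, so the stream is counted exactly once in total.)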
2268 visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
2269 }
2270
2271 visitor.append(&m_unlinkedCode);
2272
2273 // There are three things that may use unconditional finalizers: lazy bytecode freeing,
2274 // inline cache clearing, and jettisoning. The probability of us wanting to do at
2275 // least one of those things is quite close to 1. So we add one no matter what
2276 // and when it runs, it figures out whether it has any work to do.
2277 visitor.addUnconditionalFinalizer(this);
2278
2279 m_allTransitionsHaveBeenMarked = false;
2280
2281 if (shouldImmediatelyAssumeLivenessDuringScan()) {
2282 // This code block is live, so scan all references strongly and return.
2283 stronglyVisitStrongReferences(visitor);
2284 stronglyVisitWeakReferences(visitor);
2285 propagateTransitions(visitor);
2286 return;
2287 }
2288
2289 // There are two things that we use weak reference harvesters for: DFG fixpoint for
2290 // jettisoning, and trying to find structures that would be live based on some
2291 // inline cache. So it makes sense to register them regardless.
2292 visitor.addWeakReferenceHarvester(this);
2293
2294 #if ENABLE(DFG_JIT)
2295 // We get here if we're live in the sense that our owner executable is live,
2296 // but we are not yet known to be live in another sense: we may yet decide that this
2297 // code block should be jettisoned based on its outgoing weak references being
2298 // stale. Set a flag to indicate that we're still assuming that we're dead, and
2299 // perform one round of determining if we're live. The GC may determine, based on
2300 // either us marking additional objects, or by other objects being marked for
2301 // other reasons, that this iteration should run again; it will notify us of this
2302 // decision by calling harvestWeakReferences().
2303
2304 m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2305
2306 propagateTransitions(visitor);
2307 determineLiveness(visitor);
2308 #else // ENABLE(DFG_JIT)
2309 RELEASE_ASSERT_NOT_REACHED();
2310 #endif // ENABLE(DFG_JIT)
2311 }
2312
2313 bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
2314 {
2315 #if ENABLE(DFG_JIT)
2316 // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2317 // their weak references go stale. So if a baseline JIT CodeBlock gets
2318 // scanned, we can assume that it's live.
2319 if (!JITCode::isOptimizingJIT(jitType()))
2320 return true;
2321
2322 // For simplicity, we don't attempt to jettison code blocks during GC if
2323 // they are executing. Instead we strongly mark their weak references to
2324 // allow them to continue to execute soundly.
2325 if (m_mayBeExecuting)
2326 return true;
2327
2328 if (Options::forceDFGCodeBlockLiveness())
2329 return true;
2330
2331 return false;
2332 #else
2333 return true;
2334 #endif
2335 }
2336
2337 bool CodeBlock::isKnownToBeLiveDuringGC()
2338 {
2339 #if ENABLE(DFG_JIT)
2340 // This should return true for:
2341 // - Code blocks that behave like normal objects - i.e. if they are referenced then they
2342 // are live.
2343 // - Code blocks that were running on the stack.
2344 // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
2345 // because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
2346 // would survive as true.
2347 // - Code blocks that don't have any dead weak references.
2348
2349 return shouldImmediatelyAssumeLivenessDuringScan()
2350 || m_jitCode->dfgCommon()->livenessHasBeenProved;
2351 #else
2352 return true;
2353 #endif
2354 }
2355
2356 #if ENABLE(DFG_JIT)
2357 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2358 {
2359 if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2360 return false;
2361
2362 if (!Heap::isMarked(transition.m_from.get()))
2363 return false;
2364
2365 return true;
2366 }
2367 #endif // ENABLE(DFG_JIT)
2368
2369 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2370 {
2371 UNUSED_PARAM(visitor);
2372
2373 if (m_allTransitionsHaveBeenMarked)
2374 return;
2375
2376 bool allAreMarkedSoFar = true;
2377
2378 Interpreter* interpreter = m_vm->interpreter;
2379 if (jitType() == JITCode::InterpreterThunk) {
2380 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2381 for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2382 Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2383 switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2384 case op_put_by_id_transition_direct:
2385 case op_put_by_id_transition_normal:
2386 case op_put_by_id_transition_direct_out_of_line:
2387 case op_put_by_id_transition_normal_out_of_line: {
2388 if (Heap::isMarked(instruction[4].u.structure.get()))
2389 visitor.append(&instruction[6].u.structure);
2390 else
2391 allAreMarkedSoFar = false;
2392 break;
2393 }
2394 default:
2395 break;
2396 }
2397 }
2398 }
2399
2400 #if ENABLE(JIT)
2401 if (JITCode::isJIT(jitType())) {
2402 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2403 StructureStubInfo& stubInfo = **iter;
2404 switch (stubInfo.accessType) {
2405 case access_put_by_id_transition_normal:
2406 case access_put_by_id_transition_direct: {
2407 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2408 if ((!origin || Heap::isMarked(origin))
2409 && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2410 visitor.append(&stubInfo.u.putByIdTransition.structure);
2411 else
2412 allAreMarkedSoFar = false;
2413 break;
2414 }
2415
2416 case access_put_by_id_list: {
2417 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2418 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2419 if (origin && !Heap::isMarked(origin)) {
2420 allAreMarkedSoFar = false;
2421 break;
2422 }
2423 for (unsigned j = list->size(); j--;) {
2424 PutByIdAccess& access = list->m_list[j];
2425 if (!access.isTransition())
2426 continue;
2427 if (Heap::isMarked(access.oldStructure()))
2428 visitor.append(&access.m_newStructure);
2429 else
2430 allAreMarkedSoFar = false;
2431 }
2432 break;
2433 }
2434
2435 default:
2436 break;
2437 }
2438 }
2439 }
2440 #endif // ENABLE(JIT)
2441
2442 #if ENABLE(DFG_JIT)
2443 if (JITCode::isOptimizingJIT(jitType())) {
2444 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2445
2446 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2447 if (shouldMarkTransition(dfgCommon->transitions[i])) {
2448 // If the following three things are live, then the target of the
2449 // transition is also live:
2450 //
2451 // - This code block. We know it's live already because otherwise
2452 // we wouldn't be scanning ourselves.
2453 //
2454 // - The code origin of the transition. Transitions may arise from
2455 // code that was inlined. They are not relevant if the user's
2456 // object that is required for the inlinee to run is no longer
2457 // live.
2458 //
2459 // - The source of the transition. The transition checks if some
2460 // heap location holds the source, and if so, stores the target.
2461 // Hence the source must be live for the transition to be live.
2462 //
2463 // We also short-circuit the liveness if the structure is harmless
2464 // to mark (i.e. its global object and prototype are both already
2465 // live).
2466
2467 visitor.append(&dfgCommon->transitions[i].m_to);
2468 } else
2469 allAreMarkedSoFar = false;
2470 }
2471 }
2472 #endif // ENABLE(DFG_JIT)
2473
2474 if (allAreMarkedSoFar)
2475 m_allTransitionsHaveBeenMarked = true;
2476 }
2477
2478 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2479 {
2480 UNUSED_PARAM(visitor);
2481
2482 if (shouldImmediatelyAssumeLivenessDuringScan())
2483 return;
2484
2485 #if ENABLE(DFG_JIT)
2486 // Check if we have any remaining work to do.
2487 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2488 if (dfgCommon->livenessHasBeenProved)
2489 return;
2490
2491 // Now check all of our weak references. If all of them are live, then we
2492 // have proved liveness and so we scan our strong references. If at the end of
2493 // GC we still have not proved liveness, then this code block is toast.
2494 bool allAreLiveSoFar = true;
2495 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2496 if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2497 allAreLiveSoFar = false;
2498 break;
2499 }
2500 }
2501 if (allAreLiveSoFar) {
2502 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2503 if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2504 allAreLiveSoFar = false;
2505 break;
2506 }
2507 }
2508 }
2509
2510 // If some weak references are dead, then this fixpoint iteration was
2511 // unsuccessful.
2512 if (!allAreLiveSoFar)
2513 return;
2514
2515 // All weak references are live. Record this information so we don't
2516 // come back here again, and scan the strong references.
2517 dfgCommon->livenessHasBeenProved = true;
2518 stronglyVisitStrongReferences(visitor);
2519 #endif // ENABLE(DFG_JIT)
2520 }
2521
2522 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2523 {
2524 propagateTransitions(visitor);
2525 determineLiveness(visitor);
2526 }
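// The GC's marking fixpoint may call visitWeakReferences() repeatedly, so the
// two steps above re-run until liveness is proved or marking terminates.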
2527
2528 void CodeBlock::finalizeUnconditionally()
2529 {
2530 Interpreter* interpreter = m_vm->interpreter;
2531 if (JITCode::couldBeInterpreted(jitType())) {
2532 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2533 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2534 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2535 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2536 case op_get_by_id:
2537 case op_get_by_id_out_of_line:
2538 case op_put_by_id:
2539 case op_put_by_id_out_of_line:
2540 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2541 break;
2542 if (Options::verboseOSR())
2543 dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2544 curInstruction[4].u.structure.clear();
2545 curInstruction[5].u.operand = 0;
2546 break;
2547 case op_put_by_id_transition_direct:
2548 case op_put_by_id_transition_normal:
2549 case op_put_by_id_transition_direct_out_of_line:
2550 case op_put_by_id_transition_normal_out_of_line:
2551 if (Heap::isMarked(curInstruction[4].u.structure.get())
2552 && Heap::isMarked(curInstruction[6].u.structure.get())
2553 && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2554 break;
2555 if (Options::verboseOSR()) {
2556 dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2557 curInstruction[4].u.structure.get(),
2558 curInstruction[6].u.structure.get(),
2559 curInstruction[7].u.structureChain.get());
2560 }
2561 curInstruction[4].u.structure.clear();
2562 curInstruction[6].u.structure.clear();
2563 curInstruction[7].u.structureChain.clear();
2564 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2565 break;
2566 case op_get_array_length:
2567 break;
2568 case op_to_this:
2569 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2570 break;
2571 if (Options::verboseOSR())
2572 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2573 curInstruction[2].u.structure.clear();
2574 curInstruction[3].u.toThisStatus = merge(
2575 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
2576 break;
2577 case op_create_this: {
2578 auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
2579 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
2580 break;
2581 JSCell* cachedFunction = cacheWriteBarrier.get();
2582 if (Heap::isMarked(cachedFunction))
2583 break;
2584 if (Options::verboseOSR())
2585 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
2586 cacheWriteBarrier.clear();
2587 break;
2588 }
2589 case op_resolve_scope: {
2590 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
2591 // are for outer functions, and we refer to those functions strongly, and they refer
2592 // to the symbol table strongly. But it's nice to be on the safe side.
2593 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
2594 if (!symbolTable || Heap::isMarked(symbolTable.get()))
2595 break;
2596 if (Options::verboseOSR())
2597 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
2598 symbolTable.clear();
2599 break;
2600 }
2601 case op_get_from_scope:
2602 case op_put_to_scope: {
2603 ResolveModeAndType modeAndType =
2604 ResolveModeAndType(curInstruction[4].u.operand);
2605 if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
2606 continue;
2607 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2608 if (!structure || Heap::isMarked(structure.get()))
2609 break;
2610 if (Options::verboseOSR())
2611 dataLogF("Clearing scope access with structure %p.\n", structure.get());
2612 structure.clear();
2613 break;
2614 }
2615 default:
2616 OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
2617 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
2618 }
2619 }
2620
2621 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2622 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2623 if (Options::verboseOSR())
2624 dataLog("Clearing LLInt call from ", *this, "\n");
2625 m_llintCallLinkInfos[i].unlink();
2626 }
2627 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2628 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2629 }
2630 }
2631
2632 #if ENABLE(DFG_JIT)
2633 // Check whether we're known to be live. If not, then jettison.
2634 if (!isKnownToBeLiveDuringGC()) {
2635 if (Options::verboseOSR())
2636 dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2637
2638 if (DFG::shouldShowDisassembly()) {
2639 dataLog(*this, " will be jettisoned because of the following dead references:\n");
2640 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2641 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2642 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2643 JSCell* origin = transition.m_codeOrigin.get();
2644 JSCell* from = transition.m_from.get();
2645 JSCell* to = transition.m_to.get();
2646 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2647 continue;
2648 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2649 }
2650 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2651 JSCell* weak = dfgCommon->weakReferences[i].get();
2652 if (Heap::isMarked(weak))
2653 continue;
2654 dataLog(" Weak reference ", RawPointer(weak), ".\n");
2655 }
2656 }
2657
2658 jettison(Profiler::JettisonDueToWeakReference);
2659 return;
2660 }
2661 #endif // ENABLE(DFG_JIT)
2662
2663 #if ENABLE(JIT)
2664 // Handle inline caches.
2665 if (!!jitCode()) {
2666 RepatchBuffer repatchBuffer(this);
2667
2668 for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2669 (*iter)->visitWeak(repatchBuffer);
2670
2671 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2672 StructureStubInfo& stubInfo = **iter;
2673
2674 if (stubInfo.visitWeakReferences(repatchBuffer))
2675 continue;
2676
2677 resetStubDuringGCInternal(repatchBuffer, stubInfo);
2678 }
2679 }
2680 #endif
2681 }
2682
2683 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2684 {
2685 #if ENABLE(JIT)
2686 toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2687 #else
2688 UNUSED_PARAM(result);
2689 #endif
2690 }
2691
2692 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2693 {
2694 ConcurrentJITLocker locker(m_lock);
2695 getStubInfoMap(locker, result);
2696 }
2697
2698 void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
2699 {
2700 #if ENABLE(JIT)
2701 toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
2702 #else
2703 UNUSED_PARAM(result);
2704 #endif
2705 }
2706
2707 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
2708 {
2709 ConcurrentJITLocker locker(m_lock);
2710 getCallLinkInfoMap(locker, result);
2711 }
2712
2713 #if ENABLE(JIT)
2714 StructureStubInfo* CodeBlock::addStubInfo()
2715 {
2716 ConcurrentJITLocker locker(m_lock);
2717 return m_stubInfos.add();
2718 }
2719
2720 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2721 {
2722 for (StructureStubInfo* stubInfo : m_stubInfos) {
2723 if (stubInfo->codeOrigin == codeOrigin)
2724 return stubInfo;
2725 }
2726 return nullptr;
2727 }
2728
2729 CallLinkInfo* CodeBlock::addCallLinkInfo()
2730 {
2731 ConcurrentJITLocker locker(m_lock);
2732 return m_callLinkInfos.add();
2733 }
2734
2735 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2736 {
2737 if (stubInfo.accessType == access_unset)
2738 return;
2739
2740 ConcurrentJITLocker locker(m_lock);
2741
2742 RepatchBuffer repatchBuffer(this);
2743 resetStubInternal(repatchBuffer, stubInfo);
2744 }
2745
2746 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2747 {
2748 AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2749
2750 if (Options::verboseOSR()) {
2751 // This can be called during GC destruction, so we don't try to do a full dump
2752 // of the CodeBlock.
2753 dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2754 }
2755
2756 RELEASE_ASSERT(JITCode::isJIT(jitType()));
2757
2758 if (isGetByIdAccess(accessType))
2759 resetGetByID(repatchBuffer, stubInfo);
2760 else if (isPutByIdAccess(accessType))
2761 resetPutByID(repatchBuffer, stubInfo);
2762 else {
2763 RELEASE_ASSERT(isInAccess(accessType));
2764 resetIn(repatchBuffer, stubInfo);
2765 }
2766
2767 stubInfo.reset();
2768 }
2769
2770 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2771 {
2772 resetStubInternal(repatchBuffer, stubInfo);
2773 stubInfo.resetByGC = true;
2774 }
2775
2776 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2777 {
2778 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2779 if ((*iter)->codeOrigin() == CodeOrigin(index))
2780 return *iter;
2781 }
2782 return nullptr;
2783 }
2784 #endif
2785
2786 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2787 {
2788 visitor.append(&m_globalObject);
2789 visitor.append(&m_ownerExecutable);
2790 visitor.append(&m_symbolTable);
2791 visitor.append(&m_unlinkedCode);
2792 if (m_rareData)
2793 m_rareData->m_evalCodeCache.visitAggregate(visitor);
2794 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2795 for (size_t i = 0; i < m_functionExprs.size(); ++i)
2796 visitor.append(&m_functionExprs[i]);
2797 for (size_t i = 0; i < m_functionDecls.size(); ++i)
2798 visitor.append(&m_functionDecls[i]);
2799 for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2800 m_objectAllocationProfiles[i].visitAggregate(visitor);
2801
2802 #if ENABLE(DFG_JIT)
2803 if (JITCode::isOptimizingJIT(jitType())) {
2804 // FIXME: This is an antipattern for two reasons. References introduced by the DFG
2805 // that aren't in the original CodeBlock being compiled should be weakly referenced.
2806 // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also,
2807 // those weak references should already be tracked in the DFG as weak FrozenValues. So,
2808 // there is probably no need for this. We already have assertions that this should be
2809 // unnecessary.
2810 // https://bugs.webkit.org/show_bug.cgi?id=146613
2811 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2812 if (dfgCommon->inlineCallFrames.get())
2813 dfgCommon->inlineCallFrames->visitAggregate(visitor);
2814 }
2815 #endif
2816
2817 updateAllPredictions();
2818 }
2819
2820 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2821 {
2822 UNUSED_PARAM(visitor);
2823
2824 #if ENABLE(DFG_JIT)
2825 if (!JITCode::isOptimizingJIT(jitType()))
2826 return;
2827
2828 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2829
2830 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2831 if (!!dfgCommon->transitions[i].m_codeOrigin)
2832 visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2833 visitor.append(&dfgCommon->transitions[i].m_from);
2834 visitor.append(&dfgCommon->transitions[i].m_to);
2835 }
2836
2837 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2838 visitor.append(&dfgCommon->weakReferences[i]);
2839
2840 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
2841 visitor.append(&dfgCommon->weakStructureReferences[i]);
2842 #endif
2843 }
2844
2845 CodeBlock* CodeBlock::baselineAlternative()
2846 {
2847 #if ENABLE(JIT)
2848 CodeBlock* result = this;
2849 while (result->alternative())
2850 result = result->alternative();
2851 RELEASE_ASSERT(result);
2852 RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2853 return result;
2854 #else
2855 return this;
2856 #endif
2857 }
2858
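// Unlike baselineAlternative(), which walks this block's own alternative()
// chain, baselineVersion() goes through the executable's current replacement,
// falling back to this block when no replacement exists yet.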
2859 CodeBlock* CodeBlock::baselineVersion()
2860 {
2861 #if ENABLE(JIT)
2862 if (JITCode::isBaselineCode(jitType()))
2863 return this;
2864 CodeBlock* result = replacement();
2865 if (!result) {
2866 // This can happen if we're creating the original CodeBlock for an executable.
2867 // Assume that we're the baseline CodeBlock.
2868 RELEASE_ASSERT(jitType() == JITCode::None);
2869 return this;
2870 }
2871 result = result->baselineAlternative();
2872 return result;
2873 #else
2874 return this;
2875 #endif
2876 }
2877
2878 #if ENABLE(JIT)
2879 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2880 {
2881 return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
2882 }
2883
2884 bool CodeBlock::hasOptimizedReplacement()
2885 {
2886 return hasOptimizedReplacement(jitType());
2887 }
2888 #endif
2889
2890 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
2891 {
2892 RELEASE_ASSERT(bytecodeOffset < instructions().size());
2893
2894 if (!m_rareData)
2895 return nullptr;
2896
2897 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2898 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2899 HandlerInfo& handler = exceptionHandlers[i];
2900 if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
2901 continue;
2902
2903 // Handlers are ordered innermost first, so the first handler we encounter
2904 // that contains the source address is the correct handler to use.
2905 if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
2906 return &handler;
2907 }
2908
2909 return nullptr;
2910 }
2911
2912 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2913 {
2914 RELEASE_ASSERT(bytecodeOffset < instructions().size());
2915 return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2916 }
2917
2918 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2919 {
2920 int divot;
2921 int startOffset;
2922 int endOffset;
2923 unsigned line;
2924 unsigned column;
2925 expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2926 return column;
2927 }
2928
2929 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2930 {
2931 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2932 divot += m_sourceOffset;
2933 column += line ? 1 : firstLineColumnOffset(); // Only the executable's first line needs the extra column offset of where the code begins within it.
2934 line += m_ownerExecutable->firstLine();
2935 }
2936
2937 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2938 {
2939 Interpreter* interpreter = vm()->interpreter;
2940 const Instruction* begin = instructions().begin();
2941 const Instruction* end = instructions().end();
2942 for (const Instruction* it = begin; it != end;) {
2943 OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2944 if (opcodeID == op_debug) {
2945 unsigned bytecodeOffset = it - begin;
2946 int unused;
2947 unsigned opDebugLine;
2948 unsigned opDebugColumn;
2949 expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2950 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
2951 return true;
2952 }
2953 it += opcodeLengths[opcodeID];
2954 }
2955 return false;
2956 }
2957
2958 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2959 {
2960 m_rareCaseProfiles.shrinkToFit();
2961 m_specialFastCaseProfiles.shrinkToFit();
2962
2963 if (shrinkMode == EarlyShrink) {
2964 m_constantRegisters.shrinkToFit();
2965 m_constantsSourceCodeRepresentation.shrinkToFit();
2966
2967 if (m_rareData) {
2968 m_rareData->m_switchJumpTables.shrinkToFit();
2969 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2970 }
2971 } // else don't shrink these, because pointers into these tables may already have been handed out.
2972 }
2973
2974 #if ENABLE(JIT)
2975 void CodeBlock::unlinkCalls()
2976 {
2977 if (!!m_alternative)
2978 m_alternative->unlinkCalls();
2979 for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2980 if (m_llintCallLinkInfos[i].isLinked())
2981 m_llintCallLinkInfos[i].unlink();
2982 }
2983 if (m_callLinkInfos.isEmpty())
2984 return;
2985 if (!m_vm->canUseJIT())
2986 return;
2987 RepatchBuffer repatchBuffer(this);
2988 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2989 CallLinkInfo& info = **iter;
2990 if (!info.isLinked())
2991 continue;
2992 info.unlink(repatchBuffer);
2993 }
2994 }
2995
2996 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
2997 {
2998 noticeIncomingCall(callerFrame);
2999 m_incomingCalls.push(incoming);
3000 }
3001
3002 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
3003 {
3004 noticeIncomingCall(callerFrame);
3005 m_incomingPolymorphicCalls.push(incoming);
3006 }
3007 #endif // ENABLE(JIT)
3008
3009 void CodeBlock::unlinkIncomingCalls()
3010 {
3011 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
3012 m_incomingLLIntCalls.begin()->unlink();
3013 #if ENABLE(JIT)
3014 if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
3015 return;
3016 RepatchBuffer repatchBuffer(this);
3017 while (m_incomingCalls.begin() != m_incomingCalls.end())
3018 m_incomingCalls.begin()->unlink(repatchBuffer);
3019 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3020 m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
3021 #endif // ENABLE(JIT)
3022 }
3023
3024 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
3025 {
3026 noticeIncomingCall(callerFrame);
3027 m_incomingLLIntCalls.push(incoming);
3028 }
3029
3030 void CodeBlock::clearEvalCache()
3031 {
3032 if (!!m_alternative)
3033 m_alternative->clearEvalCache();
3034 if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
3035 otherBlock->clearEvalCache();
3036 if (!m_rareData)
3037 return;
3038 m_rareData->m_evalCodeCache.clear();
3039 }
3040
3041 void CodeBlock::install()
3042 {
3043 ownerExecutable()->installCode(this);
3044 }
3045
3046 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
3047 {
3048 return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
3049 }
3050
3051 #if ENABLE(JIT)
3052 CodeBlock* ProgramCodeBlock::replacement()
3053 {
3054 return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
3055 }
3056
3057 CodeBlock* EvalCodeBlock::replacement()
3058 {
3059 return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3060 }
3061
3062 CodeBlock* FunctionCodeBlock::replacement()
3063 {
3064 return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3065 }
3066
3067 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
3068 {
3069 return DFG::programCapabilityLevel(this);
3070 }
3071
3072 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
3073 {
3074 return DFG::evalCapabilityLevel(this);
3075 }
3076
3077 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
3078 {
3079 if (m_isConstructor)
3080 return DFG::functionForConstructCapabilityLevel(this);
3081 return DFG::functionForCallCapabilityLevel(this);
3082 }
3083 #endif
3084
3085 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3086 {
3087 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
3088
3089 #if ENABLE(DFG_JIT)
3090 if (DFG::shouldShowDisassembly()) {
3091 dataLog("Jettisoning ", *this);
3092 if (mode == CountReoptimization)
3093 dataLog(" and counting reoptimization");
3094 dataLog(" due to ", reason);
3095 if (detail)
3096 dataLog(", ", *detail);
3097 dataLog(".\n");
3098 }
3099
3100 DeferGCForAWhile deferGC(*m_heap);
3101 RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
3102
3103 if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3104 compilation->setJettisonReason(reason, detail);
3105
3106 // We want to accomplish two things here:
3107 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3108 // we should OSR exit at the top of the next bytecode instruction after the return.
3109 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3110
3111 // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
3112 // whether the invalidation has already happened.
3113 if (!jitCode()->dfgCommon()->invalidate()) {
3114 // Nothing to do since we've already been invalidated. That means that we cannot be
3115 // the optimized replacement.
3116 RELEASE_ASSERT(this != replacement());
3117 return;
3118 }
3119
3120 if (DFG::shouldShowDisassembly())
3121 dataLog(" Did invalidate ", *this, "\n");
3122
3123 // Count the reoptimization if that's what the user wanted.
3124 if (mode == CountReoptimization) {
3125 // FIXME: Maybe this should call alternative().
3126 // https://bugs.webkit.org/show_bug.cgi?id=123677
3127 baselineAlternative()->countReoptimization();
3128 if (DFG::shouldShowDisassembly())
3129 dataLog(" Did count reoptimization for ", *this, "\n");
3130 }
3131
3132 // Now take care of the entrypoint.
3133 if (this != replacement()) {
3134 // This means that we were never the entrypoint. This can happen for OSR entry code
3135 // blocks.
3136 return;
3137 }
3138 alternative()->optimizeAfterWarmUp();
3139 tallyFrequentExitSites();
3140 alternative()->install();
3141 if (DFG::shouldShowDisassembly())
3142 dataLog(" Did install baseline version of ", *this, "\n");
3143 #else // ENABLE(DFG_JIT)
3144 UNUSED_PARAM(mode);
3145 UNUSED_PARAM(detail);
3146 UNREACHABLE_FOR_PLATFORM();
3147 #endif // ENABLE(DFG_JIT)
3148 }
3149
3150 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3151 {
3152 if (!codeOrigin.inlineCallFrame)
3153 return globalObject();
3154 return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
3155 }
3156
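// Walks the stack from the top frame downward. Once the walk reaches
// m_startCallFrame it begins counting; if it sees m_codeBlock again within
// m_depthToCheck frames, the call is treated as recursive, and recursive
// calls are not inlined.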
3157 class RecursionCheckFunctor {
3158 public:
3159 RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3160 : m_startCallFrame(startCallFrame)
3161 , m_codeBlock(codeBlock)
3162 , m_depthToCheck(depthToCheck)
3163 , m_foundStartCallFrame(false)
3164 , m_didRecurse(false)
3165 { }
3166
3167 StackVisitor::Status operator()(StackVisitor& visitor)
3168 {
3169 CallFrame* currentCallFrame = visitor->callFrame();
3170
3171 if (currentCallFrame == m_startCallFrame)
3172 m_foundStartCallFrame = true;
3173
3174 if (m_foundStartCallFrame) {
3175 if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3176 m_didRecurse = true;
3177 return StackVisitor::Done;
3178 }
3179
3180 if (!m_depthToCheck--)
3181 return StackVisitor::Done;
3182 }
3183
3184 return StackVisitor::Continue;
3185 }
3186
3187 bool didRecurse() const { return m_didRecurse; }
3188
3189 private:
3190 CallFrame* m_startCallFrame;
3191 CodeBlock* m_codeBlock;
3192 unsigned m_depthToCheck;
3193 bool m_foundStartCallFrame;
3194 bool m_didRecurse;
3195 };
3196
3197 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3198 {
3199 CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3200
3201 if (Options::verboseCallLink())
3202 dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
3203
3204 #if ENABLE(DFG_JIT)
3205 if (!m_shouldAlwaysBeInlined)
3206 return;
3207
3208 if (!callerCodeBlock) {
3209 m_shouldAlwaysBeInlined = false;
3210 if (Options::verboseCallLink())
3211 dataLog(" Clearing SABI because caller is native.\n");
3212 return;
3213 }
3214
3215 if (!hasBaselineJITProfiling())
3216 return;
3217
3218 if (!DFG::mightInlineFunction(this))
3219 return;
3220
3221 if (!canInline(m_capabilityLevelState))
3222 return;
3223
3224 if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3225 m_shouldAlwaysBeInlined = false;
3226 if (Options::verboseCallLink())
3227 dataLog(" Clearing SABI because caller is too large.\n");
3228 return;
3229 }
3230
3231 if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3232 // If the caller is still in the interpreter, then we can't expect inlining to
3233 // happen anytime soon. Assume it's profitable to optimize it separately. This
3234 // ensures that a function is SABI only if it is called no more frequently than
3235 // any of its callers.
3236 m_shouldAlwaysBeInlined = false;
3237 if (Options::verboseCallLink())
3238 dataLog(" Clearing SABI because caller is in LLInt.\n");
3239 return;
3240 }
3241
3242 if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3243 m_shouldAlwaysBeInlined = false;
3244 if (Options::verboseCallLink())
3245 dataLog(" Clearing SABI because caller was already optimized.\n");
3246 return;
3247 }
3248
3249 if (callerCodeBlock->codeType() != FunctionCode) {
3250 // If the caller is either eval or global code, assume that it won't be
3251 // optimized anytime soon. For eval code this is particularly true since we
3252 // delay eval optimization by a *lot*.
3253 m_shouldAlwaysBeInlined = false;
3254 if (Options::verboseCallLink())
3255 dataLog(" Clearing SABI because caller is not a function.\n");
3256 return;
3257 }
3258
3259 // Recursive calls won't be inlined.
3260 RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3261 vm()->topCallFrame->iterate(functor);
3262
3263 if (functor.didRecurse()) {
3264 if (Options::verboseCallLink())
3265 dataLog(" Clearing SABI because recursion was detected.\n");
3266 m_shouldAlwaysBeInlined = false;
3267 return;
3268 }
3269
3270 if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) {
3271 dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3272 CRASH();
3273 }
3274
3275 if (canCompile(callerCodeBlock->m_capabilityLevelState))
3276 return;
3277
3278 if (Options::verboseCallLink())
3279 dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
3280
3281 m_shouldAlwaysBeInlined = false;
3282 #endif
3283 }
3284
3285 unsigned CodeBlock::reoptimizationRetryCounter() const
3286 {
3287 #if ENABLE(JIT)
3288 ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
3289 return m_reoptimizationRetryCounter;
3290 #else
3291 return 0;
3292 #endif // ENABLE(JIT)
3293 }
3294
3295 #if ENABLE(JIT)
3296 void CodeBlock::countReoptimization()
3297 {
3298 m_reoptimizationRetryCounter++;
3299 if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
3300 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
3301 }
3302
3303 unsigned CodeBlock::numberOfDFGCompiles()
3304 {
3305 ASSERT(JITCode::isBaselineCode(jitType()));
3306 if (Options::testTheFTL()) {
3307 if (m_didFailFTLCompilation)
3308 return 1000000;
3309 return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
3310 }
3311 return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
3312 }
3313
3314 int32_t CodeBlock::codeTypeThresholdMultiplier() const
3315 {
3316 if (codeType() == EvalCode)
3317 return Options::evalThresholdMultiplier();
3318
3319 return 1;
3320 }
3321
3322 double CodeBlock::optimizationThresholdScalingFactor()
3323 {
3324 // This expression arises from doing a least-squares fit of
3325 //
3326 // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
3327 //
3328 // against the data points:
3329 //
3330 // x F[x_]
3331 // 10 0.9 (smallest reasonable code block)
3332 // 200 1.0 (typical small-ish code block)
3333 // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
3334 // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
3335 // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
3336 // 10000 6.0 (similar to above)
3337 //
3338 // I achieve the minimization using the following Mathematica code:
3339 //
3340 // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
3341 //
3342 // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
3343 //
3344 // solution =
3345 // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
3346 // {a, b, c, d}][[2]]
3347 //
3348 // And the code below (to initialize a, b, c, d) is generated by:
3349 //
3350 // Print["const double " <> ToString[#[[1]]] <> " = " <>
3351 // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
3352 //
3353 // We've long known the following to be true:
3354 // - Small code blocks are cheap to optimize and so we should do it sooner rather
3355 // than later.
3356 // - Large code blocks are expensive to optimize and so we should postpone doing so,
3357 // and sometimes have a large enough threshold that we never optimize them.
3358 // - The difference in cost is not totally linear because (a) just invoking the
3359 // DFG incurs some base cost and (b) for large code blocks there is enough slop
3360 // in the correlation between instruction count and the actual compilation cost
3361 // that for those large blocks, the instruction count should not have a strong
3362 // influence on our threshold.
3363 //
3364 // I knew the goals but I didn't know how to achieve them; so I picked an interesting
3365 // example where the heuristics were right (code block in 3d-cube with instruction
3366 // count 320, which got compiled early as it should have been) and one where they were
3367 // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
3368 // to compile and didn't run often enough to warrant compilation in my opinion), and
3369 // then threw in additional data points that represented my own guess of what our
3370 // heuristics should do for some round-numbered examples.
3371 //
3372 // The expression to which I decided to fit the data arose because I started with an
3373 // affine function, and then did two things: put the linear part in an Abs to ensure
3374 // that the fit didn't end up choosing a negative value of c (which would result in
3375 // the function turning over and going negative for large x) and I threw in a Sqrt
3376 // term because Sqrt represents my intuition that the function should be more sensitive
3377 // to small changes in small values of x, but less sensitive when x gets large.
3378
3379 // Note that the current fit essentially eliminates the linear portion of the
3380 // expression (c == 0.0).
3381 const double a = 0.061504;
3382 const double b = 1.02406;
3383 const double c = 0.0;
3384 const double d = 0.825914;
3385
3386 double instructionCount = this->instructionCount();
3387
3388 ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
3389
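// As a rough, approximate illustration of the curve's shape with the
// constants above: instructionCount == 100 gives about
// 0.826 + 0.0615 * sqrt(101) ~= 1.44, while instructionCount == 1000 gives
// about 2.77. A 10x growth in code size slightly less than doubles the
// scaling factor, which matches the intent described above.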
3390 double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
3391
3392 result *= codeTypeThresholdMultiplier();
3393
3394 if (Options::verboseOSR()) {
3395 dataLog(
3396 *this, ": instruction count is ", instructionCount,
3397 ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
3398 "\n");
3399 }
3400 return result;
3401 }
3402
3403 static int32_t clipThreshold(double threshold)
3404 {
3405 if (threshold < 1.0)
3406 return 1;
3407
3408 if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3409 return std::numeric_limits<int32_t>::max();
3410
3411 return static_cast<int32_t>(threshold);
3412 }
3413
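// Example with made-up numbers: a desired threshold of 1000, a scaling
// factor of 2.0, and a reoptimization retry counter of 2 yield
// clipThreshold(1000 * 2.0 * (1 << 2)) == 8000. Each reoptimization doubles
// the wait before the next tier-up attempt.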
3414 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3415 {
3416 return clipThreshold(
3417 static_cast<double>(desiredThreshold) *
3418 optimizationThresholdScalingFactor() *
3419 (1 << reoptimizationRetryCounter()));
3420 }
3421
3422 bool CodeBlock::checkIfOptimizationThresholdReached()
3423 {
3424 #if ENABLE(DFG_JIT)
3425 if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
3426 if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
3427 == DFG::Worklist::Compiled) {
3428 optimizeNextInvocation();
3429 return true;
3430 }
3431 }
3432 #endif
3433
3434 return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
3435 }
3436
3437 void CodeBlock::optimizeNextInvocation()
3438 {
3439 if (Options::verboseOSR())
3440 dataLog(*this, ": Optimizing next invocation.\n");
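// A zero threshold makes the execution counter trip on the very next check.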
3441 m_jitExecuteCounter.setNewThreshold(0, this);
3442 }
3443
3444 void CodeBlock::dontOptimizeAnytimeSoon()
3445 {
3446 if (Options::verboseOSR())
3447 dataLog(*this, ": Not optimizing anytime soon.\n");
3448 m_jitExecuteCounter.deferIndefinitely();
3449 }
3450
3451 void CodeBlock::optimizeAfterWarmUp()
3452 {
3453 if (Options::verboseOSR())
3454 dataLog(*this, ": Optimizing after warm-up.\n");
3455 #if ENABLE(DFG_JIT)
3456 m_jitExecuteCounter.setNewThreshold(
3457 adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
3458 #endif
3459 }
3460
3461 void CodeBlock::optimizeAfterLongWarmUp()
3462 {
3463 if (Options::verboseOSR())
3464 dataLog(*this, ": Optimizing after long warm-up.\n");
3465 #if ENABLE(DFG_JIT)
3466 m_jitExecuteCounter.setNewThreshold(
3467 adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
3468 #endif
3469 }
3470
3471 void CodeBlock::optimizeSoon()
3472 {
3473 if (Options::verboseOSR())
3474 dataLog(*this, ": Optimizing soon.\n");
3475 #if ENABLE(DFG_JIT)
3476 m_jitExecuteCounter.setNewThreshold(
3477 adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
3478 #endif
3479 }
3480
3481 void CodeBlock::forceOptimizationSlowPathConcurrently()
3482 {
3483 if (Options::verboseOSR())
3484 dataLog(*this, ": Forcing slow path concurrently.\n");
3485 m_jitExecuteCounter.forceSlowPathConcurrently();
3486 }
3487
3488 #if ENABLE(DFG_JIT)
3489 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3490 {
3491 JITCode::JITType type = jitType();
3492 if (type != JITCode::BaselineJIT) {
3493 dataLog(*this, ": expected to have baseline code but have ", type, "\n");
3494 RELEASE_ASSERT_NOT_REACHED();
3495 }
3496
3497 CodeBlock* theReplacement = replacement();
3498 if ((result == CompilationSuccessful) != (theReplacement != this)) {
3499 dataLog(*this, ": we have result = ", result, " but ");
3500 if (theReplacement == this)
3501 dataLog("we are our own replacement.\n");
3502 else
3503 dataLog("our replacement is ", pointerDump(theReplacement), "\n");
3504 RELEASE_ASSERT_NOT_REACHED();
3505 }
3506
3507 switch (result) {
3508 case CompilationSuccessful:
3509 RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3510 optimizeNextInvocation();
3511 return;
3512 case CompilationFailed:
3513 dontOptimizeAnytimeSoon();
3514 return;
3515 case CompilationDeferred:
3516 // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3517 // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3518 // necessarily guarantee anything. So, we make sure that even if that
3519 // function ends up being a no-op, we still eventually retry and realize
3520 // that we have optimized code ready.
3521 optimizeAfterWarmUp();
3522 return;
3523 case CompilationInvalidated:
3524 // Retry with exponential backoff.
3525 countReoptimization();
3526 optimizeAfterWarmUp();
3527 return;
3528 }
3529
3530 dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
3531 RELEASE_ASSERT_NOT_REACHED();
3532 }
3533
3534 #endif
3535
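// Example with made-up numbers: a desired threshold of 100 and a baseline
// reoptimization retry counter of 3 yield 100 << 3 == 800. On overflow the
// loop below saturates at UINT32_MAX rather than wrapping.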
3536 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3537 {
3538 ASSERT(JITCode::isOptimizingJIT(jitType()));
3539 // Compute this the lame way so we detect overflow instead of wrapping. This is called infrequently
3540 // enough that this loop won't hurt us.
3541 unsigned result = desiredThreshold;
3542 for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3543 unsigned newResult = result << 1;
3544 if (newResult < result)
3545 return std::numeric_limits<uint32_t>::max();
3546 result = newResult;
3547 }
3548 return result;
3549 }
3550
3551 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3552 {
3553 return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3554 }
3555
3556 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3557 {
3558 return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3559 }
3560
3561 bool CodeBlock::shouldReoptimizeNow()
3562 {
3563 return osrExitCounter() >= exitCountThresholdForReoptimization();
3564 }
3565
3566 bool CodeBlock::shouldReoptimizeFromLoopNow()
3567 {
3568 return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3569 }
3570 #endif
3571
3572 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3573 {
3574 for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3575 if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3576 return &m_arrayProfiles[i];
3577 }
3578 return 0;
3579 }
3580
3581 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3582 {
3583 ArrayProfile* result = getArrayProfile(bytecodeOffset);
3584 if (result)
3585 return result;
3586 return addArrayProfile(bytecodeOffset);
3587 }
3588
3589 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
3590 {
3591 ConcurrentJITLocker locker(m_lock);
3592
3593 numberOfLiveNonArgumentValueProfiles = 0;
3594 numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
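// (Illustration with made-up sizes: 8 buckets per profile and 10 profiles
// cap the total at 80 samples, and 80 / 8 == 10 == numberOfValueProfiles().)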
3595 for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3596 ValueProfile* profile = getFromAllValueProfiles(i);
3597 unsigned numSamples = profile->totalNumberOfSamples();
3598 if (numSamples > ValueProfile::numberOfBuckets)
3599 numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
3600 numberOfSamplesInProfiles += numSamples;
3601 if (profile->m_bytecodeOffset < 0) {
3602 profile->computeUpdatedPrediction(locker);
3603 continue;
3604 }
3605 if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
3606 numberOfLiveNonArgumentValueProfiles++;
3607 profile->computeUpdatedPrediction(locker);
3608 }
3609
3610 #if ENABLE(DFG_JIT)
3611 m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
3612 #endif
3613 }
3614
3615 void CodeBlock::updateAllValueProfilePredictions()
3616 {
3617 unsigned ignoredValue1, ignoredValue2;
3618 updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
3619 }
3620
3621 void CodeBlock::updateAllArrayPredictions()
3622 {
3623 ConcurrentJITLocker locker(m_lock);
3624
3625 for (unsigned i = m_arrayProfiles.size(); i--;)
3626 m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3627
3628 // Don't count these either, for similar reasons.
3629 for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3630 m_arrayAllocationProfiles[i].updateIndexingType();
3631 }
3632
3633 void CodeBlock::updateAllPredictions()
3634 {
3635 updateAllValueProfilePredictions();
3636 updateAllArrayPredictions();
3637 }
3638
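// Illustration of the decision below, with made-up rates: if the desired
// liveness rate were 0.75 and the desired fullness rate were 0.35, a block
// whose value profiles are 80% live and 40% full (and whose delay counter
// has matured) would optimize now; anything short of that bumps
// m_optimizationDelayCounter and retries after another warm-up period.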
3639 bool CodeBlock::shouldOptimizeNow()
3640 {
3641 if (Options::verboseOSR())
3642 dataLog("Considering optimizing ", *this, "...\n");
3643
3644 if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
3645 return true;
3646
3647 updateAllArrayPredictions();
3648
3649 unsigned numberOfLiveNonArgumentValueProfiles;
3650 unsigned numberOfSamplesInProfiles;
3651 updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
3652
3653 if (Options::verboseOSR()) {
3654 dataLogF(
3655 "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
3656 (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
3657 numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
3658 (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
3659 numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
3660 }
3661
3662 if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
3663 && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
3664 && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
3665 return true;
3666
3667 ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
3668 m_optimizationDelayCounter++;
3669 optimizeAfterWarmUp();
3670 return false;
3671 }
3672
3673 #if ENABLE(DFG_JIT)
3674 void CodeBlock::tallyFrequentExitSites()
3675 {
3676 ASSERT(JITCode::isOptimizingJIT(jitType()));
3677 ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3678
3679 CodeBlock* profiledBlock = alternative();
3680
3681 switch (jitType()) {
3682 case JITCode::DFGJIT: {
3683 DFG::JITCode* jitCode = m_jitCode->dfg();
3684 for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3685 DFG::OSRExit& exit = jitCode->osrExit[i];
3686 exit.considerAddingAsFrequentExitSite(profiledBlock);
3687 }
3688 break;
3689 }
3690
3691 #if ENABLE(FTL_JIT)
3692 case JITCode::FTLJIT: {
3693 // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
3694 // vector contains a totally different type that just so happens to behave like
3695 // DFG::JITCode::osrExit.
3696 FTL::JITCode* jitCode = m_jitCode->ftl();
3697 for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3698 FTL::OSRExit& exit = jitCode->osrExit[i];
3699 exit.considerAddingAsFrequentExitSite(profiledBlock);
3700 }
3701 break;
3702 }
3703 #endif
3704
3705 default:
3706 RELEASE_ASSERT_NOT_REACHED();
3707 break;
3708 }
3709 }
3710 #endif // ENABLE(DFG_JIT)
3711
3712 #if ENABLE(VERBOSE_VALUE_PROFILE)
3713 void CodeBlock::dumpValueProfiles()
3714 {
3715 dataLog("ValueProfile for ", *this, ":\n");
3716 for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3717 ValueProfile* profile = getFromAllValueProfiles(i);
3718 if (profile->m_bytecodeOffset < 0) {
3719 ASSERT(profile->m_bytecodeOffset == -1);
3720 dataLogF(" arg = %u: ", i);
3721 } else
3722 dataLogF(" bc = %d: ", profile->m_bytecodeOffset);
3723 if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
3724 dataLogF("<empty>\n");
3725 continue;
3726 }
3727 profile->dump(WTF::dataFile());
3728 dataLogF("\n");
3729 }
3730 dataLog("RareCaseProfile for ", *this, ":\n");
3731 for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
3732 RareCaseProfile* profile = rareCaseProfile(i);
3733 dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3734 }
3735 dataLog("SpecialFastCaseProfile for ", *this, ":\n");
3736 for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
3737 RareCaseProfile* profile = specialFastCaseProfile(i);
3738 dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3739 }
3740 }
3741 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
3742
3743 unsigned CodeBlock::frameRegisterCount()
3744 {
3745 switch (jitType()) {
3746 case JITCode::InterpreterThunk:
3747 return LLInt::frameRegisterCountFor(this);
3748
3749 #if ENABLE(JIT)
3750 case JITCode::BaselineJIT:
3751 return JIT::frameRegisterCountFor(this);
3752 #endif // ENABLE(JIT)
3753
3754 #if ENABLE(DFG_JIT)
3755 case JITCode::DFGJIT:
3756 case JITCode::FTLJIT:
3757 return jitCode()->dfgCommon()->frameRegisterCount;
3758 #endif // ENABLE(DFG_JIT)
3759
3760 default:
3761 RELEASE_ASSERT_NOT_REACHED();
3762 return 0;
3763 }
3764 }
3765
3766 int CodeBlock::stackPointerOffset()
3767 {
3768 return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
3769 }
3770
3771 size_t CodeBlock::predictedMachineCodeSize()
3772 {
3773 // This will be called from CodeBlock::CodeBlock before either m_vm or the
3774 // instructions have been initialized. It's OK to return 0 because what will really
3775 // matter is the recomputation of this value when the slow path is triggered.
3776 if (!m_vm)
3777 return 0;
3778
3779 if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
3780 return 0; // It's as good a prediction as we'll get.
3781
3782 // Be conservative: return a size that will be an overestimation 84% of the time.
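// (Mean plus one standard deviation is roughly the 84th percentile of a
// normal distribution, hence the 84% figure, assuming the bytes-per-bytecode
// ratio is approximately normal.)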
3783 double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
3784 m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
3785
3786 // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
3787 // here is OK, since this whole method is just a heuristic.
3788 if (multiplier < 0 || multiplier > 1000)
3789 return 0;
3790
3791 double doubleResult = multiplier * m_instructions.size();
3792
3793 // Be even more paranoid: silently reject values that won't fit into a size_t. If
3794 // the function is so huge that we can't even fit it into virtual memory then we
3795 // should probably have some other guards in place to prevent us from even getting
3796 // to this point.
3797 if (doubleResult > std::numeric_limits<size_t>::max())
3798 return 0;
3799
3800 return static_cast<size_t>(doubleResult);
3801 }
3802
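// Linear scan over the instruction stream, so this is only suitable for
// one-off queries. A hypothetical use: usesOpcode(op_catch) to test whether
// a block contains an exception handler entry point.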
3803 bool CodeBlock::usesOpcode(OpcodeID opcodeID)
3804 {
3805 Interpreter* interpreter = vm()->interpreter;
3806 Instruction* instructionsBegin = instructions().begin();
3807 unsigned instructionCount = instructions().size();
3808
3809 for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
3810 switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
3811 #define DEFINE_OP(curOpcode, length) \
3812 case curOpcode: \
3813 if (curOpcode == opcodeID) \
3814 return true; \
3815 bytecodeOffset += length; \
3816 break;
3817 FOR_EACH_OPCODE_ID(DEFINE_OP)
3818 #undef DEFINE_OP
3819 default:
3820 RELEASE_ASSERT_NOT_REACHED();
3821 break;
3822 }
3823 }
3824
3825 return false;
3826 }
3827
3828 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
3829 {
3830 ConcurrentJITLocker locker(symbolTable()->m_lock);
3831 SymbolTable::Map::iterator end = symbolTable()->end(locker);
3832 for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
3833 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
3834 // FIXME: This won't work from the compilation thread.
3835 // https://bugs.webkit.org/show_bug.cgi?id=115300
3836 return ptr->key.get();
3837 }
3838 }
3839 if (virtualRegister == thisRegister())
3840 return ASCIILiteral("this");
3841 if (virtualRegister.isArgument())
3842 return String::format("arguments[%3d]", virtualRegister.toArgument());
3843
3844 return "";
3845 }
3846
3847 ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
3848 {
3849 ValueProfile* result = binarySearch<ValueProfile, int>(
3850 m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
3851 getValueProfileBytecodeOffset<ValueProfile>);
3852 ASSERT(result->m_bytecodeOffset != -1);
3853 ASSERT(instructions()[bytecodeOffset + opcodeLength(
3854 m_vm->interpreter->getOpcodeID(
3855 instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
3856 return result;
3857 }
3858
3859 void CodeBlock::validate()
3860 {
3861 BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
3862
3863 FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
3864
3865 if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
3866 beginValidationDidFail();
3867 dataLog(" Wrong number of bits in result!\n");
3868 dataLog(" Result: ", liveAtHead, "\n");
3869 dataLog(" Bit count: ", liveAtHead.numBits(), "\n");
3870 endValidationDidFail();
3871 }
3872
3873 for (unsigned i = m_numCalleeRegisters; i--;) {
3874 VirtualRegister reg = virtualRegisterForLocal(i);
3875
3876 if (liveAtHead.get(i)) {
3877 beginValidationDidFail();
3878 dataLog(" Variable ", reg, " is expected to be dead.\n");
3879 dataLog(" Result: ", liveAtHead, "\n");
3880 endValidationDidFail();
3881 }
3882 }
3883 }
3884
3885 void CodeBlock::beginValidationDidFail()
3886 {
3887 dataLog("Validation failure in ", *this, ":\n");
3888 dataLog("\n");
3889 }
3890
3891 void CodeBlock::endValidationDidFail()
3892 {
3893 dataLog("\n");
3894 dumpBytecode();
3895 dataLog("\n");
3896 dataLog("Validation failure.\n");
3897 RELEASE_ASSERT_NOT_REACHED();
3898 }
3899
3900 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3901 {
3902 m_numBreakpoints += numBreakpoints;
3903 ASSERT(m_numBreakpoints);
3904 if (JITCode::isOptimizingJIT(jitType()))
3905 jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3906 }
3907
3908 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3909 {
3910 m_steppingMode = mode;
3911 if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3912 jettison(Profiler::JettisonDueToDebuggerStepping);
3913 }
3914
3915 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
3916 {
3917 return tryBinarySearch<RareCaseProfile, int>(
3918 m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
3919 getRareCaseProfileBytecodeOffset);
3920 }
3921
3922 #if ENABLE(JIT)
3923 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3924 {
3925 DFG::CapabilityLevel result = capabilityLevelInternal();
3926 m_capabilityLevelState = result;
3927 return result;
3928 }
3929 #endif
3930
3931 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(Vector<Instruction, 0, UnsafeVectorOverflow>& instructions)
3932 {
3933 const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3934 for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3935 // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
3936 // the next op_profile_control_flow will give us the text range of a single basic block.
3937 size_t startIdx = bytecodeOffsets[i];
3938 RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
3939 int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
3940 int basicBlockEndOffset;
3941 if (i + 1 < offsetsLength) {
3942 size_t endIdx = bytecodeOffsets[i + 1];
3943 RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
3944 basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
3945 } else {
3946 basicBlockEndOffset = m_sourceOffset + m_ownerExecutable->source().length() - 1; // Offset before the closing brace.
3947 basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
3948 }
3949
3950 // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3951 // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
3952 // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
3953 // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
3954 // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
3955 // program. The condition:
3956 // (basicBlockEndOffset < basicBlockStartOffset)
3957 // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
3958 // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
3959 // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
3960 // internal data structure, so if any of them execute, it will record the same textual basic block in the
3961 // JavaScript program as executing.
3962 // At the bytecode level, this situation looks like:
3963 // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3964 // ...
3965 // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3966 // ...
3967 // m: op_profile_control_flow
3968 if (basicBlockEndOffset < basicBlockStartOffset) {
3969 RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3970 instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3971 continue;
3972 }
3973
3974 BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(m_ownerExecutable->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3975
3976 // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3977 // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3978 // This is necessary because in the original source text of a JavaScript program,
3979 // function literals form new basic blocks boundaries, but they aren't represented
3980 // inside the CodeBlock's instruction stream.
3981 auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3982 const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3983 int functionStart = executable->typeProfilingStartOffset();
3984 int functionEnd = executable->typeProfilingEndOffset();
3985 if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3986 basicBlockLocation->insertGap(functionStart, functionEnd);
3987 };
3988
3989 for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3990 insertFunctionGaps(executable);
3991 for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3992 insertFunctionGaps(executable);
3993
3994 instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
3995 }
3996 }
3997
3998 } // namespace JSC