+ ASSERT(m_heap->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
+
+ bool didCloneSymbolTable = false;
+
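+ // Function code with captured variables gets its own clone of the unlinked
+ // symbol table's scope part; the op_put_to_scope case below relies on this
+ // (see the didCloneSymbolTable assertion there) so that watchpoints land on
+ // CodeBlock-private state rather than the shared unlinked table.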
+ if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
+ if (m_vm->typeProfiler()) {
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ symbolTable->prepareForTypeProfiling(locker);
+ }
+
+ if (codeType() == FunctionCode && symbolTable->scopeSize()) {
+ m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneScopePart(*m_vm));
+ didCloneSymbolTable = true;
+ } else
+ m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
+ }
+
+ ASSERT(m_source);
+ setNumParameters(unlinkedCodeBlock->numParameters());
+
+ if (vm()->typeProfiler() || vm()->controlFlowProfiler())
+ vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
+
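+ // Import the constant pool, then patch in the values that are only known at
+ // link time: the global object itself and any link-time constants.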
+ setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
+ if (unlinkedCodeBlock->usesGlobalObject())
+ m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
+
+ for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+ LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+ if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+ m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
+ }
+
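+ // Link every function declaration and expression against this executable's
+ // source, registering their ranges as unexecuted for the type and
+ // control-flow profilers.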
+ m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
+ for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
+ UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
+ if (vm()->typeProfiler() || vm()->controlFlowProfiler())
+ vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+ }
+
+ m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
+ for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
+ UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
+ if (vm()->typeProfiler() || vm()->controlFlowProfiler())
+ vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+ }
+
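+ // Rare data (constant buffers, exception handlers, and switch jump tables)
+ // is materialized only when the unlinked block actually carries it.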
+ if (unlinkedCodeBlock->hasRareData()) {
+ createRareDataIfNecessary();
+ if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
+ m_rareData->m_constantBuffers.grow(count);
+ for (size_t i = 0; i < count; i++) {
+ const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
+ m_rareData->m_constantBuffers[i] = buffer;
+ }
+ }
+ if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
+ m_rareData->m_exceptionHandlers.resizeToFit(count);
+ size_t nonLocalScopeDepth = scope->depth();
+ for (size_t i = 0; i < count; i++) {
+ const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+ handler.initialize(unlinkedHandler, nonLocalScopeDepth,
+ CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+ handler.initialize(unlinkedHandler, nonLocalScopeDepth);
+#endif
+ }
+ }
+
+ if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
+ m_rareData->m_stringSwitchJumpTables.grow(count);
+ for (size_t i = 0; i < count; i++) {
+ UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
+ UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
+ for (; ptr != end; ++ptr) {
+ OffsetLocation offset;
+ offset.branchOffset = ptr->value;
+ m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
+ }
+ }
+ }
+
+ if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
+ m_rareData->m_switchJumpTables.grow(count);
+ for (size_t i = 0; i < count; i++) {
+ UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
+ SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
+ destTable.branchOffsets = sourceTable.branchOffsets;
+ destTable.min = sourceTable.min;
+ }
+ }
+ }
+
+ // Allocate metadata buffers for the bytecode
+ if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
+ m_llintCallLinkInfos.resizeToFit(size);
+ if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
+ m_arrayProfiles.grow(size);
+ if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
+ m_arrayAllocationProfiles.resizeToFit(size);
+ if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
+ m_valueProfiles.resizeToFit(size);
+ if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
+ m_objectAllocationProfiles.resizeToFit(size);
+
+ // Copy and translate the UnlinkedInstructions
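+ // Each opcode is mapped through the interpreter's getOpcode() (a
+ // direct-threaded entry address when computed gotos are enabled), and the
+ // switch below rewrites profiling and metadata operands from indices into
+ // direct pointers.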
+ unsigned instructionCount = unlinkedCodeBlock->instructions().count();
+ UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
+
+ Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
+ for (unsigned i = 0; !instructionReader.atEnd(); ) {
+ const UnlinkedInstruction* pc = instructionReader.next();
+
+ unsigned opLength = opcodeLength(pc[0].u.opcode);
+
+ instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
+ for (size_t j = 1; j < opLength; ++j) {
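+ // On 64-bit targets, zero the full pointer-width union slot first so its
+ // high half is not left uninitialized when only a 32-bit operand is stored.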
+ if (sizeof(int32_t) != sizeof(intptr_t))
+ instructions[i + j].u.pointer = 0;
+ instructions[i + j].u.operand = pc[j].u.operand;
+ }
+ switch (pc[0].u.opcode) {
+ case op_has_indexed_property: {
+ int arrayProfileIndex = pc[opLength - 1].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+ instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ break;
+ }
+ case op_call_varargs:
+ case op_construct_varargs:
+ case op_get_by_val: {
+ int arrayProfileIndex = pc[opLength - 2].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+ instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
+ FALLTHROUGH;
+ }
+ case op_get_direct_pname:
+ case op_get_by_id:
+ case op_get_from_arguments: {
+ ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+ break;
+ }
+ case op_put_by_val:
+ case op_put_by_val_direct: {
+ int arrayProfileIndex = pc[opLength - 1].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+ instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ break;
+ }
+
+ case op_new_array:
+ case op_new_array_buffer:
+ case op_new_array_with_size: {
+ int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
+ instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
+ break;
+ }
+ case op_new_object: {
+ int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
+ ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
+ int inferredInlineCapacity = pc[opLength - 2].u.operand;
+
+ instructions[i + opLength - 1] = objectAllocationProfile;
+ objectAllocationProfile->initialize(*vm(),
+ m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+ break;
+ }
+
+ case op_call:
+ case op_call_eval: {
+ ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+ int arrayProfileIndex = pc[opLength - 2].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+ instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
+ instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
+ break;
+ }
+ case op_construct: {
+ instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
+ ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+ break;
+ }
+ case op_get_by_id_out_of_line:
+ case op_get_array_length:
+ CRASH();
+
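+ // init_global_const is emitted as a nop; it is patched into a real
+ // op_init_global_const, pointing at the resolved variable slot, only when
+ // the identifier is actually present in the global symbol table.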
+ case op_init_global_const_nop: {
+ ASSERT(codeType() == GlobalCode);
+ Identifier ident = identifier(pc[4].u.operand);
+ SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
+ if (entry.isNull())
+ break;
+
+ instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
+ instructions[i + 1] = &m_globalObject->variableAt(entry.varOffset().scopeOffset());
+ break;
+ }
+
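+ // The scope-access cases below concretize generic resolves at link time:
+ // JSScope::abstractResolve() inspects the actual scope chain, and the
+ // instruction's ResolveType and operands are rewritten to the resolved form.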
+ case op_resolve_scope: {
+ const Identifier& ident = identifier(pc[3].u.operand);
+ ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
+ RELEASE_ASSERT(type != LocalClosureVar);
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, type);
+ instructions[i + 4].u.operand = op.type;
+ instructions[i + 5].u.operand = op.depth;
+ if (op.lexicalEnvironment)
+ instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
+ break;
+ }
+
+ case op_get_from_scope: {
+ ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = i;
+ instructions[i + opLength - 1] = profile;
+
+ // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
+
+ ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
+ if (modeAndType.type() == LocalClosureVar) {
+ instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
+ break;
+ }
+
+ const Identifier& ident = identifier(pc[3].u.operand);
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, modeAndType.type());
+
+ instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 5].u.watchpointSet = op.watchpointSet;
+ else if (op.structure)
+ instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+ break;
+ }
+
+ case op_put_to_scope: {
+ // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
+ if (modeAndType.type() == LocalClosureVar) {
+ // Only do watching if the property we're putting to is not anonymous.
+ if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
+ RELEASE_ASSERT(didCloneSymbolTable);
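+ // prepareToWatch() mutates the symbol table entry, which is only safe on
+ // this CodeBlock's private clone of the table (hence the assertion above).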
+ const Identifier& ident = identifier(pc[2].u.operand);
+ ConcurrentJITLocker locker(m_symbolTable->m_lock);
+ SymbolTable::Map::iterator iter = m_symbolTable->find(locker, ident.impl());
+ ASSERT(iter != m_symbolTable->end(locker));
+ iter->value.prepareToWatch();
+ instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+ } else
+ instructions[i + 5].u.watchpointSet = nullptr;
+ break;
+ }
+
+ const Identifier& ident = identifier(pc[2].u.operand);
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Put, modeAndType.type());
+
+ instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 5].u.watchpointSet = op.watchpointSet;
+ else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
+ if (op.watchpointSet)
+ op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
+ } else if (op.structure)
+ instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+
+ break;
+ }
+
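+ // op_profile_type: where the profiled variable can be resolved statically,
+ // attach a unique global variable ID and TypeSet; then allocate (or reuse)
+ // the TypeLocation that runtime execution will record into.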
+ case op_profile_type: {
+ RELEASE_ASSERT(vm()->typeProfiler());
+ // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+ size_t instructionOffset = i + opLength - 1;
+ unsigned divotStart, divotEnd;
+ GlobalVariableID globalVariableID = 0;
+ RefPtr<TypeSet> globalTypeSet;
+ bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+ VirtualRegister profileRegister(pc[1].u.operand);
+ ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+ SymbolTable* symbolTable = nullptr;
+
+ switch (flag) {
+ case ProfileTypeBytecodePutToScope:
+ case ProfileTypeBytecodeGetFromScope: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);
+
+ // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
+ // https://bugs.webkit.org/show_bug.cgi?id=135184
+ if (op.type == ClosureVar)
+ symbolTable = op.lexicalEnvironment->symbolTable();
+ else if (op.type == GlobalVar)
+ symbolTable = m_globalObject.get()->symbolTable();
+
+ if (symbolTable) {
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
+ } else
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+
+ break;
+ }
+ case ProfileTypeBytecodePutToLocalScope:
+ case ProfileTypeBytecodeGetFromLocalScope: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ symbolTable = m_symbolTable.get();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
+
+ break;
+ }
+
+ case ProfileTypeBytecodeHasGlobalID: {
+ symbolTable = m_symbolTable.get();
+ ConcurrentJITLocker locker(symbolTable->m_lock);
+ globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
+ globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
+ break;
+ }
+ case ProfileTypeBytecodeDoesNotHaveGlobalID:
+ case ProfileTypeBytecodeFunctionArgument: {
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+ break;
+ }
+ case ProfileTypeBytecodeFunctionReturnStatement: {
+ RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+ globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+ globalVariableID = TypeProfilerReturnStatement;
+ if (!shouldAnalyze) {
+ // Because a return statement can be added implicitly to return undefined at the end of a function,
+ // and these nodes don't emit expression ranges because they aren't in the actual source text of
+ // the user's program, give the type profiler some range to identify these return statements.
+ // Currently, the text offset that is used as identification is on the open brace of the function
+ // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+ divotStart = divotEnd = m_sourceOffset;
+ shouldAnalyze = true;
+ }
+ break;
+ }
+ }
+
+ std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+ m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
+ TypeLocation* location = locationPair.first;
+ bool isNewLocation = locationPair.second;
+
+ if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+ location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
+
+ if (shouldAnalyze && isNewLocation)
+ vm()->typeProfiler()->insertNewLocation(location);
+
+ instructions[i + 2].u.location = location;
+ break;
+ }
+
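+ // An op_debug with the DidReachBreakpoint hook marks an explicit debugger
+ // statement in the source.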
+ case op_debug: {
+ if (pc[1].u.index == DidReachBreakpoint)
+ m_hasDebuggerStatement = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ i += opLength;
+ }
+
+ if (vm()->controlFlowProfiler())
+ insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+ m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+ // Set optimization thresholds only after m_instructions is initialized, since these
+ // rely on the instruction count (and are in theory permitted to also inspect the
+ // instruction stream to more accurately assess the cost of tier-up).
+ optimizeAfterWarmUp();
+ jitAfterWarmUp();
+
+ // If the concurrent thread will want the code block's hash, then compute it here
+ // synchronously.
+ if (Options::alwaysComputeHash())
+ hash();
+
+ if (Options::dumpGeneratedBytecodes())
+ dumpBytecode();
+
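+ // Register with the heap's code block set and report the instruction
+ // stream's out-of-line allocation so the GC can account for it.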
+ m_heap->m_codeBlocks.add(this);
+ m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+}