X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/a253471d7f8e4d91bf6ebabab00155c3b387d3d0..93a3786624b2768d89bfa27e46598dc64e2fb70a:/llint/LowLevelInterpreter32_64.asm?ds=inline

diff --git a/llint/LowLevelInterpreter32_64.asm b/llint/LowLevelInterpreter32_64.asm
index 1a089bd..20aa130 100644
--- a/llint/LowLevelInterpreter32_64.asm
+++ b/llint/LowLevelInterpreter32_64.asm
@@ -105,34 +105,52 @@ macro dispatchAfterCall()
 end
 
 macro cCall2(function, arg1, arg2)
-    if ARMv7
+    if ARM or ARMv7 or ARMv7_TRADITIONAL
         move arg1, t0
         move arg2, t1
+        call function
     elsif X86
+        resetX87Stack
         poke arg1, 0
         poke arg2, 1
+        call function
+    elsif MIPS or SH4
+        move arg1, a0
+        move arg2, a1
+        call function
+    elsif C_LOOP
+        cloopCallSlowPath function, arg1, arg2
     else
         error
     end
-    call function
 end
 
 # This barely works. arg3 and arg4 should probably be immediates.
 macro cCall4(function, arg1, arg2, arg3, arg4)
-    if ARMv7
+    if ARM or ARMv7 or ARMv7_TRADITIONAL
         move arg1, t0
         move arg2, t1
         move arg3, t2
         move arg4, t3
+        call function
     elsif X86
+        resetX87Stack
         poke arg1, 0
         poke arg2, 1
         poke arg3, 2
         poke arg4, 3
+        call function
+    elsif MIPS or SH4
+        move arg1, a0
+        move arg2, a1
+        move arg3, a2
+        move arg4, a3
+        call function
+    elsif C_LOOP
+        error
     else
         error
     end
-    call function
 end
 
 macro callSlowPath(slowPath)
@@ -169,6 +187,14 @@ macro callCallSlowPath(advance, slowPath, action)
     action(t0)
 end
 
+macro callWatchdogTimerHandler(throwHandler)
+    storei PC, ArgumentCount + TagOffset[cfr]
+    cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+    move t1, cfr
+    btpnz t0, throwHandler
+    loadi ArgumentCount + TagOffset[cfr], PC
+end
+
 macro checkSwitchToJITForLoop()
     checkSwitchToJIT(
         1,
@@ -286,9 +312,9 @@ macro functionArityCheck(doneLabel, slow_path)
     cCall2(slow_path, cfr, PC)   # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
     move t1, cfr
     btiz t0, .continue
-    loadp JITStackFrame::globalData[sp], t1
-    loadp JSGlobalData::callFrameForThrow[t1], t0
-    jmp JSGlobalData::targetMachinePCForThrow[t1]
+    loadp JITStackFrame::vm[sp], t1
+    loadp VM::callFrameForThrow[t1], t0
+    jmp VM::targetMachinePCForThrow[t1]
 .continue:
     # Reload CodeBlock and PC, since the slow_path clobbered it.
     loadp CodeBlock[cfr], t1
@@ -301,8 +327,8 @@ end
 
 _llint_op_enter:
     traceExecution()
-    loadp CodeBlock[cfr], t2
-    loadi CodeBlock::m_numVars[t2], t2
+    loadp CodeBlock[cfr], t2                // t2 = cfr.CodeBlock
+    loadi CodeBlock::m_numVars[t2], t2      // t2 = t2.m_numVars
     btiz t2, .opEnterDone
     move UndefinedTag, t0
     move 0, t1
@@ -344,31 +370,30 @@ _llint_op_create_arguments:
 
 _llint_op_create_this:
     traceExecution()
     loadi 8[PC], t0
-    assertNotConstant(t0)
-    bineq TagOffset[cfr, t0, 8], CellTag, .opCreateThisSlow
-    loadi PayloadOffset[cfr, t0, 8], t0
-    loadp JSCell::m_structure[t0], t1
-    bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
-    loadp JSObject::m_inheritorID[t0], t2
-    btpz t2, .opCreateThisSlow
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+    loadp PayloadOffset[cfr, t0, 8], t0
+    loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
+    loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+    btpz t1, .opCreateThisSlow
+    allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
     loadi 4[PC], t1
     storei CellTag, TagOffset[cfr, t1, 8]
     storei t0, PayloadOffset[cfr, t1, 8]
-    dispatch(3)
+    dispatch(4)
 
 .opCreateThisSlow:
     callSlowPath(_llint_slow_path_create_this)
-    dispatch(3)
+    dispatch(4)
 
 
 _llint_op_get_callee:
     traceExecution()
     loadi 4[PC], t0
     loadp PayloadOffset + Callee[cfr], t1
+    loadp 8[PC], t2
+    valueProfile(CellTag, t1, t2)
     storei CellTag, TagOffset[cfr, t0, 8]
     storei t1, PayloadOffset[cfr, t0, 8]
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_convert_this:
@@ -378,27 +403,29 @@ _llint_op_convert_this:
     loadi PayloadOffset[cfr, t0, 8], t0
     loadp JSCell::m_structure[t0], t0
     bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
-    dispatch(2)
+    loadi 8[PC], t1
+    valueProfile(CellTag, t0, t1)
+    dispatch(3)
 
 .opConvertThisSlow:
     callSlowPath(_llint_slow_path_convert_this)
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_new_object:
     traceExecution()
-    loadp CodeBlock[cfr], t0
-    loadp CodeBlock::m_globalObject[t0], t0
-    loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+    loadpFromInstruction(3, t0)
+    loadp ObjectAllocationProfile::m_allocator[t0], t1
+    loadp ObjectAllocationProfile::m_structure[t0], t2
+    allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
     loadi 4[PC], t1
     storei CellTag, TagOffset[cfr, t1, 8]
     storei t0, PayloadOffset[cfr, t1, 8]
-    dispatch(2)
+    dispatch(4)
 
 .opNewObjectSlow:
     callSlowPath(_llint_slow_path_new_object)
-    dispatch(2)
+    dispatch(4)
 
 
 _llint_op_mov:
@@ -456,7 +483,13 @@ _llint_op_eq_null:
     loadi PayloadOffset[cfr, t0, 8], t0
     bineq t1, CellTag, .opEqNullImmediate
     loadp JSCell::m_structure[t0], t1
-    tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+    btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
+    move 0, t1
+    jmp .opEqNullNotImmediate
+.opEqNullMasqueradesAsUndefined:
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    cpeq Structure::m_globalObject[t1], t0, t1
     jmp .opEqNullNotImmediate
 .opEqNullImmediate:
     cieq t1, NullTag, t2
@@ -497,7 +530,13 @@ _llint_op_neq_null:
     loadi PayloadOffset[cfr, t0, 8], t0
     bineq t1, CellTag, .opNeqNullImmediate
     loadp JSCell::m_structure[t0], t1
-    tbz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+    btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
+    move 1, t1
+    jmp .opNeqNullNotImmediate
+.opNeqNullMasqueradesAsUndefined:
+    loadp CodeBlock[cfr], t0
+    loadp CodeBlock::m_globalObject[t0], t0
+    cpneq Structure::m_globalObject[t1], t0, t1
     jmp .opNeqNullNotImmediate
 .opNeqNullImmediate:
     cineq t1, NullTag, t2
@@ -543,88 +582,48 @@ _llint_op_nstricteq:
     strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
 
 
-_llint_op_pre_inc:
+_llint_op_inc:
     traceExecution()
     loadi 4[PC], t0
-    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
+    bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
     loadi PayloadOffset[cfr, t0, 8], t1
-    baddio 1, t1, .opPreIncSlow
+    baddio 1, t1, .opIncSlow
     storei t1, PayloadOffset[cfr, t0, 8]
     dispatch(2)
 
-.opPreIncSlow:
+.opIncSlow:
     callSlowPath(_llint_slow_path_pre_inc)
     dispatch(2)
 
 
-_llint_op_pre_dec:
+_llint_op_dec:
    traceExecution()
     loadi 4[PC], t0
-    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
+    bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
     loadi PayloadOffset[cfr, t0, 8], t1
-    bsubio 1, t1, .opPreDecSlow
+    bsubio 1, t1, .opDecSlow
     storei t1, PayloadOffset[cfr, t0, 8]
     dispatch(2)
 
-.opPreDecSlow:
+.opDecSlow:
     callSlowPath(_llint_slow_path_pre_dec)
     dispatch(2)
 
 
-_llint_op_post_inc:
-    traceExecution()
-    loadi 8[PC], t0
-    loadi 4[PC], t1
-    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
-    bieq t0, t1, .opPostIncDone
-    loadi PayloadOffset[cfr, t0, 8], t2
-    move t2, t3
-    baddio 1, t3, .opPostIncSlow
-    storei Int32Tag, TagOffset[cfr, t1, 8]
-    storei t2, PayloadOffset[cfr, t1, 8]
-    storei t3, PayloadOffset[cfr, t0, 8]
-.opPostIncDone:
-    dispatch(3)
-
-.opPostIncSlow:
-    callSlowPath(_llint_slow_path_post_inc)
-    dispatch(3)
-
-
-_llint_op_post_dec:
-    traceExecution()
-    loadi 8[PC], t0
-    loadi 4[PC], t1
-    bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
-    bieq t0, t1, .opPostDecDone
-    loadi PayloadOffset[cfr, t0, 8], t2
-    move t2, t3
-    bsubio 1, t3, .opPostDecSlow
-    storei Int32Tag, TagOffset[cfr, t1, 8]
-    storei t2, PayloadOffset[cfr, t1, 8]
-    storei t3, PayloadOffset[cfr, t0, 8]
-.opPostDecDone:
-    dispatch(3)
-
-.opPostDecSlow:
-    callSlowPath(_llint_slow_path_post_dec)
-    dispatch(3)
-
-
-_llint_op_to_jsnumber:
+_llint_op_to_number:
     traceExecution()
     loadi 8[PC], t0
     loadi 4[PC], t1
     loadConstantOrVariable(t0, t2, t3)
-    bieq t2, Int32Tag, .opToJsnumberIsInt
-    biaeq t2, EmptyValueTag, .opToJsnumberSlow
-.opToJsnumberIsInt:
+    bieq t2, Int32Tag, .opToNumberIsInt
+    biaeq t2, LowestTag, .opToNumberSlow
+.opToNumberIsInt:
     storei t2, TagOffset[cfr, t1, 8]
     storei t3, PayloadOffset[cfr, t1, 8]
     dispatch(3)
 
-.opToJsnumberSlow:
-    callSlowPath(_llint_slow_path_to_jsnumber)
+.opToNumberSlow:
+    callSlowPath(_llint_slow_path_to_number)
     dispatch(3)
 
 
@@ -827,28 +826,21 @@ _llint_op_bitor:
 
 _llint_op_check_has_instance:
     traceExecution()
-    loadi 4[PC], t1
+    loadi 12[PC], t1
     loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
     loadp JSCell::m_structure[t0], t0
-    btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
-    dispatch(2)
+    btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
+    dispatch(5)
 
 .opCheckHasInstanceSlow:
     callSlowPath(_llint_slow_path_check_has_instance)
-    dispatch(2)
+    dispatch(0)
 
 
 _llint_op_instanceof:
     traceExecution()
-    # Check that baseVal implements the default HasInstance behavior.
-    # FIXME: This should be deprecated.
-    loadi 12[PC], t1
-    loadConstantOrVariablePayloadUnchecked(t1, t0)
-    loadp JSCell::m_structure[t0], t0
-    btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow
-
-    # Actually do the work.
-    loadi 16[PC], t0
+    loadi 12[PC], t0
     loadi 4[PC], t3
     loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
     loadp JSCell::m_structure[t1], t2
@@ -868,11 +860,11 @@ _llint_op_instanceof:
 .opInstanceofDone:
     storei BooleanTag, TagOffset[cfr, t3, 8]
     storei t0, PayloadOffset[cfr, t3, 8]
-    dispatch(5)
+    dispatch(4)
 
 .opInstanceofSlow:
     callSlowPath(_llint_slow_path_instanceof)
-    dispatch(5)
+    dispatch(4)
 
 
 _llint_op_is_undefined:
@@ -887,7 +879,14 @@ _llint_op_is_undefined:
     dispatch(3)
 .opIsUndefinedCell:
     loadp JSCell::m_structure[t3], t1
-    tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+    btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
+    move 0, t1
+    storei t1, PayloadOffset[cfr, t0, 8]
+    dispatch(3)
+.opIsUndefinedMasqueradesAsUndefined:
+    loadp CodeBlock[cfr], t3
+    loadp CodeBlock::m_globalObject[t3], t3
+    cpeq Structure::m_globalObject[t1], t3, t1
     storei t1, PayloadOffset[cfr, t0, 8]
     dispatch(3)
 
@@ -931,6 +930,26 @@ _llint_op_is_string:
     dispatch(3)
 
 
+macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
+    assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
+    negi propertyOffset
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
+    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
+end
+
+macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
+    bilt propertyOffset, firstOutOfLineOffset, .isInline
+    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+    negi propertyOffset
+    jmp .ready
+.isInline:
+    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
+    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
+end
+
 macro resolveGlobal(size, slow)
     # Operands are as follows:
     # 4[PC]    Destination for the load.
@@ -942,9 +961,7 @@ macro resolveGlobal(size, slow)
     loadp JSCell::m_structure[t0], t1
     bpneq t1, 12[PC], slow
     loadi 16[PC], t1
-    loadp JSObject::m_propertyStorage[t0], t0
-    loadi TagOffset[t0, t1, 8], t2
-    loadi PayloadOffset[t0, t1, 8], t3
+    loadPropertyAtVariableOffsetKnownNotInline(t1, t0, t2, t3)
     loadi 4[PC], t0
     storei t2, TagOffset[cfr, t0, 8]
     storei t3, PayloadOffset[cfr, t0, 8]
@@ -952,158 +969,92 @@ macro resolveGlobal(size, slow)
     valueProfile(t2, t3, t0)
 end
 
-_llint_op_resolve_global:
+_llint_op_init_global_const:
     traceExecution()
-    resolveGlobal(6, .opResolveGlobalSlow)
-    dispatch(6)
-
-.opResolveGlobalSlow:
-    callSlowPath(_llint_slow_path_resolve_global)
-    dispatch(6)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getScope(deBruijinIndexOperand, scopeCheck)
-    loadp ScopeChain + PayloadOffset[cfr], t0
-    loadi deBruijinIndexOperand, t2
-
-    btiz t2, .done
-
-    loadp CodeBlock[cfr], t1
-    bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
-    btbz CodeBlock::m_needsFullScopeChain[t1], .loop
-
-    loadi CodeBlock::m_activationRegister[t1], t1
-
-    # Need to conditionally skip over one scope.
-    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
-    scopeCheck(t0, t1)
-    loadp ScopeChainNode::next[t0], t0
-.noActivation:
-    subi 1, t2
-
-    btiz t2, .done
-.loop:
-    scopeCheck(t0, t1)
-    loadp ScopeChainNode::next[t0], t0
-    subi 1, t2
-    btinz t2, .loop
-
-.done:
-end
-
-_llint_op_resolve_global_dynamic:
-    traceExecution()
-    loadp JITStackFrame::globalData[sp], t3
-    loadp JSGlobalData::activationStructure[t3], t3
-    getScope(
-        20[PC],
-        macro (scope, scratch)
-            loadp ScopeChainNode::object[scope], scratch
-            bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
-        end)
-    resolveGlobal(7, .opResolveGlobalDynamicSlow)
-    dispatch(7)
-
-.opResolveGlobalDynamicSuperSlow:
-    callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
-    dispatch(7)
-
-.opResolveGlobalDynamicSlow:
-    callSlowPath(_llint_slow_path_resolve_global_dynamic)
-    dispatch(7)
+    loadi 8[PC], t1
+    loadi 4[PC], t0
+    loadConstantOrVariable(t1, t2, t3)
+    writeBarrier(t2, t3)
+    storei t2, TagOffset[t0]
+    storei t3, PayloadOffset[t0]
+    dispatch(5)
 
 
-_llint_op_get_scoped_var:
+_llint_op_init_global_const_check:
     traceExecution()
-    # Operands are as follows:
-    # 4[PC]    Destination for the load.
-    # 8[PC]    Index of register in the scope.
-    # 12[PC]   De Bruijin index.
-    getScope(12[PC], macro (scope, scratch) end)
-    loadi 4[PC], t1
-    loadi 8[PC], t2
-    loadp ScopeChainNode::object[t0], t0
-    loadp JSVariableObject::m_registers[t0], t0
-    loadi TagOffset[t0, t2, 8], t3
-    loadi PayloadOffset[t0, t2, 8], t0
-    storei t3, TagOffset[cfr, t1, 8]
-    storei t0, PayloadOffset[cfr, t1, 8]
-    loadi 16[PC], t1
-    valueProfile(t3, t0, t1)
+    loadp 12[PC], t2
+    loadi 8[PC], t1
+    loadi 4[PC], t0
+    btbnz [t2], .opInitGlobalConstCheckSlow
+    loadConstantOrVariable(t1, t2, t3)
+    writeBarrier(t2, t3)
+    storei t2, TagOffset[t0]
+    storei t3, PayloadOffset[t0]
+    dispatch(5)
+.opInitGlobalConstCheckSlow:
+    callSlowPath(_llint_slow_path_init_global_const_check)
     dispatch(5)
 
+# We only do monomorphic get_by_id caching for now, and we do not modify the
+# opcode. We do, however, allow for the cache to change anytime if fails, since
+# ping-ponging is free. At best we get lucky and the get_by_id will continue
+# to take fast path on the new cache. At worst we take slow path, which is what
+# we would have been doing anyway.
-_llint_op_put_scoped_var:
+macro getById(getPropertyStorage)
     traceExecution()
-    getScope(8[PC], macro (scope, scratch) end)
-    loadi 12[PC], t1
-    loadConstantOrVariable(t1, t3, t2)
-    loadi 4[PC], t1
-    writeBarrier(t3, t2)
-    loadp ScopeChainNode::object[t0], t0
-    loadp JSVariableObject::m_registers[t0], t0
-    storei t3, TagOffset[t0, t1, 8]
-    storei t2, PayloadOffset[t0, t1, 8]
-    dispatch(4)
+    loadi 8[PC], t0
+    loadi 16[PC], t1
+    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
+    loadi 20[PC], t2
+    getPropertyStorage(
+        t3,
+        t0,
+        macro (propertyStorage, scratch)
+            bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
+            loadi 4[PC], t1
+            loadi TagOffset[propertyStorage, t2], scratch
+            loadi PayloadOffset[propertyStorage, t2], t2
+            storei scratch, TagOffset[cfr, t1, 8]
+            storei t2, PayloadOffset[cfr, t1, 8]
+            loadi 32[PC], t1
+            valueProfile(scratch, t2, t1)
+            dispatch(9)
+        end)
+
+    .opGetByIdSlow:
+        callSlowPath(_llint_slow_path_get_by_id)
+        dispatch(9)
+end
 
 
-_llint_op_get_global_var:
-    traceExecution()
-    loadi 8[PC], t1
-    loadi 4[PC], t3
-    loadp CodeBlock[cfr], t0
-    loadp CodeBlock::m_globalObject[t0], t0
-    loadp JSGlobalObject::m_registers[t0], t0
-    loadi TagOffset[t0, t1, 8], t2
-    loadi PayloadOffset[t0, t1, 8], t1
-    storei t2, TagOffset[cfr, t3, 8]
-    storei t1, PayloadOffset[cfr, t3, 8]
-    loadi 12[PC], t3
-    valueProfile(t2, t1, t3)
-    dispatch(4)
+_llint_op_get_by_id:
+    getById(withInlineStorage)
 
 
-_llint_op_put_global_var:
-    traceExecution()
-    loadi 8[PC], t1
-    loadp CodeBlock[cfr], t0
-    loadp CodeBlock::m_globalObject[t0], t0
-    loadp JSGlobalObject::m_registers[t0], t0
-    loadConstantOrVariable(t1, t2, t3)
-    loadi 4[PC], t1
-    writeBarrier(t2, t3)
-    storei t2, TagOffset[t0, t1, 8]
-    storei t3, PayloadOffset[t0, t1, 8]
-    dispatch(3)
+_llint_op_get_by_id_out_of_line:
+    getById(withOutOfLineStorage)
 
 
-_llint_op_get_by_id:
+_llint_op_get_array_length:
     traceExecution()
-    # We only do monomorphic get_by_id caching for now, and we do not modify the
-    # opcode. We do, however, allow for the cache to change anytime if fails, since
-    # ping-ponging is free. At best we get lucky and the get_by_id will continue
-    # to take fast path on the new cache. At worst we take slow path, which is what
-    # we would have been doing anyway.
     loadi 8[PC], t0
-    loadi 16[PC], t1
-    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
-    loadi 20[PC], t2
-    loadp JSObject::m_propertyStorage[t3], t0
-    bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
+    loadp 16[PC], t1
+    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
+    loadp JSCell::m_structure[t3], t2
+    arrayProfile(t2, t1, t0)
+    btiz t2, IsArray, .opGetArrayLengthSlow
+    btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
     loadi 4[PC], t1
-    loadi TagOffset[t0, t2], t3
-    loadi PayloadOffset[t0, t2], t2
-    storei t3, TagOffset[cfr, t1, 8]
-    storei t2, PayloadOffset[cfr, t1, 8]
-    loadi 32[PC], t1
-    valueProfile(t3, t2, t1)
+    loadp 32[PC], t2
+    loadp JSObject::m_butterfly[t3], t0
+    loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
+    bilt t0, 0, .opGetArrayLengthSlow
+    valueProfile(Int32Tag, t0, t2)
+    storep t0, PayloadOffset[cfr, t1, 8]
+    storep Int32Tag, TagOffset[cfr, t1, 8]
     dispatch(9)
 
-.opGetByIdSlow:
+.opGetArrayLengthSlow:
     callSlowPath(_llint_slow_path_get_by_id)
     dispatch(9)
 
@@ -1124,98 +1075,158 @@ _llint_op_get_arguments_length:
     dispatch(4)
 
 
-_llint_op_put_by_id:
+macro putById(getPropertyStorage)
     traceExecution()
     loadi 4[PC], t3
     loadi 16[PC], t1
     loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
     loadi 12[PC], t2
-    loadp JSObject::m_propertyStorage[t0], t3
-    bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
-    loadi 20[PC], t1
-    loadConstantOrVariable2Reg(t2, t0, t2)
-    writeBarrier(t0, t2)
-    storei t0, TagOffset[t3, t1]
-    storei t2, PayloadOffset[t3, t1]
-    dispatch(9)
+    getPropertyStorage(
+        t0,
+        t3,
+        macro (propertyStorage, scratch)
+            bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+            loadi 20[PC], t1
+            loadConstantOrVariable2Reg(t2, scratch, t2)
+            writeBarrier(scratch, t2)
+            storei scratch, TagOffset[propertyStorage, t1]
+            storei t2, PayloadOffset[propertyStorage, t1]
+            dispatch(9)
+        end)
+end
+
+_llint_op_put_by_id:
+    putById(withInlineStorage)
 
 .opPutByIdSlow:
     callSlowPath(_llint_slow_path_put_by_id)
     dispatch(9)
 
 
-macro putByIdTransition(additionalChecks)
+_llint_op_put_by_id_out_of_line:
+    putById(withOutOfLineStorage)
+
+
+macro putByIdTransition(additionalChecks, getPropertyStorage)
     traceExecution()
     loadi 4[PC], t3
     loadi 16[PC], t1
     loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
     loadi 12[PC], t2
     bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
-    additionalChecks(t1, t3, .opPutByIdSlow)
+    additionalChecks(t1, t3)
     loadi 20[PC], t1
-    loadp JSObject::m_propertyStorage[t0], t3
-    addp t1, t3
-    loadConstantOrVariable2Reg(t2, t1, t2)
-    writeBarrier(t1, t2)
-    storei t1, TagOffset[t3]
-    loadi 24[PC], t1
-    storei t2, PayloadOffset[t3]
-    storep t1, JSCell::m_structure[t0]
-    dispatch(9)
+    getPropertyStorage(
+        t0,
+        t3,
+        macro (propertyStorage, scratch)
+            addp t1, propertyStorage, t3
+            loadConstantOrVariable2Reg(t2, t1, t2)
+            writeBarrier(t1, t2)
+            storei t1, TagOffset[t3]
+            loadi 24[PC], t1
+            storei t2, PayloadOffset[t3]
+            storep t1, JSCell::m_structure[t0]
+            dispatch(9)
+        end)
+end
+
+macro noAdditionalChecks(oldStructure, scratch)
+end
+
+macro structureChainChecks(oldStructure, scratch)
+    const protoCell = oldStructure    # Reusing the oldStructure register for the proto
+
+    loadp 28[PC], scratch
+    assert(macro (ok) btpnz scratch, ok end)
+    loadp StructureChain::m_vector[scratch], scratch
+    assert(macro (ok) btpnz scratch, ok end)
+    bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
+.loop:
+    loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
+    loadp JSCell::m_structure[protoCell], oldStructure
+    bpneq oldStructure, [scratch], .opPutByIdSlow
+    addp 4, scratch
+    bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
+.done:
 end
 
 _llint_op_put_by_id_transition_direct:
-    putByIdTransition(macro (oldStructure, scratch, slow) end)
+    putByIdTransition(noAdditionalChecks, withInlineStorage)
+
+
+_llint_op_put_by_id_transition_direct_out_of_line:
+    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
 
 
 _llint_op_put_by_id_transition_normal:
-    putByIdTransition(
-        macro (oldStructure, scratch, slow)
-            const protoCell = oldStructure   # Reusing the oldStructure register for the proto
-
-            loadp 28[PC], scratch
-            assert(macro (ok) btpnz scratch, ok end)
-            loadp StructureChain::m_vector[scratch], scratch
-            assert(macro (ok) btpnz scratch, ok end)
-            bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
-        .loop:
-            loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
-            loadp JSCell::m_structure[protoCell], oldStructure
-            bpneq oldStructure, [scratch], slow
-            addp 4, scratch
-            bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
-        .done:
-        end)
+    putByIdTransition(structureChainChecks, withInlineStorage)
+
+
+_llint_op_put_by_id_transition_normal_out_of_line:
+    putByIdTransition(structureChainChecks, withOutOfLineStorage)
 
 
 _llint_op_get_by_val:
     traceExecution()
-    loadp CodeBlock[cfr], t1
     loadi 8[PC], t2
-    loadi 12[PC], t3
-    loadp CodeBlock::m_globalData[t1], t1
     loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
-    loadp JSGlobalData::jsArrayClassInfo[t1], t2
+    loadp JSCell::m_structure[t0], t2
+    loadp 16[PC], t3
+    arrayProfile(t2, t3, t1)
+    loadi 12[PC], t3
     loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
-    bpneq [t0], t2, .opGetByValSlow
-    loadp JSArray::m_storage[t0], t3
-    biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow
+    loadp JSObject::m_butterfly[t0], t3
+    andi IndexingShapeMask, t2
+    bieq t2, Int32Shape, .opGetByValIsContiguous
+    bineq t2, ContiguousShape, .opGetByValNotContiguous
+.opGetByValIsContiguous:
+
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
+    loadi TagOffset[t3, t1, 8], t2
+    loadi PayloadOffset[t3, t1, 8], t1
+    jmp .opGetByValDone
+
+.opGetByValNotContiguous:
+    bineq t2, DoubleShape, .opGetByValNotDouble
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
+    loadd [t3, t1, 8], ft0
+    bdnequn ft0, ft0, .opGetByValSlow
+    # FIXME: This could be massively optimized.
+    fd2ii ft0, t1, t2
     loadi 4[PC], t0
+    jmp .opGetByValNotEmpty
+
+.opGetByValNotDouble:
+    subi ArrayStorageShape, t2
+    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
+    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValOutOfBounds
     loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
     loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
-    bieq t2, EmptyValueTag, .opGetByValSlow
+
+.opGetByValDone:
+    loadi 4[PC], t0
+    bieq t2, EmptyValueTag, .opGetByValOutOfBounds
+.opGetByValNotEmpty:
     storei t2, TagOffset[cfr, t0, 8]
     storei t1, PayloadOffset[cfr, t0, 8]
-    loadi 16[PC], t0
+    loadi 20[PC], t0
     valueProfile(t2, t1, t0)
-    dispatch(5)
+    dispatch(6)
 
+.opGetByValOutOfBounds:
+    if VALUE_PROFILER
+        loadpFromInstruction(4, t0)
+        storeb 1, ArrayProfile::m_outOfBounds[t0]
+    end
 .opGetByValSlow:
     callSlowPath(_llint_slow_path_get_by_val)
-    dispatch(5)
+    dispatch(6)
 
 
 _llint_op_get_argument_by_val:
+    # FIXME: At some point we should array profile this. Right now it isn't necessary
+    # since the DFG will never turn a get_argument_by_val into a GetByVal.
     traceExecution()
     loadi 8[PC], t0
     loadi 12[PC], t1
@@ -1228,13 +1239,15 @@ _llint_op_get_argument_by_val:
     loadi 4[PC], t3
     loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
     loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
+    loadi 20[PC], t2
     storei t0, TagOffset[cfr, t3, 8]
     storei t1, PayloadOffset[cfr, t3, 8]
-    dispatch(5)
+    valueProfile(t0, t1, t2)
+    dispatch(6)
 
 .opGetArgumentByValSlow:
     callSlowPath(_llint_slow_path_get_argument_by_val)
-    dispatch(5)
+    dispatch(6)
 
 
 _llint_op_get_by_pname:
@@ -1253,9 +1266,11 @@ _llint_op_get_by_pname:
     loadi [cfr, t0, 8], t0
     subi 1, t0
     biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
-    loadp JSObject::m_propertyStorage[t2], t2
-    loadi TagOffset[t2, t0, 8], t1
-    loadi PayloadOffset[t2, t0, 8], t3
+    bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
+    addi firstOutOfLineOffset, t0
+    subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
+.opGetByPnameInlineProperty:
+    loadPropertyAtVariableOffset(t0, t2, t1, t3)
     loadi 4[PC], t0
     storei t1, TagOffset[cfr, t0, 8]
     storei t3, PayloadOffset[cfr, t0, 8]
@@ -1266,41 +1281,105 @@ _llint_op_get_by_pname:
     dispatch(7)
 
 
+macro contiguousPutByVal(storeCallback)
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
+.storeResult:
+    loadi 12[PC], t2
+    storeCallback(t2, t1, t0, t3)
+    dispatch(5)
+
+.outOfBounds:
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
+    if VALUE_PROFILER
+        loadp 16[PC], t2
+        storeb 1, ArrayProfile::m_mayStoreToHole[t2]
+    end
+    addi 1, t3, t2
+    storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+    jmp .storeResult
+end
+
 _llint_op_put_by_val:
     traceExecution()
     loadi 4[PC], t0
     loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
+    loadp JSCell::m_structure[t1], t2
+    loadp 16[PC], t3
+    arrayProfile(t2, t3, t0)
     loadi 8[PC], t0
-    loadConstantOrVariablePayload(t0, Int32Tag, t2, .opPutByValSlow)
-    loadp CodeBlock[cfr], t0
-    loadp CodeBlock::m_globalData[t0], t0
-    loadp JSGlobalData::jsArrayClassInfo[t0], t0
-    bpneq [t1], t0, .opPutByValSlow
-    biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow
-    loadp JSArray::m_storage[t1], t0
-    bieq ArrayStorage::m_vector + TagOffset[t0, t2, 8], EmptyValueTag, .opPutByValEmpty
-.opPutByValStoreResult:
-    loadi 12[PC], t3
-    loadConstantOrVariable2Reg(t3, t1, t3)
-    writeBarrier(t1, t3)
-    storei t1, ArrayStorage::m_vector + TagOffset[t0, t2, 8]
-    storei t3, ArrayStorage::m_vector + PayloadOffset[t0, t2, 8]
-    dispatch(4)
+    loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)
+    loadp JSObject::m_butterfly[t1], t0
+    andi IndexingShapeMask, t2
+    bineq t2, Int32Shape, .opPutByValNotInt32
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
+            storei Int32Tag, TagOffset[base, index, 8]
+            storei scratch, PayloadOffset[base, index, 8]
+        end)
+
+.opPutByValNotInt32:
+    bineq t2, DoubleShape, .opPutByValNotDouble
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            const tag = scratch
+            const payload = operand
+            loadConstantOrVariable2Reg(operand, tag, payload)
+            bineq tag, Int32Tag, .notInt
+            ci2d payload, ft0
+            jmp .ready
+        .notInt:
+            fii2d payload, tag, ft0
+            bdnequn ft0, ft0, .opPutByValSlow
+        .ready:
+            stored ft0, [base, index, 8]
+        end)
 
-.opPutByValEmpty:
+.opPutByValNotDouble:
+    bineq t2, ContiguousShape, .opPutByValNotContiguous
+    contiguousPutByVal(
+        macro (operand, scratch, base, index)
+            const tag = scratch
+            const payload = operand
+            loadConstantOrVariable2Reg(operand, tag, payload)
+            writeBarrier(tag, payload)
+            storei tag, TagOffset[base, index, 8]
+            storei payload, PayloadOffset[base, index, 8]
+        end)
+
+.opPutByValNotContiguous:
+    bineq t2, ArrayStorageShape, .opPutByValSlow
+    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
+    bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
+.opPutByValArrayStorageStoreResult:
+    loadi 12[PC], t2
+    loadConstantOrVariable2Reg(t2, t1, t2)
+    writeBarrier(t1, t2)
+    storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
+    storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
+    dispatch(5)
+
+.opPutByValArrayStorageEmpty:
+    if VALUE_PROFILER
+        loadp 16[PC], t1
+        storeb 1, ArrayProfile::m_mayStoreToHole[t1]
+    end
     addi 1, ArrayStorage::m_numValuesInVector[t0]
-    bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
-    addi 1, t2, t1
-    storei t1, ArrayStorage::m_length[t0]
-    jmp .opPutByValStoreResult
+    bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
+    addi 1, t3, t1
+    storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+    jmp .opPutByValArrayStorageStoreResult
 
+.opPutByValOutOfBounds:
+    if VALUE_PROFILER
+        loadpFromInstruction(4, t0)
+        storeb 1, ArrayProfile::m_outOfBounds[t0]
+    end
 .opPutByValSlow:
     callSlowPath(_llint_slow_path_put_by_val)
-    dispatch(4)
+    dispatch(5)
 
 
-_llint_op_loop:
-    jmp _llint_op_jmp
 _llint_op_jmp:
     traceExecution()
     dispatchBranch(4[PC])
@@ -1328,7 +1407,7 @@ macro equalNull(cellHandler, immediateHandler)
     loadi PayloadOffset[cfr, t0, 8], t0
     bineq t1, CellTag, .immediate
     loadp JSCell::m_structure[t0], t2
-    cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+    cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
     dispatch(3)
 
 .target:
@@ -1343,14 +1422,25 @@ end
 _llint_op_jeq_null:
     traceExecution()
     equalNull(
-        macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
+        macro (structure, value, target)
+            btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
+            loadp CodeBlock[cfr], t0
+            loadp CodeBlock::m_globalObject[t0], t0
+            bpeq Structure::m_globalObject[structure], t0, target
+.opJeqNullNotMasqueradesAsUndefined:
+        end,
         macro (value, target) bieq value, NullTag, target end)
 
 
 _llint_op_jneq_null:
     traceExecution()
     equalNull(
-        macro (value, target) btbz value, MasqueradesAsUndefined, target end,
+        macro (structure, value, target)
+            btbz value, MasqueradesAsUndefined, target
+            loadp CodeBlock[cfr], t0
+            loadp CodeBlock::m_globalObject[t0], t0
+            bpneq Structure::m_globalObject[structure], t0, target
+        end,
         macro (value, target) bineq value, NullTag, target end)
 
 
@@ -1358,7 +1448,10 @@ _llint_op_jneq_ptr:
     traceExecution()
     loadi 4[PC], t0
     loadi 8[PC], t1
+    loadp CodeBlock[cfr], t2
+    loadp CodeBlock::m_globalObject[t2], t2
     bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
+    loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
     bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
 .opJneqPtrBranch:
     dispatchBranch(12[PC])
@@ -1482,6 +1575,18 @@ _llint_op_new_func:
     dispatch(4)
 
 
+macro arrayProfileForCall()
+    if VALUE_PROFILER
+        loadi 12[PC], t3
+        bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
+        loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
+        loadp JSCell::m_structure[t0], t0
+        loadp 20[PC], t1
+        storep t0, ArrayProfile::m_lastSeenStructure[t1]
+    .done:
+    end
+end
+
 macro doCall(slowPath)
     loadi 4[PC], t0
     loadi 16[PC], t1
@@ -1492,7 +1597,7 @@ macro doCall(slowPath)
     addp 24, PC
     lshifti 3, t3
     addp cfr, t3  # t3 contains the new value of cfr
-    loadp JSFunction::m_scopeChain[t2], t0
+    loadp JSFunction::m_scope[t2], t0
     storei t2, Callee + PayloadOffset[t3]
     storei t0, ScopeChain + PayloadOffset[t3]
     loadi 8 - 24[PC], t2
@@ -1502,8 +1607,7 @@ macro doCall(slowPath)
     storei CellTag, Callee + TagOffset[t3]
     storei CellTag, ScopeChain + TagOffset[t3]
     move t3, cfr
-    call LLIntCallLinkInfo::machineCodeTarget[t1]
-    dispatchAfterCall()
+    callTargetFunction(t1)
 
 .opCallSlow:
     slowPathForCall(6, slowPath)
@@ -1513,13 +1617,10 @@ end
 _llint_op_tear_off_activation:
     traceExecution()
     loadi 4[PC], t0
-    loadi 8[PC], t1
-    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
-    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
-.opTearOffActivationCreated:
+    bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
     callSlowPath(_llint_slow_path_tear_off_activation)
 .opTearOffActivationNotCreated:
-    dispatch(3)
+    dispatch(2)
 
 
 _llint_op_tear_off_arguments:
@@ -1529,7 +1630,7 @@ _llint_op_tear_off_arguments:
     bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
     callSlowPath(_llint_slow_path_tear_off_arguments)
 .opTearOffArgumentsNotCreated:
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_ret:
@@ -1630,14 +1731,14 @@ _llint_op_catch:
     # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The JIT throwing protocol calls for the cfr to be in t0. The throwing
     # code must have known that we were throwing to the interpreter, and have
-    # set JSGlobalData::targetInterpreterPCForThrow.
+    # set VM::targetInterpreterPCForThrow.
     move t0, cfr
-    loadp JITStackFrame::globalData[sp], t3
-    loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
-    loadi JSGlobalData::exception + PayloadOffset[t3], t0
-    loadi JSGlobalData::exception + TagOffset[t3], t1
-    storei 0, JSGlobalData::exception + PayloadOffset[t3]
-    storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
+    loadp JITStackFrame::vm[sp], t3
+    loadi VM::targetInterpreterPCForThrow[t3], PC
+    loadi VM::exception + PayloadOffset[t3], t0
+    loadi VM::exception + TagOffset[t3], t1
+    storei 0, VM::exception + PayloadOffset[t3]
+    storei EmptyValueTag, VM::exception + TagOffset[t3]
     loadi 4[PC], t2
     storei t0, PayloadOffset[cfr, t2, 8]
     storei t1, TagOffset[cfr, t2, 8]
@@ -1645,6 +1746,71 @@ _llint_op_catch:
     dispatch(2)
 
 
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
+    loadp ScopeChain + PayloadOffset[cfr], t0
+    loadi deBruijinIndexOperand, t2
+
+    btiz t2, .done
+
+    loadp CodeBlock[cfr], t1
+    bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+    btbz CodeBlock::m_needsActivation[t1], .loop
+
+    loadi CodeBlock::m_activationRegister[t1], t1
+
+    # Need to conditionally skip over one scope.
+    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
+    scopeCheck(t0, t1)
+    loadp JSScope::m_next[t0], t0
+.noActivation:
+    subi 1, t2
+
+    btiz t2, .done
+.loop:
+    scopeCheck(t0, t1)
+    loadp JSScope::m_next[t0], t0
+    subi 1, t2
+    btinz t2, .loop
+
+.done:
+
+end
+
+_llint_op_get_scoped_var:
+    traceExecution()
+    # Operands are as follows:
+    # 4[PC]    Destination for the load.
+    # 8[PC]    Index of register in the scope.
+    # 12[PC]   De Bruijin index.
+    getDeBruijnScope(12[PC], macro (scope, scratch) end)
+    loadi 4[PC], t1
+    loadi 8[PC], t2
+    loadp JSVariableObject::m_registers[t0], t0
+    loadi TagOffset[t0, t2, 8], t3
+    loadi PayloadOffset[t0, t2, 8], t0
+    storei t3, TagOffset[cfr, t1, 8]
+    storei t0, PayloadOffset[cfr, t1, 8]
+    loadi 16[PC], t1
+    valueProfile(t3, t0, t1)
+    dispatch(5)
+
+
+_llint_op_put_scoped_var:
+    traceExecution()
+    getDeBruijnScope(8[PC], macro (scope, scratch) end)
+    loadi 12[PC], t1
+    loadConstantOrVariable(t1, t3, t2)
+    loadi 4[PC], t1
+    writeBarrier(t3, t2)
+    loadp JSVariableObject::m_registers[t0], t0
+    storei t3, TagOffset[t0, t1, 8]
+    storei t2, PayloadOffset[t0, t1, 8]
+    dispatch(4)
+
 _llint_op_end:
     traceExecution()
     checkSwitchToJITForEpilogue()
@@ -1659,16 +1825,16 @@ _llint_throw_from_slow_path_trampoline:
     # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
     # the throw target is not necessarily interpreted code, we come to here.
     # This essentially emulates the JIT's throwing protocol.
-    loadp JITStackFrame::globalData[sp], t1
-    loadp JSGlobalData::callFrameForThrow[t1], t0
-    jmp JSGlobalData::targetMachinePCForThrow[t1]
+    loadp JITStackFrame::vm[sp], t1
+    loadp VM::callFrameForThrow[t1], t0
+    jmp VM::targetMachinePCForThrow[t1]
 
 
 _llint_throw_during_call_trampoline:
     preserveReturnAddressAfterCall(t2)
-    loadp JITStackFrame::globalData[sp], t1
-    loadp JSGlobalData::callFrameForThrow[t1], t0
-    jmp JSGlobalData::targetMachinePCForThrow[t1]
+    loadp JITStackFrame::vm[sp], t1
+    loadp VM::callFrameForThrow[t1], t0
+    jmp VM::targetMachinePCForThrow[t1]
 
 
 macro nativeCallTrampoline(executableOffsetToFunction)
@@ -1678,8 +1844,8 @@ macro nativeCallTrampoline(executableOffsetToFunction)
     storei CellTag, ScopeChain + TagOffset[cfr]
     storei t1, ScopeChain + PayloadOffset[cfr]
     if X86
-        loadp JITStackFrame::globalData + 4[sp], t3 # Additional offset for return address
-        storep cfr, JSGlobalData::topCallFrame[t3]
+        loadp JITStackFrame::vm + 4[sp], t3 # Additional offset for return address
+        storep cfr, VM::topCallFrame[t3]
         peek 0, t1
         storep t1, ReturnPC[cfr]
         move cfr, t2 # t2 = ecx
@@ -1689,10 +1855,37 @@ macro nativeCallTrampoline(executableOffsetToFunction)
         move t0, cfr
         call executableOffsetToFunction[t1]
         addp 16 - 4, sp
-        loadp JITStackFrame::globalData + 4[sp], t3
-    elsif ARMv7
-        loadp JITStackFrame::globalData[sp], t3
-        storep cfr, JSGlobalData::topCallFrame[t3]
+        loadp JITStackFrame::vm + 4[sp], t3
+    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+        loadp JITStackFrame::vm[sp], t3
+        storep cfr, VM::topCallFrame[t3]
+        move t0, t2
+        preserveReturnAddressAfterCall(t3)
+        storep t3, ReturnPC[cfr]
+        move cfr, t0
+        loadi Callee + PayloadOffset[cfr], t1
+        loadp JSFunction::m_executable[t1], t1
+        move t2, cfr
+        call executableOffsetToFunction[t1]
+        restoreReturnAddressBeforeReturn(t3)
+        loadp JITStackFrame::vm[sp], t3
+    elsif MIPS
+        loadp JITStackFrame::vm[sp], t3
+        storep cfr, VM::topCallFrame[t3]
+        move t0, t2
+        preserveReturnAddressAfterCall(t3)
+        storep t3, ReturnPC[cfr]
+        move cfr, t0
+        loadi Callee + PayloadOffset[cfr], t1
+        loadp JSFunction::m_executable[t1], t1
+        move t2, cfr
+        move t0, a0
+        call executableOffsetToFunction[t1]
+        restoreReturnAddressBeforeReturn(t3)
+        loadp JITStackFrame::vm[sp], t3
+    elsif SH4
+        loadp JITStackFrame::vm[sp], t3
+        storep cfr, VM::topCallFrame[t3]
         move t0, t2
         preserveReturnAddressAfterCall(t3)
         storep t3, ReturnPC[cfr]
@@ -1702,11 +1895,24 @@ macro nativeCallTrampoline(executableOffsetToFunction)
         move t2, cfr
         call executableOffsetToFunction[t1]
         restoreReturnAddressBeforeReturn(t3)
-        loadp JITStackFrame::globalData[sp], t3
+        loadp JITStackFrame::vm[sp], t3
+    elsif C_LOOP
+        loadp JITStackFrame::vm[sp], t3
+        storep cfr, VM::topCallFrame[t3]
+        move t0, t2
+        preserveReturnAddressAfterCall(t3)
+        storep t3, ReturnPC[cfr]
+        move cfr, t0
+        loadi Callee + PayloadOffset[cfr], t1
+        loadp JSFunction::m_executable[t1], t1
+        move t2, cfr
+        cloopCallNative executableOffsetToFunction[t1]
+        restoreReturnAddressBeforeReturn(t3)
+        loadp JITStackFrame::vm[sp], t3
     else
         error
     end
-    bineq JSGlobalData::exception + TagOffset[t3], EmptyValueTag, .exception
+    bineq VM::exception + TagOffset[t3], EmptyValueTag, .exception
     ret
 .exception:
     preserveReturnAddressAfterCall(t1) # This is really only needed on X86
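
The interpreter code in this diff manipulates every JavaScript value through paired TagOffset/PayloadOffset loads and stores (Int32Tag, CellTag, EmptyValueTag, and so on). As a reading aid, here is a minimal C++ sketch of that two-word 32_64 value encoding on a little-endian target; the numeric tag constants are assumptions for illustration only, not the authoritative definitions from the JSC headers.

    // Illustrative sketch of a 32_64-style two-word JS value, as used by the
    // interpreter code above. Tag constants below are assumed for the example.
    #include <cassert>
    #include <cstdint>

    namespace sketch {

    constexpr uint32_t Int32Tag      = 0xffffffffu; // assumed: payload holds a signed 32-bit int
    constexpr uint32_t CellTag       = 0xfffffffbu; // assumed: payload holds a cell (object) pointer
    constexpr uint32_t EmptyValueTag = 0xfffffffau; // assumed: marks an empty slot (e.g. an array hole)

    struct Value32_64 {
        uint32_t payload; // at PayloadOffset: integer bits or pointer bits
        uint32_t tag;     // at TagOffset: selects how payload is interpreted
    };

    inline bool isInt32(Value32_64 v) { return v.tag == Int32Tag; }
    inline bool isCell(Value32_64 v)  { return v.tag == CellTag; }
    inline bool isEmpty(Value32_64 v) { return v.tag == EmptyValueTag; }

    inline int32_t asInt32(Value32_64 v)
    {
        assert(isInt32(v)); // the fast paths above branch on the tag before touching the payload
        return static_cast<int32_t>(v.payload);
    }

    } // namespace sketch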