X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/2d39b0e377c0896910ee49ae70082ba665faf986..refs/heads/master:/jit/JITOpcodes.cpp?ds=sidebyside diff --git a/jit/JITOpcodes.cpp b/jit/JITOpcodes.cpp index 25a843f..0813b15 100644 --- a/jit/JITOpcodes.cpp +++ b/jit/JITOpcodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012-2015 Apple Inc. All rights reserved. * Copyright (C) 2010 Patrick Gansterer * * Redistribution and use in source and binary forms, with or without @@ -28,17 +28,23 @@ #if ENABLE(JIT) #include "JIT.h" -#include "Arguments.h" +#include "BasicBlockLocation.h" #include "CopiedSpaceInlines.h" #include "Debugger.h" +#include "Exception.h" #include "Heap.h" #include "JITInlines.h" #include "JSArray.h" #include "JSCell.h" #include "JSFunction.h" -#include "JSPropertyNameIterator.h" +#include "JSNameScope.h" +#include "JSPropertyNameEnumerator.h" +#include "LinkBuffer.h" #include "MaxFrameExtentForSlowPathCall.h" +#include "RepatchBuffer.h" #include "SlowPathCall.h" +#include "TypeLocation.h" +#include "TypeProfilerLog.h" #include "VirtualRegister.h" namespace JSC { @@ -59,15 +65,6 @@ void JIT::emit_op_mov(Instruction* currentInstruction) emitPutVirtualRegister(dst); } -void JIT::emit_op_captured_mov(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int src = currentInstruction[2].u.operand; - - emitGetVirtualRegister(src, regT0); - emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet); - emitPutVirtualRegister(dst); -} void JIT::emit_op_end(Instruction* currentInstruction) { @@ -229,42 +226,26 @@ void JIT::emit_op_is_string(Instruction* currentInstruction) emitPutVirtualRegister(dst); } -void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) -{ - int activation = currentInstruction[1].u.operand; - Jump activationNotCreated = branchTest64(Zero, addressFor(activation)); - emitGetVirtualRegister(activation, regT0); - callOperation(operationTearOffActivation, regT0); - activationNotCreated.link(this); -} - -void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) +void JIT::emit_op_is_object(Instruction* currentInstruction) { - int arguments = currentInstruction[1].u.operand; - int activation = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; - Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset()))); - emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0); - emitGetVirtualRegister(activation, regT1); - callOperation(operationTearOffArguments, regT0, regT1); - argsNotCreated.link(this); -} + emitGetVirtualRegister(value, regT0); + Jump isNotCell = emitJumpIfNotJSCell(regT0); -void JIT::emit_op_ret(Instruction* currentInstruction) -{ - ASSERT(callFrameRegister != regT1); - ASSERT(regT1 != returnValueGPR); - ASSERT(returnValueGPR != callFrameRegister); + compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); + emitTagAsBoolImmediate(regT0); + Jump done = jump(); - // Return the result in %eax. 
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); + isNotCell.link(this); + move(TrustedImm32(ValueFalse), regT0); - checkStackPointerAlignment(); - emitFunctionEpilogue(); - ret(); + done.link(this); + emitPutVirtualRegister(dst); } -void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) +void JIT::emit_op_ret(Instruction* currentInstruction) { ASSERT(callFrameRegister != regT1); ASSERT(regT1 != returnValueGPR); @@ -272,19 +253,8 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) // Return the result in %eax. emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); - Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR); - Jump notObject = emitJumpIfCellNotObject(returnValueGPR); - - // Return. - emitFunctionEpilogue(); - ret(); - // Return 'this' in %eax. - notJSCell.link(this); - notObject.link(this); - emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR); - - // Return. + checkStackPointerAlignment(); emitFunctionEpilogue(); ret(); } @@ -297,9 +267,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) emitGetVirtualRegister(src, regT0); Jump isImm = emitJumpIfNotJSCell(regT0); - addSlowCase(branchStructure(NotEqual, - Address(regT0, JSCell::structureIDOffset()), - m_vm->stringStructure.get())); + addSlowCase(emitJumpIfCellObject(regT0)); isImm.link(this); if (dst != src) @@ -456,115 +424,18 @@ void JIT::emit_op_throw(Instruction* currentInstruction) jumpToExceptionHandler(); } -void JIT::emit_op_get_pnames(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int breakTarget = currentInstruction[5].u.operand; - - JumpList isNotObject; - - emitGetVirtualRegister(base, regT0); - if (!m_codeBlock->isKnownNotImmediate(base)) - isNotObject.append(emitJumpIfNotJSCell(regT0)); - if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) - isNotObject.append(emitJumpIfCellNotObject(regT0)); - - // We could inline the case where you have a valid cache, but - // this call doesn't seem to be hot. 
- Label isObject(this); - callOperation(operationGetPNames, regT0); - emitStoreCell(dst, returnValueGPR); - load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); - store64(tagTypeNumberRegister, addressFor(i)); - store32(TrustedImm32(Int32Tag), intTagFor(size)); - store32(regT3, intPayloadFor(size)); - Jump end = jump(); - - isNotObject.link(this); - move(regT0, regT1); - and32(TrustedImm32(~TagBitUndefined), regT1); - addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget); - callOperation(operationToObject, base, regT0); - jump().linkTo(isObject, this); - - end.link(this); -} - -void JIT::emit_op_next_pname(Instruction* currentInstruction) +void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int it = currentInstruction[5].u.operand; - int target = currentInstruction[6].u.operand; - - JumpList callHasProperty; - - Label begin(this); - load32(intPayloadFor(i), regT0); - Jump end = branch32(Equal, regT0, intPayloadFor(size)); - - // Grab key @ i - loadPtr(addressFor(it), regT1); - loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); - - load64(BaseIndex(regT2, regT0, TimesEight), regT2); - - emitPutVirtualRegister(dst, regT2); - - // Increment i - add32(TrustedImm32(1), regT0); - store32(regT0, intPayloadFor(i)); - - // Verify that i is valid: - emitGetVirtualRegister(base, regT0); - - // Test base's structure - emitLoadStructure(regT0, regT2, regT3); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); - - // Test base's prototype chain - loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3); - loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3); - addJump(branchTestPtr(Zero, Address(regT3)), target); - - Label checkPrototype(this); - load64(Address(regT2, Structure::prototypeOffset()), regT2); - callHasProperty.append(emitJumpIfNotJSCell(regT2)); - emitLoadStructure(regT2, regT2, regT1); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3))); - addPtr(TrustedImm32(sizeof(Structure*)), regT3); - branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this); - - // Continue loop. - addJump(jump(), target); - - // Slow case: Ask the object if i is valid. - callHasProperty.link(this); - emitGetVirtualRegister(dst, regT1); - callOperation(operationHasProperty, regT0, regT1); - - // Test for valid key. - addJump(branchTest32(NonZero, regT0), target); - jump().linkTo(begin, this); - - // End of loop. 
- end.link(this); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + callOperation(operationPushWithScope, dst, regT0); } -void JIT::emit_op_push_with_scope(Instruction* currentInstruction) +void JIT::emit_op_pop_scope(Instruction* currentInstruction) { - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - callOperation(operationPushWithScope, regT0); -} + int scope = currentInstruction[1].u.operand; -void JIT::emit_op_pop_scope(Instruction*) -{ - callOperation(operationPopScope); + callOperation(operationPopScope, scope); } void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) @@ -618,10 +489,28 @@ void JIT::emit_op_to_number(Instruction* currentInstruction) emitPutVirtualRegister(currentInstruction[1].u.operand); } +void JIT::emit_op_to_string(Instruction* currentInstruction) +{ + int srcVReg = currentInstruction[2].u.operand; + emitGetVirtualRegister(srcVReg, regT0); + + addSlowCase(emitJumpIfNotJSCell(regT0)); + addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); + + emitPutVirtualRegister(currentInstruction[1].u.operand); +} + void JIT::emit_op_push_name_scope(Instruction* currentInstruction) { + int dst = currentInstruction[1].u.operand; emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); - callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand); + if (currentInstruction[4].u.operand == JSNameScope::CatchScope) { + callOperation(operationPushCatchScope, dst, jsCast(getConstantOperand(currentInstruction[3].u.operand)), regT0); + return; + } + + RELEASE_ASSERT(currentInstruction[4].u.operand == JSNameScope::FunctionNameScope); + callOperation(operationPushFunctionNameScope, dst, jsCast(getConstantOperand(currentInstruction[3].u.operand)), regT0); } void JIT::emit_op_catch(Instruction* currentInstruction) @@ -633,12 +522,17 @@ void JIT::emit_op_catch(Instruction* currentInstruction) move(TrustedImmPtr(m_vm), regT3); load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister); + load64(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0); + store64(regT0, Address(regT3, VM::topVMEntryFrameOffset())); addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); load64(Address(regT3, VM::exceptionOffset()), regT0); store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); emitPutVirtualRegister(currentInstruction[1].u.operand); + + load64(Address(regT0, Exception::valueOffset()), regT0); + emitPutVirtualRegister(currentInstruction[2].u.operand); } void JIT::emit_op_switch_imm(Instruction* currentInstruction) @@ -779,34 +673,23 @@ void JIT::emit_op_enter(Instruction*) emitEnterOptimizationCheck(); } -void JIT::emit_op_create_activation(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - - Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); - callOperation(operationCreateActivation, 0); - emitStoreCell(dst, returnValueGPR); - activationCreated.link(this); -} - -void JIT::emit_op_create_arguments(Instruction* currentInstruction) +void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; + int scope = currentInstruction[2].u.operand; - Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); - - 
callOperation(operationCreateArguments);
+    emitGetVirtualRegister(scope, regT0);
+    callOperation(operationCreateActivation, regT0);
     emitStoreCell(dst, returnValueGPR);
-    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR);
-
-    argsCreated.link(this);
+    emitStoreCell(scope, returnValueGPR);
 }

-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-
-    store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
+    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+    emitStoreCell(dst, regT0);
 }

 void JIT::emit_op_to_this(Instruction* currentInstruction)
@@ -823,53 +706,58 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
     addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
 }

-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
-    int result = currentInstruction[1].u.operand;
-    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
-    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
-
-    loadPtr(cachedFunction, regT2);
-    addSlowCase(branchPtr(NotEqual, regT0, regT2));
-
-    emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
-    slowPathCall.call();
-}
-
 void JIT::emit_op_create_this(Instruction* currentInstruction)
 {
     int callee = currentInstruction[2].u.operand;
+    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
     RegisterID calleeReg = regT0;
+    RegisterID rareDataReg = regT4;
     RegisterID resultReg = regT0;
     RegisterID allocatorReg = regT1;
     RegisterID structureReg = regT2;
+    RegisterID cachedFunctionReg = regT4;
     RegisterID scratchReg = regT3;

     emitGetVirtualRegister(callee, calleeReg);
-    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
-    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+    addSlowCase(branchTestPtr(Zero, rareDataReg));
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
     addSlowCase(branchTestPtr(Zero, allocatorReg));

+    loadPtr(cachedFunction, cachedFunctionReg);
+    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+    hasSeenMultipleCallees.link(this);
+
     emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }

 void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    linkSlowCase(iter); // doesn't have rare data
     linkSlowCase(iter); // doesn't have an allocation profile
     linkSlowCase(iter); // allocation failed
+    linkSlowCase(iter); // cached function didn't match

     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
     slowPathCall.call();
 }

+void
JIT::emit_op_check_tdz(Instruction* currentInstruction) +{ + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); + addSlowCase(branchTest64(Zero, regT0)); +} + +void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector::iterator& iter) +{ + linkSlowCase(iter); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error); + slowPathCall.call(); +} + void JIT::emit_op_profile_will_call(Instruction* currentInstruction) { Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress())); @@ -1016,76 +904,17 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector::iterator& iter) -{ - linkSlowCase(iter); - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - callOperation(operationGetArgumentsLength, dst, base); -} - -void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction) +void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector::iterator& iter) { - int dst = currentInstruction[1].u.operand; - int argumentsRegister = currentInstruction[2].u.operand; - int property = currentInstruction[3].u.operand; - addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister))); - emitGetVirtualRegister(property, regT1); - addSlowCase(emitJumpIfNotImmediateInteger(regT1)); - add32(TrustedImm32(1), regT1); - // regT1 now contains the integer index of the argument we want, including this - emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2); - addSlowCase(branch32(AboveOrEqual, regT1, regT2)); - - signExtend32ToPtr(regT1, regT1); - load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast(sizeof(Register))), regT0); - emitValueProfilingSite(); - emitPutVirtualRegister(dst, regT0); -} + linkSlowCase(iter); // Not JSCell. + linkSlowCase(iter); // Not JSString. 
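// [Illustrative note, not part of the patch: the op_to_string fast path above only
//  passes the operand through untouched when it is already a JSString cell; both
//  slow cases land here and defer to slow_path_to_string below, roughly
//      if (!value.isCell() || value.asCell()->type() != StringType)
//          value = value.toString(exec);
//  with toString() standing in for the usual JSValue-to-string conversion.]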
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector::iterator& iter) -{ - int dst = currentInstruction[1].u.operand; - int arguments = currentInstruction[2].u.operand; - int property = currentInstruction[3].u.operand; - - linkSlowCase(iter); - Jump skipArgumentsCreation = jump(); - - linkSlowCase(iter); - linkSlowCase(iter); - callOperation(operationCreateArguments); - emitStoreCell(arguments, returnValueGPR); - emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR); - - skipArgumentsCreation.link(this); - emitGetVirtualRegister(arguments, regT0); - emitGetVirtualRegister(property, regT1); - callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string); + slowPathCall.call(); } #endif // USE(JSVALUE64) -void JIT::emit_op_touch_entry(Instruction* currentInstruction) -{ - if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated()) - return; - - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry); - slowPathCall.call(); -} - void JIT::emit_op_loop_hint(Instruction*) { // Emit the JIT optimization check: @@ -1139,32 +968,36 @@ void JIT::emit_op_new_func(Instruction* currentInstruction) { Jump lazyJump; int dst = currentInstruction[1].u.operand; - if (currentInstruction[3].u.operand) { -#if USE(JSVALUE32_64) - lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag)); + +#if USE(JSVALUE64) + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); #else - lazyJump = branchTest64(NonZero, addressFor(dst)); + emitLoadPayload(currentInstruction[2].u.operand, regT0); #endif - } - - FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand); - callOperation(operationNewFunction, dst, funcExec); - - if (currentInstruction[3].u.operand) - lazyJump.link(this); -} - -void JIT::emit_op_new_captured_func(Instruction* currentInstruction) -{ - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func); - slowPathCall.call(); + FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand); + callOperation(operationNewFunction, dst, regT0, funcExec); } void JIT::emit_op_new_func_exp(Instruction* currentInstruction) { + Jump notUndefinedScope; int dst = currentInstruction[1].u.operand; - FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand); - callOperation(operationNewFunction, dst, funcExpr); +#if USE(JSVALUE64) + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined()))); + store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst)); +#else + emitLoadPayload(currentInstruction[2].u.operand, regT0); + notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag)); + emitStore(dst, jsUndefined()); +#endif + + Jump done = jump(); + notUndefinedScope.link(this); + + FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[3].u.operand); + callOperation(operationNewFunction, dst, regT0, funcExpr); + done.link(this); } void JIT::emit_op_new_array(Instruction* currentInstruction) @@ -1201,16 +1034,330 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction) callOperation(operationNewArrayBufferWithProfile, dst, 
currentInstruction[4].u.arrayAllocationProfile, values, size); } -void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector::iterator& iter) +#if USE(JSVALUE64) +void JIT::emit_op_has_structure_property(Instruction* currentInstruction) { - VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet; - if (!set || set->state() == IsInvalidated) - return; -#if USE(JSVALUE32_64) + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int enumerator = currentInstruction[4].u.operand; + + emitGetVirtualRegister(base, regT0); + emitGetVirtualRegister(enumerator, regT1); + emitJumpSlowCaseIfNotJSCell(regT0, base); + + load32(Address(regT0, JSCell::structureIDOffset()), regT0); + addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + emitPutVirtualRegister(dst); +} + +void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) +{ + Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex; + + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. + JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType); + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + Jump done = jump(); + + LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock); + + patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + + patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone)); + + byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( + m_codeBlock, patchBuffer, + ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value())); + + RepatchBuffer repatchBuffer(m_codeBlock); + repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code())); + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric)); +} + +void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + ArrayProfile* profile = currentInstruction[4].u.arrayProfile; + + emitGetVirtualRegisters(base, regT0, property, regT1); + + // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter. + // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if + // number was signed since m_vectorLength is always less than intmax (since the total allocation + // size is always less than 4Gb). As such zero extending will have been correct (and extending the value + // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign + // extending since it makes it easier to re-tag the value in the slow case. 
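// [Worked example, not part of the patch: a negative index such as -1 has the bit
//  pattern 0xFFFFFFFF, so after zero-extension it reads as 4294967295. That value
//  is always AboveOrEqual any possible m_vectorLength, so the unsigned bounds check
//  fails and the access takes the slow path instead of reading out of bounds.]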
+ zeroExtend32ToPtr(regT1, regT1); + + emitJumpSlowCaseIfNotJSCell(regT0, base); + emitArrayProfilingSiteWithCell(regT0, regT2, profile); + and32(TrustedImm32(IndexingShapeMask), regT2); + + JITArrayMode mode = chooseArrayMode(profile); + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. + JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType); + + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + + addSlowCase(badType); + addSlowCase(slowCases); + + Label done = label(); + + emitPutVirtualRegister(dst); + + m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done)); +} + +void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector::iterator& iter) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + ArrayProfile* profile = currentInstruction[4].u.arrayProfile; + + linkSlowCaseIfNotJSCell(iter, base); // base cell check + linkSlowCase(iter); // base array check + linkSlowCase(iter); // vector length check + linkSlowCase(iter); // empty value + + Label slowPath = label(); + + emitGetVirtualRegister(base, regT0); + emitGetVirtualRegister(property, regT1); + Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, profile); + + m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; + m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; + m_byValInstructionIndex++; +} + +void JIT::emit_op_get_direct_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int index = currentInstruction[4].u.operand; + int enumerator = currentInstruction[5].u.operand; + + // Check that base is a cell + emitGetVirtualRegister(base, regT0); + emitJumpSlowCaseIfNotJSCell(regT0, base); + + // Check the structure + emitGetVirtualRegister(enumerator, regT2); + load32(Address(regT0, JSCell::structureIDOffset()), regT1); + addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + + // Compute the offset + emitGetVirtualRegister(index, regT1); + // If index is less than the enumerator's cached inline storage, then it's an inline access + Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); + addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0); + signExtend32ToPtr(regT1, regT1); + load64(BaseIndex(regT0, regT1, TimesEight), regT0); + + Jump done = jump(); + + // Otherwise it's out of line + outOfLineAccess.link(this); + loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); + sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1); + neg32(regT1); + signExtend32ToPtr(regT1, regT1); + int32_t offsetOfFirstProperty = static_cast(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); + load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0); + + done.link(this); + emitValueProfilingSite(); + emitPutVirtualRegister(dst, regT0); +} + +void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector::iterator& iter) +{ + int base = currentInstruction[2].u.operand; + linkSlowCaseIfNotJSCell(iter, base); linkSlowCase(iter); -#endif + + 
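// [Sketch, not part of the patch: the get_direct_pname fast path above amounts to
//  the following C++, using the fields whose offsets the generated code reads
//  (accessor names are illustrative):
//      unsigned inlineCapacity = enumerator->cachedInlineCapacity();
//      if (index < inlineCapacity)
//          result = base->inlineStorage()[index];
//      else
//          result = reinterpret_cast<EncodedJSValue*>(base->butterfly())
//              [offsetInButterfly(firstOutOfLineOffset) - (index - inlineCapacity)];
//  A non-cell base or a structure mismatch ends up here on the slow path instead.]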
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname); + slowPathCall.call(); +} + +void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int enumerator = currentInstruction[2].u.operand; + int index = currentInstruction[3].u.operand; + + emitGetVirtualRegister(index, regT0); + emitGetVirtualRegister(enumerator, regT1); + Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset())); + + move(TrustedImm64(JSValue::encode(jsNull())), regT0); + + Jump done = jump(); + inBounds.link(this); + + loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); + signExtend32ToPtr(regT0, regT0); + load64(BaseIndex(regT1, regT0, TimesEight), regT0); + + done.link(this); + emitPutVirtualRegister(dst); +} + +void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int enumerator = currentInstruction[2].u.operand; + int index = currentInstruction[3].u.operand; + + emitGetVirtualRegister(index, regT0); + emitGetVirtualRegister(enumerator, regT1); + Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); + + move(TrustedImm64(JSValue::encode(jsNull())), regT0); + + Jump done = jump(); + inBounds.link(this); + + loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); + signExtend32ToPtr(regT0, regT0); + load64(BaseIndex(regT1, regT0, TimesEight), regT0); + + done.link(this); + emitPutVirtualRegister(dst); +} + +void JIT::emit_op_profile_type(Instruction* currentInstruction) +{ + TypeLocation* cachedTypeLocation = currentInstruction[2].u.location; + int valueToProfile = currentInstruction[1].u.operand; + + emitGetVirtualRegister(valueToProfile, regT0); + + JumpList jumpToEnd; + + // Compile in a predictive type check, if possible, to see if we can skip writing to the log. + // These typechecks are inlined to match those of the 64-bit JSValue type checks. + if (cachedTypeLocation->m_lastSeenType == TypeUndefined) + jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined())))); + else if (cachedTypeLocation->m_lastSeenType == TypeNull) + jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull())))); + else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) { + move(regT0, regT1); + and64(TrustedImm32(~1), regT1); + jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse))); + } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt) + jumpToEnd.append(emitJumpIfImmediateInteger(regT0)); + else if (cachedTypeLocation->m_lastSeenType == TypeNumber) + jumpToEnd.append(emitJumpIfImmediateNumber(regT0)); + else if (cachedTypeLocation->m_lastSeenType == TypeString) { + Jump isNotCell = emitJumpIfNotJSCell(regT0); + jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); + isNotCell.link(this); + } + + // Load the type profiling log into T2. + TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog(); + move(TrustedImmPtr(cachedTypeProfilerLog), regT2); + // Load the next log entry into T1. + loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1); + + // Store the JSValue onto the log entry. 
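// [Sketch, not part of the patch: judging from the offsets used below, a log entry
//  roughly carries
//      struct LogEntry { JSValue value; StructureID structureID; TypeLocation* location; };
//  (field order per the real TypeProfilerLog header). The code fills the current
//  entry, bumps the current-entry pointer, and calls operationProcessTypeProfilerLog
//  to flush the log when the end is reached.]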
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset())); + + // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry. + Jump notCell = emitJumpIfNotJSCell(regT0); + load32(Address(regT0, JSCell::structureIDOffset()), regT0); + store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); + Jump skipIsCell = jump(); + notCell.link(this); + store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); + skipIsCell.link(this); + + // Store the typeLocation on the log entry. + move(TrustedImmPtr(cachedTypeLocation), regT0); + store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset())); + + // Increment the current log entry. + addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1); + store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset())); + Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())); + // Clear the log if we're at the end of the log. + callOperation(operationProcessTypeProfilerLog); + skipClearLog.link(this); + + jumpToEnd.link(this); +} + +#endif // USE(JSVALUE64) + +void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length); + slowPathCall.call(); +} + +void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector::iterator& iter) +{ linkSlowCase(iter); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov); + linkSlowCase(iter); + + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property); + slowPathCall.call(); +} + +void JIT::emit_op_has_generic_property(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property); + slowPathCall.call(); +} + +void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator); + slowPathCall.call(); +} + +void JIT::emit_op_to_index_string(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string); + slowPathCall.call(); +} + +void JIT::emit_op_profile_control_flow(Instruction* currentInstruction) +{ + BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; + if (!basicBlockLocation->hasExecuted()) + basicBlockLocation->emitExecuteCode(*this, regT1); +} + +void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments); + slowPathCall.call(); +} + +void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments); + slowPathCall.call(); +} + +void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments); slowPathCall.call(); }
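
Editorial sketch appended for clarity; it is not part of the patch. The two
enumerator_*_pname opcodes above compile to the same simple lookup, materializing
jsNull() once the index passes the relevant end marker. The accessor names below
are illustrative stand-ins for the fields whose offsets the JIT reads
(endStructurePropertyIndex / endGenericPropertyIndex and the cached
property-names vector):

    // Shared shape of op_enumerator_structure_pname and op_enumerator_generic_pname.
    static JSValue enumeratorPname(JSPropertyNameEnumerator* enumerator, uint32_t index, uint32_t endIndex)
    {
        if (index < endIndex)
            return enumerator->cachedPropertyNamesVector()[index]; // cached property name string
        return jsNull(); // out of names for this phase of the enumeration
    }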