/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
*/
#include "config.h"
+
+#if ENABLE(JIT)
+
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
-#if ENABLE(JIT)
-
+#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
+#include "DFGCapabilities.h"
#include "Interpreter.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
+#include "JITInlines.h"
+#include "JITOperations.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "ProfilerDatabase.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
+#include "SlowPathCall.h"
+#include "StackAlignment.h"
+#include <wtf/CryptographicallyRandomNumber.h>
using namespace std;
repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
- : m_interpreter(globalData->interpreter)
- , m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
- , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
- , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
- , m_bytecodeIndex((unsigned)-1)
-#if USE(JSVALUE32_64)
- , m_jumpTargetIndex(0)
- , m_mappedBytecodeIndex((unsigned)-1)
- , m_mappedVirtualRegisterIndex((unsigned)-1)
- , m_mappedTag((RegisterID)-1)
- , m_mappedPayload((RegisterID)-1)
-#else
- , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
- , m_jumpTargetsPosition(0)
-#endif
+JIT::JIT(VM* vm, CodeBlock* codeBlock)
+ : JSInterfaceJIT(vm, codeBlock)
+ , m_interpreter(vm->interpreter)
+ , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
+ , m_bytecodeOffset((unsigned)-1)
+ , m_getByIdIndex(UINT_MAX)
+ , m_putByIdIndex(UINT_MAX)
+ , m_byValInstructionIndex(UINT_MAX)
+ , m_callLinkInfoIndex(UINT_MAX)
+ , m_randomGenerator(cryptographicallyRandomNumber())
+ , m_canBeOptimized(false)
+ , m_shouldEmitProfiling(false)
{
}
-#if USE(JSVALUE32_64)
-void JIT::emitTimeoutCheck()
+#if ENABLE(DFG_JIT)
+void JIT::emitEnterOptimizationCheck()
{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall stubCall(this, cti_timeout_check);
- stubCall.addArgument(regT1, regT0); // save last result registers.
- stubCall.call(timeoutCheckRegister);
- stubCall.getArgument(0, regT1, regT0); // reload last result registers.
- skipTimeout.link(this);
-}
-#else
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
- skipTimeout.link(this);
-    killLastResultRegister();
+    if (!canBeOptimized())
+        return;
+
+    JumpList skipOptimize;
+
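+    // The execute counter counts up from a negative threshold; branchAdd32(Signed, ...)
+    // keeps skipping the check until it crosses zero. operationOptimize returns null
+    // while no optimized code exists; otherwise returnValueGPR holds the entry point
+    // and returnValueGPR2 the stack pointer to install before jumping to it.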
+ skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+ ASSERT(!m_bytecodeOffset);
+ callOperation(operationOptimize, m_bytecodeOffset);
+ skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
+ move(returnValueGPR2, stackPointerRegister);
+ jump(returnValueGPR);
+ skipOptimize.link(this);
}
#endif
#define NEXT_OPCODE(name) \
- m_bytecodeIndex += OPCODE_LENGTH(name); \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
-#if USE(JSVALUE32_64)
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.addArgument(currentInstruction[3].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#else // USE(JSVALUE32_64)
-
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
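+// DEFINE_SLOW_OP supersedes the DEFINE_BINARY_OP/DEFINE_UNARY_OP pair above: these
+// opcodes are never specialized inline and always call the matching slow_path_* function.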
+#define DEFINE_SLOW_OP(name) \
+ case op_##name: { \
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
+ slowPathCall.call(); \
+ NEXT_OPCODE(op_##name); \
}
-#endif // USE(JSVALUE32_64)
#define DEFINE_OP(name) \
case name: { \
void JIT::privateCompileMainPass()
{
+ jitAssertTagsInPlace();
+ jitAssertArgumentCountSane();
+
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
- m_propertyAccessInstructionIndex = 0;
- m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
- for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
- Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
- ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
+ for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ if (m_disassembler)
+ m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
#if ENABLE(OPCODE_SAMPLING)
- if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
#endif
-#if !USE(JSVALUE32_64)
- if (m_labels[m_bytecodeIndex].isUsed())
- killLastResultRegister();
-#endif
-
- m_labels[m_bytecodeIndex] = label();
+ m_labels[m_bytecodeOffset] = label();
- switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
- DEFINE_BINARY_OP(op_del_by_val)
-#if USE(JSVALUE32)
- DEFINE_BINARY_OP(op_div)
+#if ENABLE(JIT_VERBOSE)
+ dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
- DEFINE_BINARY_OP(op_in)
- DEFINE_BINARY_OP(op_less)
- DEFINE_BINARY_OP(op_lesseq)
- DEFINE_BINARY_OP(op_urshift)
- DEFINE_UNARY_OP(op_is_boolean)
- DEFINE_UNARY_OP(op_is_function)
- DEFINE_UNARY_OP(op_is_number)
- DEFINE_UNARY_OP(op_is_object)
- DEFINE_UNARY_OP(op_is_string)
- DEFINE_UNARY_OP(op_is_undefined)
-#if !USE(JSVALUE32_64)
- DEFINE_UNARY_OP(op_negate)
-#endif
- DEFINE_UNARY_OP(op_typeof)
-
+
+ OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
+
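+        // When a per-bytecode profiler compilation is active, bump the execution
+        // count recorded for this bytecode so the profiler can attribute work to it.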
+ if (m_compilation) {
+ add64(
+ TrustedImm32(1),
+ AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
+ m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
+ }
+
+ if (Options::eagerlyUpdateTopCallFrame())
+ updateTopCallFrame();
+
+ switch (opcodeID) {
+ DEFINE_SLOW_OP(del_by_val)
+ DEFINE_SLOW_OP(in)
+ DEFINE_SLOW_OP(less)
+ DEFINE_SLOW_OP(lesseq)
+ DEFINE_SLOW_OP(greater)
+ DEFINE_SLOW_OP(greatereq)
+ DEFINE_SLOW_OP(is_function)
+ DEFINE_SLOW_OP(is_object)
+ DEFINE_SLOW_OP(typeof)
+
+ DEFINE_OP(op_touch_entry)
DEFINE_OP(op_add)
DEFINE_OP(op_bitand)
- DEFINE_OP(op_bitnot)
DEFINE_OP(op_bitor)
DEFINE_OP(op_bitxor)
DEFINE_OP(op_call)
DEFINE_OP(op_call_eval)
DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_construct_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
- DEFINE_OP(op_construct_verify)
- DEFINE_OP(op_convert_this)
- DEFINE_OP(op_init_arguments)
+ DEFINE_OP(op_get_callee)
+ DEFINE_OP(op_create_this)
+ DEFINE_OP(op_to_this)
+ DEFINE_OP(op_init_lazy_reg)
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
-#if !USE(JSVALUE32)
DEFINE_OP(op_div)
-#endif
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
- DEFINE_OP(op_enter_with_activation)
+ DEFINE_OP(op_create_activation)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
+ case op_get_by_id_out_of_line:
+ case op_get_array_length:
DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_arguments_length)
DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_argument_by_val)
DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_global_var)
DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_check_has_instance)
DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_is_undefined)
+ DEFINE_OP(op_is_boolean)
+ DEFINE_OP(op_is_number)
+ DEFINE_OP(op_is_string)
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
DEFINE_OP(op_jmp)
- DEFINE_OP(op_jmp_scopes)
DEFINE_OP(op_jneq_null)
DEFINE_OP(op_jneq_ptr)
- DEFINE_OP(op_jnless)
DEFINE_OP(op_jless)
+ DEFINE_OP(op_jlesseq)
+ DEFINE_OP(op_jgreater)
+ DEFINE_OP(op_jgreatereq)
+ DEFINE_OP(op_jnless)
DEFINE_OP(op_jnlesseq)
- DEFINE_OP(op_jsr)
+ DEFINE_OP(op_jngreater)
+ DEFINE_OP(op_jngreatereq)
DEFINE_OP(op_jtrue)
- DEFINE_OP(op_load_varargs)
- DEFINE_OP(op_loop)
- DEFINE_OP(op_loop_if_less)
- DEFINE_OP(op_loop_if_lesseq)
- DEFINE_OP(op_loop_if_true)
- DEFINE_OP(op_loop_if_false)
+ DEFINE_OP(op_loop_hint)
DEFINE_OP(op_lshift)
- DEFINE_OP(op_method_check)
DEFINE_OP(op_mod)
+ DEFINE_OP(op_captured_mov)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
-#if USE(JSVALUE32_64)
DEFINE_OP(op_negate)
-#endif
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
- DEFINE_OP(op_new_error)
+ DEFINE_OP(op_new_array_with_size)
+ DEFINE_OP(op_new_array_buffer)
DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_captured_func)
DEFINE_OP(op_new_func_exp)
DEFINE_OP(op_new_object)
DEFINE_OP(op_new_regexp)
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
DEFINE_OP(op_pop_scope)
- DEFINE_OP(op_post_dec)
- DEFINE_OP(op_post_inc)
- DEFINE_OP(op_pre_dec)
- DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_dec)
+ DEFINE_OP(op_inc)
DEFINE_OP(op_profile_did_call)
DEFINE_OP(op_profile_will_call)
- DEFINE_OP(op_push_new_scope)
- DEFINE_OP(op_push_scope)
+ DEFINE_OP(op_push_name_scope)
+ DEFINE_OP(op_push_with_scope)
+ case op_put_by_id_out_of_line:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_OP(op_put_by_id)
DEFINE_OP(op_put_by_index)
+ case op_put_by_val_direct:
DEFINE_OP(op_put_by_val)
- DEFINE_OP(op_put_getter)
- DEFINE_OP(op_put_global_var)
- DEFINE_OP(op_put_scoped_var)
- DEFINE_OP(op_put_setter)
- DEFINE_OP(op_resolve)
- DEFINE_OP(op_resolve_base)
- DEFINE_OP(op_resolve_global)
- DEFINE_OP(op_resolve_skip)
- DEFINE_OP(op_resolve_with_base)
+ DEFINE_OP(op_put_getter_setter)
+ case op_init_global_const_nop:
+ NEXT_OPCODE(op_init_global_const_nop);
+ DEFINE_OP(op_init_global_const)
+
DEFINE_OP(op_ret)
+ DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
- DEFINE_OP(op_sret)
+ DEFINE_OP(op_unsigned)
+ DEFINE_OP(op_urshift)
DEFINE_OP(op_strcat)
DEFINE_OP(op_stricteq)
DEFINE_OP(op_sub)
DEFINE_OP(op_tear_off_activation)
DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
- DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_throw_static_error)
+ DEFINE_OP(op_to_number)
DEFINE_OP(op_to_primitive)
- case op_get_array_length:
- case op_get_by_id_chain:
- case op_get_by_id_generic:
- case op_get_by_id_proto:
- case op_get_by_id_proto_list:
- case op_get_by_id_self:
- case op_get_by_id_self_list:
- case op_get_string_length:
- case op_put_by_id_generic:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- ASSERT_NOT_REACHED();
+ DEFINE_OP(op_resolve_scope)
+ DEFINE_OP(op_get_from_scope)
+ DEFINE_OP(op_put_to_scope)
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
}
}
- ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
- ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
-
void JIT::privateCompileLinkPass()
{
unsigned jmpTableCount = m_jmpTable.size();
for (unsigned i = 0; i < jmpTableCount; ++i)
- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
m_jmpTable.clear();
}
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- m_propertyAccessInstructionIndex = 0;
-#if USE(JSVALUE32_64)
- m_globalResolveInfoIndex = 0;
-#endif
+ m_getByIdIndex = 0;
+ m_putByIdIndex = 0;
+ m_byValInstructionIndex = 0;
m_callLinkInfoIndex = 0;
+
+ // Use this to assert that slow-path code associates new profiling sites with existing
+ // ValueProfiles rather than creating new ones. This ensures that for a given instruction
+ // (say, get_by_id) we get combined statistics for both the fast-path executions of that
    // instruction and the slow-path executions. Furthermore, if the slow-path code created
+ // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
+ // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
+ unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-#if !USE(JSVALUE32_64)
-        killLastResultRegister();
-#endif
-        m_bytecodeIndex = iter->to;
-#ifndef NDEBUG
-        unsigned firstTo = m_bytecodeIndex;
+        m_bytecodeOffset = iter->to;
+
+        unsigned firstTo = m_bytecodeOffset;
+
+        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+
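+        // Each slow case gets a RareCaseProfile; the add32 emitted after the switch
+        // below counts how often the slow path actually executes.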
+ RareCaseProfile* rareCaseProfile = 0;
+ if (shouldEmitProfiling())
+ rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
+
+#if ENABLE(JIT_VERBOSE)
+ dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
-        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+
+ if (m_disassembler)
+ m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_SLOWCASE_OP(op_add)
DEFINE_SLOWCASE_OP(op_bitand)
- DEFINE_SLOWCASE_OP(op_bitnot)
DEFINE_SLOWCASE_OP(op_bitor)
DEFINE_SLOWCASE_OP(op_bitxor)
DEFINE_SLOWCASE_OP(op_call)
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct_varargs)
DEFINE_SLOWCASE_OP(op_construct)
- DEFINE_SLOWCASE_OP(op_construct_verify)
- DEFINE_SLOWCASE_OP(op_convert_this)
-#if !USE(JSVALUE32)
+ DEFINE_SLOWCASE_OP(op_to_this)
+ DEFINE_SLOWCASE_OP(op_create_this)
+ DEFINE_SLOWCASE_OP(op_captured_mov)
DEFINE_SLOWCASE_OP(op_div)
-#endif
DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_callee)
+ case op_get_by_id_out_of_line:
+ case op_get_array_length:
DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_argument_by_val)
DEFINE_SLOWCASE_OP(op_get_by_pname)
+ DEFINE_SLOWCASE_OP(op_check_has_instance)
DEFINE_SLOWCASE_OP(op_instanceof)
DEFINE_SLOWCASE_OP(op_jfalse)
- DEFINE_SLOWCASE_OP(op_jnless)
DEFINE_SLOWCASE_OP(op_jless)
+ DEFINE_SLOWCASE_OP(op_jlesseq)
+ DEFINE_SLOWCASE_OP(op_jgreater)
+ DEFINE_SLOWCASE_OP(op_jgreatereq)
+ DEFINE_SLOWCASE_OP(op_jnless)
DEFINE_SLOWCASE_OP(op_jnlesseq)
+ DEFINE_SLOWCASE_OP(op_jngreater)
+ DEFINE_SLOWCASE_OP(op_jngreatereq)
DEFINE_SLOWCASE_OP(op_jtrue)
- DEFINE_SLOWCASE_OP(op_loop_if_less)
- DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
- DEFINE_SLOWCASE_OP(op_loop_if_true)
- DEFINE_SLOWCASE_OP(op_loop_if_false)
+ DEFINE_SLOWCASE_OP(op_loop_hint)
DEFINE_SLOWCASE_OP(op_lshift)
- DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
-#if USE(JSVALUE32_64)
DEFINE_SLOWCASE_OP(op_negate)
-#endif
DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_new_object)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
- DEFINE_SLOWCASE_OP(op_post_dec)
- DEFINE_SLOWCASE_OP(op_post_inc)
- DEFINE_SLOWCASE_OP(op_pre_dec)
- DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_dec)
+ DEFINE_SLOWCASE_OP(op_inc)
+ case op_put_by_id_out_of_line:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
+ case op_put_by_val_direct:
DEFINE_SLOWCASE_OP(op_put_by_val)
-#if USE(JSVALUE32_64)
- DEFINE_SLOWCASE_OP(op_resolve_global)
-#endif
DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_unsigned)
+ DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
- DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_number)
DEFINE_SLOWCASE_OP(op_to_primitive)
+
+ DEFINE_SLOWCASE_OP(op_resolve_scope)
+ DEFINE_SLOWCASE_OP(op_get_from_scope)
+ DEFINE_SLOWCASE_OP(op_put_to_scope)
+
         default:
-            ASSERT_NOT_REACHED();
+            RELEASE_ASSERT_NOT_REACHED();
}
- ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen.");
- ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+ RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
+ RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+
+ if (shouldEmitProfiling())
+ add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
emitJumpSlowToHot(jump(), 0);
}
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
-#endif
- ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+ RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
+ RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+ RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
-JITCode JIT::privateCompile()
+CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
+ DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
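+    // Translate the DFG's verdict on this code block into the flags that drive
+    // baseline codegen; profiling is only emitted if the DFG might ever consume it.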
+ switch (level) {
+ case DFG::CannotCompile:
+ m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = false;
+ m_shouldEmitProfiling = false;
+ break;
+ case DFG::CanInline:
+ m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = true;
+ m_shouldEmitProfiling = true;
+ break;
+ case DFG::CanCompile:
+ case DFG::CanCompileAndInline:
+ m_canBeOptimized = true;
+ m_canBeOptimizedOrInlined = true;
+ m_shouldEmitProfiling = true;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ switch (m_codeBlock->codeType()) {
+ case GlobalCode:
+ case EvalCode:
+ m_codeBlock->m_shouldAlwaysBeInlined = false;
+ break;
+ case FunctionCode:
+ // We could have already set it to false because we detected an uninlineable call.
+ // Don't override that observation.
+ m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
+ break;
+ }
+
+ if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
+ m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
+ if (m_vm->m_perBytecodeProfiler) {
+ m_compilation = adoptRef(
+ new Profiler::Compilation(
+ m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
+ Profiler::Baseline));
+ m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
+ }
+
+ if (m_disassembler)
+ m_disassembler->setStartOfCode(label());
+
+ // Just add a little bit of randomness to the codegen
+ if (m_randomGenerator.getUint32() & 1)
+ nop();
+
+ emitFunctionPrologue();
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+
+ Label beginLabel(this);
+
sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
sampleInstruction(m_codeBlock->instructions().begin());
#endif
- // Could use a pop_m, but would need to offset the following instruction if so.
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
-
- Jump slowRegisterFileCheck;
- Label afterRegisterFileCheck;
+ Jump stackOverflow;
if (m_codeBlock->codeType() == FunctionCode) {
- // In the case of a fast linked call, we do not set this up in the caller.
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
-
- peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
- addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+ ASSERT(m_bytecodeOffset == (unsigned)-1);
+ if (shouldEmitProfiling()) {
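+            // Record a value profile for each incoming argument so a future DFG
+            // compile can speculate on argument types.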
+ for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
+ // If this is a constructor, then we want to put in a dummy profiling site (to
+ // keep things consistent) but we don't actually want to record the dummy value.
+ if (m_codeBlock->m_isConstructor && !argument)
+ continue;
+ int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
+#if USE(JSVALUE64)
+ load64(Address(callFrameRegister, offset), regT0);
+#elif USE(JSVALUE32_64)
+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
+ emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
+ }
+ }
- slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
- afterRegisterFileCheck = label();
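+        // The stack grows down: regT1 is this frame's prospective stack pointer, and
+        // we take the overflow path if the VM's stack limit is above it.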
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
+ stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
}
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
+
+ if (m_disassembler)
+ m_disassembler->setEndOfSlowPath(label());
+ Label arityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- slowRegisterFileCheck.link(this);
- m_bytecodeIndex = 0;
- JITStubCall(this, cti_register_file_check).call();
-#ifndef NDEBUG
- m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+ stackOverflow.link(this);
+ m_bytecodeOffset = 0;
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
+ arityCheck = label();
+ store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
+ emitFunctionPrologue();
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+
+        load32(payloadFor(JSStack::ArgumentCount), regT1);
+        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+
+        m_bytecodeOffset = 0;
+
+        if (maxFrameExtentForSlowPathCall)
+            addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+        callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
+        if (maxFrameExtentForSlowPathCall)
+            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+        if (returnValueGPR != regT0)
+            move(returnValueGPR, regT0);
+        branchTest32(Zero, regT0).linkTo(beginLabel, this);
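+        // A non-zero result is the number of missing arguments; use it to index the
+        // arity-check-fail return PC table before calling the arity fixup thunk.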
+        GPRReg thunkReg;
+#if USE(JSVALUE64)
+        thunkReg = GPRInfo::regT7;
+#else
+        thunkReg = GPRInfo::regT5;
+#endif
+        move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
+        loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg);
+        emitNakedCall(m_vm->getCTIStub(arityFixup).code());
+
+#if !ASSERT_DISABLED
+        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
- jump(afterRegisterFileCheck);
+
+        jump(beginLabel);
}
ASSERT(m_jmpTable.isEmpty());
+
+ privateCompileExceptionHandlers();
+
+ if (m_disassembler)
+ m_disassembler->setEndOfCode(label());
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
+ if (patchBuffer.didFailToAllocate())
+ return CompilationFailed;
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
SwitchRecord record = m_switches[i];
- unsigned bytecodeIndex = record.bytecodeIndex;
+ unsigned bytecodeOffset = record.bytecodeOffset;
if (record.type != SwitchRecord::String) {
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
- unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ unsigned offset = it->value.branchOffset;
+ it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
- if (m_codeBlock->hasExceptionInfo()) {
- m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(patchBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(patchBuffer);
+
+ m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
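+    // Stitch each by-val access's recorded patch points into a ByValInfo so that
+    // repatching can later install a specialized array access stub.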
+ for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+ CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+ CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+ m_codeBlock->byValInfo(i) = ByValInfo(
+ m_byValCompilationInfo[i].bytecodeIndex,
+ badTypeJump,
+ m_byValCompilationInfo[i].arrayMode,
+ differenceBetweenCodePtr(badTypeJump, doneTarget),
+ differenceBetweenCodePtr(returnAddress, slowPathTarget));
+ }
+ for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
+ CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
+ CallLinkInfo& info = *compilationInfo.callLinkInfo;
+ info.callReturnLocation = patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(compilationInfo.hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(compilationInfo.hotPathOther);
}
- // Link absolute addresses for jsr
- for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
- patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
- StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
- info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
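+    // Build a compact map from bytecode offsets to machine code offsets, so code that
+    // re-enters this block mid-function (e.g. OSR) can find the JITed code for a
+    // given bytecode index.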
+ CompactJITCodeMap::Encoder jitCodeMapEncoder;
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+ if (m_labels[bytecodeOffset].isSet())
+ jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
}
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.ownerCodeBlock = m_codeBlock;
- info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
- info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+
+ MacroAssemblerCodePtr withArityCheck;
+ if (m_codeBlock->codeType() == FunctionCode)
+ withArityCheck = patchBuffer.locationOf(arityCheck);
+
+ if (Options::showDisassembly())
+ m_disassembler->dump(patchBuffer);
+ if (m_compilation) {
+ m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
+ m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
}
+
+ CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
+
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
+ static_cast<double>(result.size()) /
+ static_cast<double>(m_codeBlock->instructions().size()));
+
+ m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
+ m_codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
+
+#if ENABLE(JIT_VERBOSE)
+ dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
- unsigned methodCallCount = m_methodCallCompilationInfo.size();
- m_codeBlock->addMethodCallLinkInfos(methodCallCount);
- for (unsigned i = 0; i < methodCallCount; ++i) {
- MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
- info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
- info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
- }
-
- return patchBuffer.finalizeCode();
+
+ return CompilationSuccessful;
}
-#if !USE(JSVALUE32_64)
-void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
+void JIT::privateCompileExceptionHandlers()
{
- loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
- loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
- loadPtr(Address(dst, index * sizeof(Register)), dst);
-}
-void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
-{
-    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
-    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
-    storePtr(src, Address(variableObject, index * sizeof(Register)));
-}
-#endif
+    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+        return;
+
+    Jump doLookup;
-#if ENABLE(JIT_OPTIMIZE_CALL)
-void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
-{
- // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
- // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
- // match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
-#if USE(JSVALUE32_64)
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
-#else
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
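+    // Exception checks needing a call-frame rollback first restore the caller's frame
+    // into argumentGPR1; plain checks pass the current frame to lookupExceptionHandler.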
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
+ emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
+ doLookup = jump();
+ }
+
+ if (!m_exceptionChecks.empty())
+ m_exceptionChecks.link(this);
+
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
+ if (doLookup.isSet())
+ doLookup.link(this);
+
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
#endif
+ m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
+ jumpToExceptionHandler();
}
-void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
- RepatchBuffer repatchBuffer(callerCodeBlock);
-    // Currently we only link calls with the exact number of arguments.
-    // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
-    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
-        ASSERT(!callLinkInfo->isLinked());
-
-        if (calleeCodeBlock)
-            calleeCodeBlock->addCaller(callLinkInfo);
-
-        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
-        repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
-    }
+    ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters)));
+    return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters);
+}
- // patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
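+// The baseline stack pointer sits just past the frame's last local register, so its
+// offset is that of local (frameRegisterCountFor(codeBlock) - 1).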
+int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
+{
+ return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC