/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef JITInlineMethods_h
27 #define JITInlineMethods_h
29 #include <wtf/Platform.h>
#undef FIELD_OFFSET // Fix conflict with winnt.h.

// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
// NULL can cause compiler problems, especially in cases of multiple inheritance.
#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
44 ALWAYS_INLINE
void JIT::killLastResultRegister()
46 m_lastResultBytecodeRegister
= std::numeric_limits
<int>::max();
49 // get arg puts an arg from the SF register array into a h/w register
50 ALWAYS_INLINE
void JIT::emitGetVirtualRegister(int src
, RegisterID dst
)
52 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
54 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
55 if (m_codeBlock
->isConstantRegisterIndex(src
)) {
56 JSValuePtr value
= m_codeBlock
->getConstant(src
);
57 move(ImmPtr(JSValuePtr::encode(value
)), dst
);
58 killLastResultRegister();
62 if (src
== m_lastResultBytecodeRegister
&& m_codeBlock
->isTemporaryRegisterIndex(src
)) {
63 bool atJumpTarget
= false;
64 while (m_jumpTargetsPosition
< m_codeBlock
->numberOfJumpTargets() && m_codeBlock
->jumpTarget(m_jumpTargetsPosition
) <= m_bytecodeIndex
) {
65 if (m_codeBlock
->jumpTarget(m_jumpTargetsPosition
) == m_bytecodeIndex
)
67 ++m_jumpTargetsPosition
;
71 // The argument we want is already stored in eax
74 killLastResultRegister();
79 loadPtr(Address(callFrameRegister
, src
* sizeof(Register
)), dst
);
80 killLastResultRegister();
83 ALWAYS_INLINE
void JIT::emitGetVirtualRegisters(int src1
, RegisterID dst1
, int src2
, RegisterID dst2
)
85 if (src2
== m_lastResultBytecodeRegister
) {
86 emitGetVirtualRegister(src2
, dst2
);
87 emitGetVirtualRegister(src1
, dst1
);
89 emitGetVirtualRegister(src1
, dst1
);
90 emitGetVirtualRegister(src2
, dst2
);
94 // puts an arg onto the stack, as an arg to a context threaded function.
95 ALWAYS_INLINE
void JIT::emitPutJITStubArg(RegisterID src
, unsigned argumentNumber
)
97 poke(src
, argumentNumber
);
100 ALWAYS_INLINE
void JIT::emitPutJITStubArgConstant(unsigned value
, unsigned argumentNumber
)
102 poke(Imm32(value
), argumentNumber
);
105 ALWAYS_INLINE
void JIT::emitPutJITStubArgConstant(void* value
, unsigned argumentNumber
)
107 poke(ImmPtr(value
), argumentNumber
);
110 ALWAYS_INLINE
void JIT::emitGetJITStubArg(unsigned argumentNumber
, RegisterID dst
)
112 peek(dst
, argumentNumber
);
115 ALWAYS_INLINE JSValuePtr
JIT::getConstantOperand(unsigned src
)
117 ASSERT(m_codeBlock
->isConstantRegisterIndex(src
));
118 return m_codeBlock
->getConstant(src
);
121 ALWAYS_INLINE
int32_t JIT::getConstantOperandImmediateInt(unsigned src
)
123 return getConstantOperand(src
).getInt32Fast();
126 ALWAYS_INLINE
bool JIT::isOperandConstantImmediateInt(unsigned src
)
128 return m_codeBlock
->isConstantRegisterIndex(src
) && getConstantOperand(src
).isInt32Fast();
131 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
132 ALWAYS_INLINE
void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src
, unsigned argumentNumber
, RegisterID scratch
)
134 if (m_codeBlock
->isConstantRegisterIndex(src
)) {
135 JSValuePtr value
= m_codeBlock
->getConstant(src
);
136 emitPutJITStubArgConstant(JSValuePtr::encode(value
), argumentNumber
);
138 loadPtr(Address(callFrameRegister
, src
* sizeof(Register
)), scratch
);
139 emitPutJITStubArg(scratch
, argumentNumber
);
142 killLastResultRegister();
145 ALWAYS_INLINE
void JIT::emitPutCTIParam(void* value
, unsigned name
)
147 poke(ImmPtr(value
), name
);
150 ALWAYS_INLINE
void JIT::emitPutCTIParam(RegisterID from
, unsigned name
)
155 ALWAYS_INLINE
void JIT::emitGetCTIParam(unsigned name
, RegisterID to
)
158 killLastResultRegister();
161 ALWAYS_INLINE
void JIT::emitPutToCallFrameHeader(RegisterID from
, RegisterFile::CallFrameHeaderEntry entry
)
163 storePtr(from
, Address(callFrameRegister
, entry
* sizeof(Register
)));
166 ALWAYS_INLINE
void JIT::emitPutImmediateToCallFrameHeader(void* value
, RegisterFile::CallFrameHeaderEntry entry
)
168 storePtr(ImmPtr(value
), Address(callFrameRegister
, entry
* sizeof(Register
)));
171 ALWAYS_INLINE
void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry
, RegisterID to
)
173 loadPtr(Address(callFrameRegister
, entry
* sizeof(Register
)), to
);
174 killLastResultRegister();
177 ALWAYS_INLINE
void JIT::emitPutVirtualRegister(unsigned dst
, RegisterID from
)
179 storePtr(from
, Address(callFrameRegister
, dst
* sizeof(Register
)));
180 m_lastResultBytecodeRegister
= (from
== X86::eax
) ? dst
: std::numeric_limits
<int>::max();
181 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
184 ALWAYS_INLINE
void JIT::emitInitRegister(unsigned dst
)
186 storePtr(ImmPtr(JSValuePtr::encode(jsUndefined())), Address(callFrameRegister
, dst
* sizeof(Register
)));
187 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
190 ALWAYS_INLINE
JIT::Jump
JIT::emitNakedCall(X86::RegisterID r
)
192 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
194 Jump nakedCall
= call(r
);
195 m_calls
.append(CallRecord(nakedCall
, m_bytecodeIndex
));
199 ALWAYS_INLINE
JIT::Jump
JIT::emitNakedCall(void* function
)
201 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
203 Jump nakedCall
= call();
204 m_calls
.append(CallRecord(nakedCall
, m_bytecodeIndex
, function
));
208 #if USE(JIT_STUB_ARGUMENT_REGISTER)
209 ALWAYS_INLINE
void JIT::restoreArgumentReference()
212 move(X86::esp
, X86::edi
);
214 move(X86::esp
, X86::ecx
);
216 emitPutCTIParam(callFrameRegister
, STUB_ARGS_callFrame
);
218 ALWAYS_INLINE
void JIT::restoreArgumentReferenceForTrampoline()
220 // In the trampoline on x86-64, the first argument register is not overwritten.
221 #if !PLATFORM(X86_64)
222 move(X86::esp
, X86::ecx
);
223 addPtr(Imm32(sizeof(void*)), X86::ecx
);
226 #elif USE(JIT_STUB_ARGUMENT_STACK)
227 ALWAYS_INLINE
void JIT::restoreArgumentReference()
229 storePtr(X86::esp
, X86::esp
);
230 emitPutCTIParam(callFrameRegister
, STUB_ARGS_callFrame
);
232 ALWAYS_INLINE
void JIT::restoreArgumentReferenceForTrampoline() {}
233 #else // JIT_STUB_ARGUMENT_VA_LIST
234 ALWAYS_INLINE
void JIT::restoreArgumentReference()
236 emitPutCTIParam(callFrameRegister
, STUB_ARGS_callFrame
);
238 ALWAYS_INLINE
void JIT::restoreArgumentReferenceForTrampoline() {}
241 ALWAYS_INLINE
JIT::Jump
JIT::emitCTICall_internal(void* helper
)
243 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
245 #if ENABLE(OPCODE_SAMPLING)
246 sampleInstruction(m_codeBlock
->instructions().begin() + m_bytecodeIndex
, true);
248 restoreArgumentReference();
249 Jump ctiCall
= call();
250 m_calls
.append(CallRecord(ctiCall
, m_bytecodeIndex
, helper
));
251 #if ENABLE(OPCODE_SAMPLING)
252 sampleInstruction(m_codeBlock
->instructions().begin() + m_bytecodeIndex
, false);
254 killLastResultRegister();
259 ALWAYS_INLINE
JIT::Jump
JIT::checkStructure(RegisterID reg
, Structure
* structure
)
261 return jnePtr(Address(reg
, FIELD_OFFSET(JSCell
, m_structure
)), ImmPtr(structure
));
264 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfJSCell(RegisterID reg
)
266 #if USE(ALTERNATE_JSIMMEDIATE)
267 return jzPtr(reg
, tagMaskRegister
);
269 return jz32(reg
, Imm32(JSImmediate::TagMask
));
273 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfBothJSCells(RegisterID reg1
, RegisterID reg2
, RegisterID scratch
)
276 orPtr(reg2
, scratch
);
277 return emitJumpIfJSCell(scratch
);
280 ALWAYS_INLINE
void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg
)
282 addSlowCase(emitJumpIfJSCell(reg
));
285 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfNotJSCell(RegisterID reg
)
287 #if USE(ALTERNATE_JSIMMEDIATE)
288 return jnzPtr(reg
, tagMaskRegister
);
290 return jnz32(reg
, Imm32(JSImmediate::TagMask
));
294 ALWAYS_INLINE
void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg
)
296 addSlowCase(emitJumpIfNotJSCell(reg
));
299 ALWAYS_INLINE
void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg
, int vReg
)
301 if (!m_codeBlock
->isKnownNotImmediate(vReg
))
302 emitJumpSlowCaseIfNotJSCell(reg
);
305 ALWAYS_INLINE
void JIT::linkSlowCaseIfNotJSCell(Vector
<SlowCaseEntry
>::iterator
& iter
, int vReg
)
307 if (!m_codeBlock
->isKnownNotImmediate(vReg
))
311 #if USE(ALTERNATE_JSIMMEDIATE)
312 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfImmediateNumber(RegisterID reg
)
314 return jnzPtr(reg
, tagTypeNumberRegister
);
316 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfNotImmediateNumber(RegisterID reg
)
318 return jzPtr(reg
, tagTypeNumberRegister
);
322 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfImmediateInteger(RegisterID reg
)
324 #if USE(ALTERNATE_JSIMMEDIATE)
325 return jaePtr(reg
, tagTypeNumberRegister
);
327 return jnz32(reg
, Imm32(JSImmediate::TagTypeNumber
));
331 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfNotImmediateInteger(RegisterID reg
)
333 #if USE(ALTERNATE_JSIMMEDIATE)
334 return jbPtr(reg
, tagTypeNumberRegister
);
336 return jz32(reg
, Imm32(JSImmediate::TagTypeNumber
));
340 ALWAYS_INLINE
JIT::Jump
JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1
, RegisterID reg2
, RegisterID scratch
)
343 andPtr(reg2
, scratch
);
344 return emitJumpIfNotImmediateInteger(scratch
);
347 ALWAYS_INLINE
void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg
)
349 addSlowCase(emitJumpIfNotImmediateInteger(reg
));
352 ALWAYS_INLINE
void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1
, RegisterID reg2
, RegisterID scratch
)
354 addSlowCase(emitJumpIfNotImmediateIntegers(reg1
, reg2
, scratch
));
357 #if !USE(ALTERNATE_JSIMMEDIATE)
358 ALWAYS_INLINE
void JIT::emitFastArithDeTagImmediate(RegisterID reg
)
360 subPtr(Imm32(JSImmediate::TagTypeNumber
), reg
);
363 ALWAYS_INLINE
JIT::Jump
JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg
)
365 return jzSubPtr(Imm32(JSImmediate::TagTypeNumber
), reg
);
369 ALWAYS_INLINE
void JIT::emitFastArithReTagImmediate(RegisterID src
, RegisterID dest
)
371 #if USE(ALTERNATE_JSIMMEDIATE)
372 emitFastArithIntToImmNoCheck(src
, dest
);
376 addPtr(Imm32(JSImmediate::TagTypeNumber
), dest
);
380 ALWAYS_INLINE
void JIT::emitFastArithImmToInt(RegisterID reg
)
382 #if USE(ALTERNATE_JSIMMEDIATE)
385 rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift
), reg
);
389 // operand is int32_t, must have been zero-extended if register is 64-bit.
390 ALWAYS_INLINE
void JIT::emitFastArithIntToImmNoCheck(RegisterID src
, RegisterID dest
)
392 #if USE(ALTERNATE_JSIMMEDIATE)
395 orPtr(tagTypeNumberRegister
, dest
);
397 signExtend32ToPtr(src
, dest
);
399 emitFastArithReTagImmediate(dest
, dest
);
403 ALWAYS_INLINE
void JIT::emitTagAsBoolImmediate(RegisterID reg
)
405 lshift32(Imm32(JSImmediate::ExtendedPayloadShift
), reg
);
406 or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool
)), reg
);
409 ALWAYS_INLINE
void JIT::addSlowCase(Jump jump
)
411 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
413 m_slowCases
.append(SlowCaseEntry(jump
, m_bytecodeIndex
));
416 ALWAYS_INLINE
void JIT::addJump(Jump jump
, int relativeOffset
)
418 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
420 m_jmpTable
.append(JumpTable(jump
, m_bytecodeIndex
+ relativeOffset
));
423 ALWAYS_INLINE
void JIT::emitJumpSlowToHot(Jump jump
, int relativeOffset
)
425 ASSERT(m_bytecodeIndex
!= (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
427 jump
.linkTo(m_labels
[m_bytecodeIndex
+ relativeOffset
], this);
432 #endif // ENABLE(JIT)