/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 | ||
26 | #ifndef JITInlineMethods_h | |
27 | #define JITInlineMethods_h | |
28 | ||
29 | #include <wtf/Platform.h> | |
30 | ||
31 | #if ENABLE(JIT) | |
32 | ||
33 | #if PLATFORM(WIN) | |
34 | #undef FIELD_OFFSET // Fix conflict with winnt.h. | |
35 | #endif | |
36 | ||
// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
// NULL can cause compiler problems, especially in cases of multiple inheritance.
#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
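// For example, FIELD_OFFSET(JSCell, m_structure) (used by checkStructure() below)
// yields the byte offset of m_structure within JSCell, which offsetof could not
// legally compute for a non-POD class.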
41 | ||
42 | namespace JSC { | |
43 | ||
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads the value of a virtual register from the RegisterFile into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValuePtr value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValuePtr::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        // The cached result in eax cannot be trusted at a jump target, since control
        // may arrive there from a path that did not leave the value in eax.
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already stored in eax.
            if (dst != X86::eax)
                move(X86::eax, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
82 | ||
83 | ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2) | |
84 | { | |
85 | if (src2 == m_lastResultBytecodeRegister) { | |
86 | emitGetVirtualRegister(src2, dst2); | |
87 | emitGetVirtualRegister(src1, dst1); | |
88 | } else { | |
89 | emitGetVirtualRegister(src1, dst1); | |
90 | emitGetVirtualRegister(src2, dst2); | |
91 | } | |
92 | } | |
93 | ||
94 | // puts an arg onto the stack, as an arg to a context threaded function. | |
95 | ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber) | |
96 | { | |
97 | poke(src, argumentNumber); | |
98 | } | |
99 | ||
100 | ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber) | |
101 | { | |
102 | poke(Imm32(value), argumentNumber); | |
103 | } | |
104 | ||
105 | ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber) | |
106 | { | |
107 | poke(ImmPtr(value), argumentNumber); | |
108 | } | |
109 | ||
110 | ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst) | |
111 | { | |
112 | peek(dst, argumentNumber); | |
113 | } | |
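// Note: poke() and peek() address argument slots relative to the stack pointer, so
// values written here become the outgoing stub call's arguments.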
114 | ||
115 | ALWAYS_INLINE JSValuePtr JIT::getConstantOperand(unsigned src) | |
116 | { | |
117 | ASSERT(m_codeBlock->isConstantRegisterIndex(src)); | |
118 | return m_codeBlock->getConstant(src); | |
119 | } | |
120 | ||
121 | ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src) | |
122 | { | |
123 | return getConstantOperand(src).getInt32Fast(); | |
124 | } | |
125 | ||
126 | ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src) | |
127 | { | |
128 | return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast(); | |
129 | } | |
130 | ||
131 | // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function. | |
132 | ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch) | |
133 | { | |
134 | if (m_codeBlock->isConstantRegisterIndex(src)) { | |
135 | JSValuePtr value = m_codeBlock->getConstant(src); | |
136 | emitPutJITStubArgConstant(JSValuePtr::encode(value), argumentNumber); | |
137 | } else { | |
138 | loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch); | |
139 | emitPutJITStubArg(scratch, argumentNumber); | |
140 | } | |
141 | ||
142 | killLastResultRegister(); | |
143 | } | |
144 | ||
145 | ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name) | |
146 | { | |
147 | poke(ImmPtr(value), name); | |
148 | } | |
149 | ||
150 | ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name) | |
151 | { | |
152 | poke(from, name); | |
153 | } | |
154 | ||
155 | ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to) | |
156 | { | |
157 | peek(to, name); | |
158 | killLastResultRegister(); | |
159 | } | |
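// Note: CTI parameters live in the same stack-relative slots used for stub arguments;
// 'name' is the slot index (e.g. STUB_ARGS_callFrame).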
160 | ||
161 | ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry) | |
162 | { | |
163 | storePtr(from, Address(callFrameRegister, entry * sizeof(Register))); | |
164 | } | |
165 | ||
166 | ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry) | |
167 | { | |
168 | storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register))); | |
169 | } | |
170 | ||
171 | ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to) | |
172 | { | |
173 | loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to); | |
174 | killLastResultRegister(); | |
175 | } | |
176 | ||
177 | ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from) | |
178 | { | |
179 | storePtr(from, Address(callFrameRegister, dst * sizeof(Register))); | |
180 | m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max(); | |
181 | // FIXME: #ifndef NDEBUG, Write the correct m_type to the register. | |
182 | } | |
183 | ||
184 | ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) | |
185 | { | |
186 | storePtr(ImmPtr(JSValuePtr::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register))); | |
187 | // FIXME: #ifndef NDEBUG, Write the correct m_type to the register. | |
188 | } | |
189 | ||
190 | ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(X86::RegisterID r) | |
191 | { | |
192 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
193 | ||
194 | Jump nakedCall = call(r); | |
195 | m_calls.append(CallRecord(nakedCall, m_bytecodeIndex)); | |
196 | return nakedCall; | |
197 | } | |
198 | ||
199 | ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function) | |
200 | { | |
201 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
202 | ||
203 | Jump nakedCall = call(); | |
204 | m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function)); | |
205 | return nakedCall; | |
206 | } | |
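// Note: the void* overload emits the call unlinked; the CallRecord retains the target
// function so the call can be patched to its destination when the code is linked.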
207 | ||
208 | #if USE(JIT_STUB_ARGUMENT_REGISTER) | |
209 | ALWAYS_INLINE void JIT::restoreArgumentReference() | |
210 | { | |
211 | #if PLATFORM(X86_64) | |
212 | move(X86::esp, X86::edi); | |
213 | #else | |
214 | move(X86::esp, X86::ecx); | |
215 | #endif | |
216 | emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame); | |
217 | } | |
218 | ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() | |
219 | { | |
220 | // In the trampoline on x86-64, the first argument register is not overwritten. | |
221 | #if !PLATFORM(X86_64) | |
222 | move(X86::esp, X86::ecx); | |
223 | addPtr(Imm32(sizeof(void*)), X86::ecx); | |
224 | #endif | |
225 | } | |
226 | #elif USE(JIT_STUB_ARGUMENT_STACK) | |
227 | ALWAYS_INLINE void JIT::restoreArgumentReference() | |
228 | { | |
229 | storePtr(X86::esp, X86::esp); | |
230 | emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame); | |
231 | } | |
232 | ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {} | |
233 | #else // JIT_STUB_ARGUMENT_VA_LIST | |
234 | ALWAYS_INLINE void JIT::restoreArgumentReference() | |
235 | { | |
236 | emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame); | |
237 | } | |
238 | ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {} | |
239 | #endif | |
240 | ||
241 | ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper) | |
242 | { | |
243 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
244 | ||
245 | #if ENABLE(OPCODE_SAMPLING) | |
246 | sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, true); | |
247 | #endif | |
248 | restoreArgumentReference(); | |
249 | Jump ctiCall = call(); | |
250 | m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper)); | |
251 | #if ENABLE(OPCODE_SAMPLING) | |
252 | sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, false); | |
253 | #endif | |
254 | killLastResultRegister(); | |
255 | ||
256 | return ctiCall; | |
257 | } | |
258 | ||
259 | ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure) | |
260 | { | |
261 | return jnePtr(Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure)); | |
262 | } | |
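// checkStructure() returns the Jump taken when the cell's Structure does not match,
// which callers route to a slow case, typically as the guard on a property-access
// fast path.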
263 | ||
264 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg) | |
265 | { | |
266 | #if USE(ALTERNATE_JSIMMEDIATE) | |
267 | return jzPtr(reg, tagMaskRegister); | |
268 | #else | |
269 | return jz32(reg, Imm32(JSImmediate::TagMask)); | |
270 | #endif | |
271 | } | |
272 | ||
273 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch) | |
274 | { | |
275 | move(reg1, scratch); | |
276 | orPtr(reg2, scratch); | |
277 | return emitJumpIfJSCell(scratch); | |
278 | } | |
279 | ||
280 | ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg) | |
281 | { | |
282 | addSlowCase(emitJumpIfJSCell(reg)); | |
283 | } | |
284 | ||
285 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg) | |
286 | { | |
287 | #if USE(ALTERNATE_JSIMMEDIATE) | |
288 | return jnzPtr(reg, tagMaskRegister); | |
289 | #else | |
290 | return jnz32(reg, Imm32(JSImmediate::TagMask)); | |
291 | #endif | |
292 | } | |
293 | ||
294 | ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg) | |
295 | { | |
296 | addSlowCase(emitJumpIfNotJSCell(reg)); | |
297 | } | |
298 | ||
299 | ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg) | |
300 | { | |
301 | if (!m_codeBlock->isKnownNotImmediate(vReg)) | |
302 | emitJumpSlowCaseIfNotJSCell(reg); | |
303 | } | |
304 | ||
305 | ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg) | |
306 | { | |
307 | if (!m_codeBlock->isKnownNotImmediate(vReg)) | |
308 | linkSlowCase(iter); | |
309 | } | |
310 | ||
311 | #if USE(ALTERNATE_JSIMMEDIATE) | |
312 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg) | |
313 | { | |
314 | return jnzPtr(reg, tagTypeNumberRegister); | |
315 | } | |
316 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg) | |
317 | { | |
318 | return jzPtr(reg, tagTypeNumberRegister); | |
319 | } | |
320 | #endif | |
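// Under ALTERNATE_JSIMMEDIATE a value is a number if any of the tag bits are set,
// and an integer if all of them are: hence the unsigned comparisons (jaePtr/jbPtr)
// against tagTypeNumberRegister in the integer checks below.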
321 | ||
322 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg) | |
323 | { | |
324 | #if USE(ALTERNATE_JSIMMEDIATE) | |
325 | return jaePtr(reg, tagTypeNumberRegister); | |
326 | #else | |
327 | return jnz32(reg, Imm32(JSImmediate::TagTypeNumber)); | |
328 | #endif | |
329 | } | |
330 | ||
331 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg) | |
332 | { | |
333 | #if USE(ALTERNATE_JSIMMEDIATE) | |
334 | return jbPtr(reg, tagTypeNumberRegister); | |
335 | #else | |
336 | return jz32(reg, Imm32(JSImmediate::TagTypeNumber)); | |
337 | #endif | |
338 | } | |
339 | ||
340 | ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) | |
341 | { | |
342 | move(reg1, scratch); | |
343 | andPtr(reg2, scratch); | |
344 | return emitJumpIfNotImmediateInteger(scratch); | |
345 | } | |
346 | ||
347 | ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg) | |
348 | { | |
349 | addSlowCase(emitJumpIfNotImmediateInteger(reg)); | |
350 | } | |
351 | ||
352 | ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) | |
353 | { | |
354 | addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch)); | |
355 | } | |
356 | ||
357 | #if !USE(ALTERNATE_JSIMMEDIATE) | |
358 | ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg) | |
359 | { | |
360 | subPtr(Imm32(JSImmediate::TagTypeNumber), reg); | |
361 | } | |
362 | ||
363 | ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg) | |
364 | { | |
365 | return jzSubPtr(Imm32(JSImmediate::TagTypeNumber), reg); | |
366 | } | |
367 | #endif | |
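// In the non-ALTERNATE_JSIMMEDIATE encoding an immediate integer is stored as
// (payload << IntegerPayloadShift) | TagTypeNumber, which is why de-tagging subtracts
// the tag and emitFastArithImmToInt() (below) right-shifts away the payload shift.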
368 | ||
369 | ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest) | |
370 | { | |
371 | #if USE(ALTERNATE_JSIMMEDIATE) | |
372 | emitFastArithIntToImmNoCheck(src, dest); | |
373 | #else | |
374 | if (src != dest) | |
375 | move(src, dest); | |
376 | addPtr(Imm32(JSImmediate::TagTypeNumber), dest); | |
377 | #endif | |
378 | } | |
379 | ||
380 | ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg) | |
381 | { | |
382 | #if USE(ALTERNATE_JSIMMEDIATE) | |
383 | UNUSED_PARAM(reg); | |
384 | #else | |
385 | rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg); | |
386 | #endif | |
387 | } | |
388 | ||
389 | // operand is int32_t, must have been zero-extended if register is 64-bit. | |
390 | ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest) | |
391 | { | |
392 | #if USE(ALTERNATE_JSIMMEDIATE) | |
393 | if (src != dest) | |
394 | move(src, dest); | |
395 | orPtr(tagTypeNumberRegister, dest); | |
396 | #else | |
397 | signExtend32ToPtr(src, dest); | |
398 | addPtr(dest, dest); | |
399 | emitFastArithReTagImmediate(dest, dest); | |
400 | #endif | |
401 | } | |
402 | ||
403 | ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg) | |
404 | { | |
405 | lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg); | |
406 | or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg); | |
407 | } | |
408 | ||
409 | ALWAYS_INLINE void JIT::addSlowCase(Jump jump) | |
410 | { | |
411 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
412 | ||
413 | m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex)); | |
414 | } | |
415 | ||
416 | ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset) | |
417 | { | |
418 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
419 | ||
420 | m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset)); | |
421 | } | |
422 | ||
423 | ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset) | |
424 | { | |
425 | ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. | |
426 | ||
427 | jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this); | |
428 | } | |
429 | ||
430 | } | |
431 | ||
432 | #endif // ENABLE(JIT) | |
433 | ||
434 | #endif |