/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

// Puts an argument onto the stack, as an argument to a JIT stub function.
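// Stub arguments live on the machine stack: each JSValue occupies
// sizeof(JSValue) / sizeof(void*) word-sized slots (two on 32-bit builds, one
// on 64-bit builds), starting at word index JITSTACKFRAME_ARGS_INDEX from the
// stack pointer.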
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(src, argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(Imm32(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(ImmPtr(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

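// Fast path for fetching the single character of a one-character JSString: bail
// out to the slow case unless the cell's vptr identifies it as a JSString, the
// string is not a rope (m_fiberCount is zero), and its length is exactly 1; then
// load the first 16-bit code unit of the backing StringImpl into dst.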
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

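// An uninterrupted sequence is a run of instructions that must not be split by a
// constant pool flush. On traditional ARM the required instruction and constant
// pool space is reserved up front; in debug builds the begin/end pair asserts
// that the sequence used exactly the space it declared.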
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}

#endif

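// The JIT occasionally needs to read or rewrite the return address of the
// current call (for example so a stub can identify or repatch its caller). ARM
// and MIPS keep the return address in a register (lr / $ra), so it is simply
// moved; on x86 and x86-64 it lives on the stack, so it is popped and pushed.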
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif

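// JIT stubs address their arguments through a JITStackFrame pointer. With the
// va_list calling convention that pointer is implicit in the stack layout;
// otherwise the current stack pointer is copied into the first argument
// register. In both cases the callFrame slot of the JITStackFrame is updated
// with the current CallFrame.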
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
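// AbstractSamplingCounter::m_counter is 64 bits wide. 64-bit targets can bump it
// with a single addPtr; 32-bit little-endian targets add to the low word and then
// propagate the carry into the high word.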
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

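// emitLoad2 loads whichever operand is currently mapped to registers first, so
// that the cached tag/payload registers are consumed before the second load can
// clobber them.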
inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

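// The indexIsInt32 / indexIsCell / indexIsBool flags let a caller skip rewriting
// the tag word when the slot is statically known to already hold a value of that
// type, so only the payload needs to be stored.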
inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}

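// The "map" below is a one-entry cache recording which registers currently hold
// the tag and payload of the most recently loaded virtual register. It is only
// valid for the bytecode instruction that established it and is never set up at
// a jump target, so emitLoadTag/emitLoadPayload can reuse the registers instead
// of reloading from the call frame.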
inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

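// For a constant operand that is not known to be a cell, the constant cannot be
// a cell at all, so the slow case is taken unconditionally instead of testing
// the tag at run time.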
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
    }
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(payload, argumentStackOffset);
    poke(tag, argumentStackOffset + 1);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentStackOffset);
        poke(Imm32(constant.tag()), argumentStackOffset + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentStackOffset);
        poke(scratch1, argumentStackOffset + 1);
    }
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a virtual register from the RegisterFile into a hardware register.
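// m_lastResultBytecodeRegister tracks which virtual register the previous
// instruction left in cachedResultRegister; when it matches and the current
// instruction is not a jump target, the load from the call frame can be skipped.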
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already in the cached result register.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

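// In both immediate encodings a pointer to a JSCell has all of its tag bits
// clear; emitJumpIfJSCell tests them via tagMaskRegister on JSVALUE64 and via
// JSImmediate::TagMask on the 32-bit immediate encoding.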
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

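// On JSVALUE64, immediate int32s carry the full TagTypeNumber pattern in their
// high bits, so any value numerically at or above tagTypeNumberRegister is an
// integer, while doubles and cells compare below it. The 32-bit immediate
// encoding simply tests the TagTypeNumber bit.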
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
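// On JSVALUE64 this ORs tagTypeNumberRegister into the value to form the boxed
// integer; on the 32-bit immediate encoding it doubles the sign-extended value
// (shifting it into the payload position) and then adds JSImmediate::TagTypeNumber.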
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

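// Converts a boolean (0 or 1) into a boolean-tagged immediate: the value is
// shifted into the extended payload position and the boolean tag bits are ORed in.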
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

/* Deprecated: Please use JITStubCall instead. */

// Puts a virtual register from the RegisterFile onto the stack, as an argument to a JIT stub function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        poke(scratch, argumentStackOffset);
    }

    killLastResultRegister();
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h