/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

// Puts an argument onto the stack, as an argument to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    poke(src, argumentNumber);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    poke(Imm32(value), argumentNumber);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    poke(ImmPtr(value), argumentNumber);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    peek(dst, argumentNumber);
}

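// A rough usage sketch for the deprecated helpers above (the argument slots, register,
// and stub names here are illustrative, not actual call sites):
//
//     emitPutJITStubArgConstant(somePointerConstant, 1); // constant argument in slot 1
//     emitPutJITStubArg(regT0, 2);                       // register argument in slot 2
//     emitNakedCall(someStubFunction);                   // call into the stub
//
// JITStubCall wraps this poke-then-call pattern; new code should use it instead.
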
ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

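// CallFrameHeaderEntry values index fixed, Register-sized slots in the call frame
// header, so 'entry * sizeof(Register)' is a byte offset from the call frame register
// (or from whatever frame is passed as 'from'). An illustrative use (the entry name is
// an example only):
//
//     emitGetFromCallFrameHeaderPtr(RegisterFile::CodeBlock, regT0, callFrameRegister);
//
// The non-JSVALUE32_64 builds also kill the cached result register, since the load may
// clobber the register the result-caching optimisation is tracking.
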
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}

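// emitNakedCall plants a near call whose target is not yet linked; the CallRecord
// (call, bytecode index, eventual target address) appended to m_calls lets the target
// be patched in when the generated code is linked.
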
#if PLATFORM(X86) || PLATFORM(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#elif PLATFORM_ARM_ARCH(7)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#endif

#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if PLATFORM(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif PLATFORM_ARM_ARCH(7)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif

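// JIT stubs take a pointer to the JITStackFrame as their argument. Outside the VA_LIST
// configuration that pointer is simply the current stack pointer, passed in the first
// argument register, with the caller's call frame written into JITStackFrame::callFrame.
// Within an x86 trampoline the return address has already been pushed, so the stub
// argument must skip over it, hence the sizeof(void*) adjustment above.
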
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}

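// checkStructure compares a cell's Structure pointer against an expected Structure and
// returns the Jump taken on a mismatch; callers typically hand that jump to addSlowCase()
// so a structure miss falls through to the slow path.
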
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}

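// The bookkeeping above feeds the link phase: slow-case jumps accumulate in m_slowCases
// keyed by bytecode index and are linked by the slow-path generators via linkSlowCase(),
// while forward jumps in the hot path accumulate in m_jmpTable and are later resolved
// against the per-bytecode labels in m_labels.
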
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if PLATFORM(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

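// On 32-bit little-endian targets the 64-bit counter is bumped with a 32-bit add to the
// low word followed by an add-with-carry of zero into the high word, which sits one
// int32 above the counter's address.
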
#if ENABLE(OPCODE_SAMPLING)
#if PLATFORM(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if PLATFORM(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
    storePtr(ImmPtr(codeBlock), X86::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

#if USE(JSVALUE32_64)

inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}

inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}

inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)));
}

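// Under JSVALUE32_64 each virtual register occupies one Register-sized slot holding a
// JSValue split into a 32-bit tag and a 32-bit payload. tagFor/payloadFor address the
// respective half relative to 'base' (the call frame by default), while addressFor
// addresses the whole slot, e.g. for double loads and stores.
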
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

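// map/unmap implement a one-entry cache: for the current bytecode index they remember
// which machine registers hold a virtual register's tag and payload, so emitLoadTag and
// emitLoadPayload can skip the reload. A mapping is never established at a jump target
// (see isLabeled) and is dropped when a cached register is overwritten or the bytecode
// index moves on.
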
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentNumber);
        poke(Imm32(constant.tag()), argumentNumber + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentNumber);
        poke(scratch1, argumentNumber + 1);
    }
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a virtual register from the register file into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already in the cached result register.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

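// emitGetVirtualRegister skips the load when 'src' is the temporary that produced the
// most recent result (still live in cachedResultRegister) and no jump target has been
// crossed since; crossing a jump target matters because control can reach it along a
// path on which the cached register holds something else. Otherwise the value is
// loaded from the call frame.
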
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

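// Under JSVALUE64 the int32 tag occupies the value's high bits, so an unsigned pointer
// comparison against tagTypeNumberRegister distinguishes immediates: AboveOrEqual means
// every tag bit is set (an int32 immediate), Below means it is not.
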
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(JSVALUE64)
    UNUSED_PARAM(reg);
#else
    rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

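// In the JSVALUE64 encoding an int32 becomes an immediate by OR-ing in the tag bits held
// in tagTypeNumberRegister. In the 32-bit immediate encoding the payload is shifted left
// by one (the addPtr of dest to itself) and then tagged with TagTypeNumber, which is why
// emitFastArithImmToInt undoes it with a right shift by IntegerPayloadShift.
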
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

/* Deprecated: Please use JITStubCall instead. */

// Puts an argument from the register file onto the stack, as an argument to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        emitPutJITStubArg(scratch, argumentNumber);
    }

    killLastResultRegister();
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif