/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h


#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
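
// Note: each JSValue stub argument occupies sizeof(JSValue) / sizeof(void*)
// machine words on the stack (one word on 64-bit builds, two on 32-bit builds),
// so the peek offset above is counted in words past JITSTACKFRAME_ARGS_INDEX.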

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}
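
// On JSVALUE32_64 builds each call frame header slot is split into a 32-bit tag
// and a 32-bit payload, which is why the cell and int variants above also write
// an explicit tag word alongside the payload.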

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}
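
// emitLoadCharacterString stays on the fast path only when src is a non-rope
// JSString (vptr check, zero fiber count) of length exactly 1; it then leaves
// the string's single 16-bit character in dst. Any other shape branches to the
// supplied failure list.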

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}
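
// Each CallRecord pairs the near call with its bytecode offset and target
// address; the recorded calls are presumably patched to their real
// destinations once the generated code has been finalized into an executable
// buffer.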

ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}
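
// atJumpTarget() advances through the code block's sorted jump-target list in
// step with code generation. It is consulted below (see emitGetVirtualRegister)
// when deciding whether the cached result register can be reused: at a jump
// target, control may arrive from elsewhere, so the cache cannot be trusted.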

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
    JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* There are several cases where the uninterrupted sequence is larger than
     * the maximum offset required for patching the same sequence. E.g., if the
     * last macroassembler instruction in an uninterrupted sequence is a stub
     * call, it emits store instruction(s) which should not be included in the
     * calculation of the sequence's length. So insnSpace and constSpace should
     * be treated as upper bounds rather than hard limits.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
    JSInterfaceJIT::endUninterruptedSequence();
}
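
// begin/endUninterruptedSequence bracket code that must not be split by a
// constant-pool flush. The NDEBUG-only bookkeeping asserts at the end of the
// sequence that the emitted instructions and constants stayed within the space
// reserved up front.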

#endif

#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}
#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
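
// The preserve/restore helpers above abstract over where the return address
// lives on each target: the link register on ARM, the SPR register on SH4, the
// return address register on MIPS, and the machine stack on x86/x86-64 (hence
// the pop/push pairs).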

ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
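
// restoreArgumentReference() puts the current stack pointer into the first
// argument register (the stack-allocated JITStackFrame is what the JIT stubs
// read their arguments from) and spills the call frame pointer into the frame's
// callFrame slot. The trampoline variant compensates on x86 for the return
// address that is already on the stack at that point.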

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
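
// Slow cases and forward jumps are recorded against the current bytecode offset
// and are presumably resolved after the main pass, once a label exists for
// every bytecode offset; emitJumpSlowToHot can link immediately because its
// hot-path target label has already been emitted.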

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
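
// Sampling flags are 1-based: flag N maps to bit (N - 1) of the global flags
// word, hence the (flag - 1) shifts above.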
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
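
// On 32-bit little-endian targets the 64-bit counter cannot be bumped in one
// instruction, so it is updated as a 32-bit add on the low word followed by an
// add-with-carry into the high word at counter address + 4.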
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

#if USE(JSVALUE32_64)

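// In the JSVALUE32_64 encoding every value is a 32-bit tag plus a 32-bit
// payload, so the helpers below always move the two words separately and keep
// track of which registers currently mirror a virtual register's tag and
// payload.
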
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
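
// emitLoad2 loads index1 first only when it is mapped; otherwise index2 goes
// first, so a mapped operand's cached registers are read before the other load
// can clobber them.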

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
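
// Constant operands live in the code block's constant register file rather than
// in the call frame, so the two loaders above read directly from that storage
// (for the int case, from the payload word of the boxed constant).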

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}
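
// map()/unmap() implement a single-entry cache: at most one virtual register's
// tag and payload are remembered as live in machine registers, and only for the
// bytecode offset that established the mapping. map() refuses to cache at a
// jump target, since control could arrive there with different register
// contents.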

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads an operand from the register file (the call frame's register array) into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
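
// On JSVALUE64 the JIT remembers which virtual register the previous op left in
// cachedResultRegister (m_lastResultBytecodeRegister, set by
// emitPutVirtualRegister below). The middle branch above reuses that value
// instead of reloading it from the call frame, but only for temporaries and
// never at a jump target.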

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
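
// In the JSVALUE64 encoding an int32 is boxed by OR-ing in the tag bits held in
// tagTypeNumberRegister; the integer and cell checks above reduce to a single
// compare or mask test against tagTypeNumberRegister/tagMaskRegister.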

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif