1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef JITInlineMethods_h
27 #define JITInlineMethods_h
28
29
30 #if ENABLE(JIT)
31
32 namespace JSC {
33
34 /* Deprecated: Please use JITStubCall instead. */
35
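// Stub arguments live in the JITStackFrame on the machine stack. Each JSValue occupies
// sizeof(JSValue) / sizeof(void*) machine words (1 on 64-bit builds, 2 on 32-bit builds),
// so argument N is peeked from stack word N * wordsPerValue + JITSTACKFRAME_ARGS_INDEX,
// where JITSTACKFRAME_ARGS_INDEX is presumably the word index of the first stub argument.
// E.g. on a 64-bit build, argument 2 is read from slot JITSTACKFRAME_ARGS_INDEX + 2.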
36 ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
37 {
38 unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
39 peek(dst, argumentStackOffset);
40 }
41
42 ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
43 {
44 return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
45 }
46
47 ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
48 {
49 ASSERT(m_codeBlock->isConstantRegisterIndex(src));
50 return m_codeBlock->getConstant(src);
51 }
52
53 ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
54 {
55 storePtr(from, payloadFor(entry, callFrameRegister));
56 }
57
58 ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
59 {
60 #if USE(JSVALUE32_64)
61 store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
62 #endif
63 storePtr(from, payloadFor(entry, callFrameRegister));
64 }
65
66 ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
67 {
68 store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
69 store32(from, intPayloadFor(entry, callFrameRegister));
70 }
71
72 ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
73 {
74 storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
75 }
76
77 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
78 {
79 loadPtr(Address(from, entry * sizeof(Register)), to);
80 #if USE(JSVALUE64)
81 killLastResultRegister();
82 #endif
83 }
84
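// Loads the single character of a one-character JSString into 'dst'. It bails out to
// 'failures' if 'src' is not a JSString, is not of length 1, or if its StringImpl pointer
// is null (e.g. an unresolved rope). Otherwise it loads the StringImpl, checks the 8-bit
// flag, and reads either one byte or one 16-bit code unit from the character data.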
85 ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
86 {
87 failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
88 failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
89 loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
90 failures.append(branchTest32(Zero, dst));
91 loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
92 loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
93
94 JumpList is16Bit;
95 JumpList cont8Bit;
96 is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
97 load8(MacroAssembler::Address(dst, 0), dst);
98 cont8Bit.append(jump());
99 is16Bit.link(this);
100 load16(MacroAssembler::Address(dst, 0), dst);
101 cont8Bit.link(this);
102 }
103
104 ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
105 {
106 load32(Address(from, entry * sizeof(Register)), to);
107 #if USE(JSVALUE64)
108 killLastResultRegister();
109 #endif
110 }
111
112 ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
113 {
114 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
115
116 Call nakedCall = nearCall();
117 m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
118 return nakedCall;
119 }
120
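// Returns true if the current bytecode offset is a jump target. This walks the code
// block's jump target list, which appears to be sorted by offset, and advances
// m_jumpTargetsPosition so that repeated queries during linear code generation stay cheap.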
121 ALWAYS_INLINE bool JIT::atJumpTarget()
122 {
123 while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
124 if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
125 return true;
126 ++m_jumpTargetsPosition;
127 }
128 return false;
129 }
130
131 #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
132
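// An "uninterrupted sequence" is a run of instructions that must not have a constant pool
// flushed into the middle of it, presumably because the repatching code assumes a fixed
// instruction layout. beginUninterruptedSequence() reserves enough instruction and
// constant-pool space up front; in debug builds it also records the start label and pool
// size so that endUninterruptedSequence() can assert that the estimates held.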
133 ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
134 {
135 #if CPU(ARM_TRADITIONAL)
136 #ifndef NDEBUG
137 // Ensure the label after the sequence can also fit
138 insnSpace += sizeof(ARMWord);
139 constSpace += sizeof(uint64_t);
140 #endif
141
142 ensureSpace(insnSpace, constSpace);
143
144 #elif CPU(SH4)
145 #ifndef NDEBUG
146 insnSpace += sizeof(SH4Word);
147 constSpace += sizeof(uint64_t);
148 #endif
149
150 m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
151 #endif
152
153 #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
154 #ifndef NDEBUG
155 m_uninterruptedInstructionSequenceBegin = label();
156 m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
157 #endif
158 #endif
159 }
160
161 ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
162 {
163 UNUSED_PARAM(dst);
164 #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
165 /* There are several cases where the uninterrupted sequence is larger than
166 * the maximum offset required for patching the same sequence. E.g.: if the
167 * last macroassembler instruction in an uninterrupted sequence is a stub
168 * call, it emits store instruction(s) that should not be included when
169 * calculating the length of the uninterrupted sequence. So insnSpace and
170 * constSpace should be treated as upper limits rather than hard limits.
171 */
172 #if CPU(SH4)
173 if ((dst > 15) || (dst < -16)) {
174 insnSpace += 8;
175 constSpace += 2;
176 }
177
178 if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
179 insnSpace += 8;
180 #endif
181 ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
182 ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
183 #endif
184 }
185
186 #endif
187
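// Return-address handling differs per architecture: ARM, SH4 and MIPS keep the return
// address in a link/return register, so it is moved to and from a GPR, while on
// x86/x86-64 it sits on the stack and is simply popped or pushed.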
188 #if CPU(ARM)
189
190 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
191 {
192 move(linkRegister, reg);
193 }
194
195 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
196 {
197 move(reg, linkRegister);
198 }
199
200 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
201 {
202 loadPtr(address, linkRegister);
203 }
204 #elif CPU(SH4)
205
206 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
207 {
208 m_assembler.stspr(reg);
209 }
210
211 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
212 {
213 m_assembler.ldspr(reg);
214 }
215
216 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
217 {
218 loadPtrLinkReg(address);
219 }
220
221 #elif CPU(MIPS)
222
223 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
224 {
225 move(returnAddressRegister, reg);
226 }
227
228 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
229 {
230 move(reg, returnAddressRegister);
231 }
232
233 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
234 {
235 loadPtr(address, returnAddressRegister);
236 }
237
238 #else // CPU(X86) || CPU(X86_64)
239
240 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
241 {
242 pop(reg);
243 }
244
245 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
246 {
247 push(reg);
248 }
249
250 ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
251 {
252 push(address);
253 }
254
255 #endif
256
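// Sets up a call to a JIT stub: the stub receives a pointer to the JITStackFrame (the
// current stack pointer) as its first argument, and the caller's CallFrame is poked into
// the JITStackFrame::callFrame slot so the stub can find it.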
257 ALWAYS_INLINE void JIT::restoreArgumentReference()
258 {
259 move(stackPointerRegister, firstArgumentRegister);
260 poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
261 }
262
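// Publishes the current call frame to m_globalData->topCallFrame so that C++ code called
// from JIT code can locate it. When a bytecode offset is available it is also stored into
// the tag half of the ArgumentCount header slot, with a +1 bias (apparently so that zero
// can keep meaning "unset"); 32-bit builds store an Instruction* there instead.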
263 ALWAYS_INLINE void JIT::updateTopCallFrame()
264 {
265 ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
266 if (m_bytecodeOffset) {
267 #if USE(JSVALUE32_64)
268 storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
269 #else
270 store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
271 #endif
272 }
273 storePtr(callFrameRegister, &m_globalData->topCallFrame);
274 }
275
276 ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
277 {
278 #if CPU(X86)
279 // Within a trampoline the return address will be on the stack at this point.
280 addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
281 #elif CPU(ARM)
282 move(stackPointerRegister, firstArgumentRegister);
283 #elif CPU(SH4)
284 move(stackPointerRegister, firstArgumentRegister);
285 #endif
286 // In the trampoline on x86-64, the first argument register is not overwritten.
287 }
288
289 ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
290 {
291 return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
292 }
293
294 ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
295 {
296 if (!m_codeBlock->isKnownNotImmediate(vReg))
297 linkSlowCase(iter);
298 }
299
300 ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
301 {
302 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
303
304 m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
305 }
306
307 ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
308 {
309 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
310
311 const JumpList::JumpVector& jumpVector = jumpList.jumps();
312 size_t size = jumpVector.size();
313 for (size_t i = 0; i < size; ++i)
314 m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
315 }
316
317 ALWAYS_INLINE void JIT::addSlowCase()
318 {
319 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
320
321 Jump emptyJump; // Doing it this way to make Windows happy.
322 m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
323 }
324
325 ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
326 {
327 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
328
329 m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
330 }
331
332 ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
333 {
334 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
335
336 jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
337 }
338
339 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
340 {
341 return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
342 }
343
344 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
345 {
346 loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
347 return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
348 }
349
350 #if ENABLE(SAMPLING_FLAGS)
351 ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
352 {
353 ASSERT(flag >= 1);
354 ASSERT(flag <= 32);
355 or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
356 }
357
358 ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
359 {
360 ASSERT(flag >= 1);
361 ASSERT(flag <= 32);
362 and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
363 }
364 #endif
365
366 #if ENABLE(SAMPLING_COUNTERS)
367 ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
368 {
369 add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
370 }
371 #endif
372
373 #if ENABLE(OPCODE_SAMPLING)
374 #if CPU(X86_64)
375 ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
376 {
377 move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
378 storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
379 }
380 #else
381 ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
382 {
383 storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
384 }
385 #endif
386 #endif
387
388 #if ENABLE(CODEBLOCK_SAMPLING)
389 #if CPU(X86_64)
390 ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
391 {
392 move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
393 storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
394 }
395 #else
396 ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
397 {
398 storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
399 }
400 #endif
401 #endif
402
403 ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
404 {
405 return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
406 }
407
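// Inline (fast-path) object allocation. Picks the MarkedAllocator sized for ClassType
// (with or without a destructor), pops the head of its free list, and falls through to
// the slow case if the list is empty. The new cell's structure, class-info pointer and
// inheritor ID are then initialized, and its property storage pointer is set to the space
// just past the JSObject header (presumably the cell's inline storage).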
408 template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
409 {
410 MarkedAllocator* allocator = 0;
411 if (destructor)
412 allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
413 else
414 allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
415 loadPtr(&allocator->m_freeList.head, result);
416 addSlowCase(branchTestPtr(Zero, result));
417
418 // remove the object from the free list
419 loadPtr(Address(result), storagePtr);
420 storePtr(storagePtr, &allocator->m_freeList.head);
421
422 // initialize the object's structure
423 storePtr(structure, Address(result, JSCell::structureOffset()));
424
425 // initialize the object's classInfo pointer
426 storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));
427
428 // initialize the inheritor ID
429 storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));
430
431 // initialize the object's property storage pointer
432 addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
433 storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
434 }
435
436 template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
437 {
438 emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
439 }
440
441 inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
442 {
443 emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
444
445 // store the function's scope chain
446 storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));
447
448 // store the function's executable member
449 storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));
450
451 // store the function's name
452 ASSERT(executable->nameValue());
453 int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
454 storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
455 #if USE(JSVALUE32_64)
456 store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
457 #endif
458 }
459
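// Inline bump allocation out of the copied-space block: compute currentOffset + size,
// take the slow path if that would run past the end of the current block, otherwise
// return the old offset in 'result' and advance m_currentOffset past the allocation.
// As the FIXME below notes, integer wrap-around of the bump pointer is not checked.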
460 inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
461 {
462 CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
463
464 // FIXME: We need to check for wrap-around.
465 // Check to make sure that the allocation will fit in the current block.
466 loadPtr(&allocator->m_currentOffset, result);
467 addPtr(TrustedImm32(size), result);
468 loadPtr(&allocator->m_currentBlock, storagePtr);
469 addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
470 addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));
471
472 // Load the original offset.
473 loadPtr(&allocator->m_currentOffset, result);
474
475 // Bump the pointer forward.
476 move(result, storagePtr);
477 addPtr(TrustedImm32(size), storagePtr);
478 storePtr(storagePtr, &allocator->m_currentOffset);
479 }
480
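// Inline allocation of a JSArray holding 'length' values taken from consecutive registers
// starting at 'valuesRegister'. The backing ArrayStorage (at least 4 slots) is allocated
// before the cell so that garbage collection cannot observe a partially initialized array,
// then the header fields are filled in, the known values are copied, and any remaining
// vector slots are cleared to the empty value.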
481 inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
482 {
483 unsigned initialLength = std::max(length, 4U);
484 size_t initialStorage = JSArray::storageSize(initialLength);
485
486 // We allocate the backing store first to ensure that garbage collection
487 // doesn't happen during JSArray initialization.
488 emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
489
490 // Allocate the cell for the array.
491 emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);
492
493 // Store all the necessary info in the ArrayStorage.
494 storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
495 store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
496 store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));
497
498 // Store the newly allocated ArrayStorage.
499 storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));
500
501 // Store the vector length and index bias.
502 store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
503 store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));
504
505 // Initialize the sparse value map.
506 storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));
507
508 // Store the values we have.
509 for (unsigned i = 0; i < length; i++) {
510 #if USE(JSVALUE64)
511 loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
512 storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
513 #else
514 load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
515 store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
516 load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
517 store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
518 #endif
519 }
520
521 // Zero out the remaining slots.
522 for (unsigned i = length; i < initialLength; i++) {
523 #if USE(JSVALUE64)
524 storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
525 #else
526 store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
527 store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
528 #endif
529 }
530 }
531
532 #if ENABLE(VALUE_PROFILER)
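// Records the value currently in regT0 (payload) / regT1 (tag on 32-bit) into the
// profile's buckets. With a single bucket this is a plain store; otherwise a bucket index
// is chosen by bumping bucketCounterRegister by a pseudo-randomly picked 1 or 3 and
// masking it, which appears to be a cheap way of spreading samples across the buckets.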
533 inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
534 {
535 ASSERT(shouldEmitProfiling());
536 ASSERT(valueProfile);
537
538 const RegisterID value = regT0;
539 #if USE(JSVALUE32_64)
540 const RegisterID valueTag = regT1;
541 #endif
542 const RegisterID scratch = regT3;
543
544 if (ValueProfile::numberOfBuckets == 1) {
545 // We're in a simple configuration: only one bucket, so we can just do a direct
546 // store.
547 #if USE(JSVALUE64)
548 storePtr(value, valueProfile->m_buckets);
549 #else
550 EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
551 store32(value, &descriptor->asBits.payload);
552 store32(valueTag, &descriptor->asBits.tag);
553 #endif
554 return;
555 }
556
557 if (m_randomGenerator.getUint32() & 1)
558 add32(TrustedImm32(1), bucketCounterRegister);
559 else
560 add32(TrustedImm32(3), bucketCounterRegister);
561 and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
562 move(TrustedImmPtr(valueProfile->m_buckets), scratch);
563 #if USE(JSVALUE64)
564 storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
565 #elif USE(JSVALUE32_64)
566 store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
567 store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
568 #endif
569 }
570
571 inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
572 {
573 if (!shouldEmitProfiling())
574 return;
575 emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
576 }
577
578 inline void JIT::emitValueProfilingSite()
579 {
580 emitValueProfilingSite(m_bytecodeOffset);
581 }
582 #endif
583
584 #if USE(JSVALUE32_64)
585
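// In the JSVALUE32_64 representation a JSValue occupies two 32-bit words: a tag
// (CellTag, Int32Tag, BooleanTag, ...) and a payload. The helpers below therefore load
// and store the two halves separately, and a small one-entry "map" cache (see map() and
// unmap() further down) remembers which registers currently hold the tag and payload of
// the most recently stored virtual register.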
586 inline void JIT::emitLoadTag(int index, RegisterID tag)
587 {
588 RegisterID mappedTag;
589 if (getMappedTag(index, mappedTag)) {
590 move(mappedTag, tag);
591 unmap(tag);
592 return;
593 }
594
595 if (m_codeBlock->isConstantRegisterIndex(index)) {
596 move(Imm32(getConstantOperand(index).tag()), tag);
597 unmap(tag);
598 return;
599 }
600
601 load32(tagFor(index), tag);
602 unmap(tag);
603 }
604
605 inline void JIT::emitLoadPayload(int index, RegisterID payload)
606 {
607 RegisterID mappedPayload;
608 if (getMappedPayload(index, mappedPayload)) {
609 move(mappedPayload, payload);
610 unmap(payload);
611 return;
612 }
613
614 if (m_codeBlock->isConstantRegisterIndex(index)) {
615 move(Imm32(getConstantOperand(index).payload()), payload);
616 unmap(payload);
617 return;
618 }
619
620 load32(payloadFor(index), payload);
621 unmap(payload);
622 }
623
624 inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
625 {
626 move(Imm32(v.payload()), payload);
627 move(Imm32(v.tag()), tag);
628 }
629
630 inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
631 {
632 ASSERT(tag != payload);
633
634 if (base == callFrameRegister) {
635 ASSERT(payload != base);
636 emitLoadPayload(index, payload);
637 emitLoadTag(index, tag);
638 return;
639 }
640
641 if (payload == base) { // avoid stomping base
642 load32(tagFor(index, base), tag);
643 load32(payloadFor(index, base), payload);
644 return;
645 }
646
647 load32(payloadFor(index, base), payload);
648 load32(tagFor(index, base), tag);
649 }
650
651 inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
652 {
653 if (isMapped(index1)) {
654 emitLoad(index1, tag1, payload1);
655 emitLoad(index2, tag2, payload2);
656 return;
657 }
658 emitLoad(index2, tag2, payload2);
659 emitLoad(index1, tag1, payload1);
660 }
661
662 inline void JIT::emitLoadDouble(int index, FPRegisterID value)
663 {
664 if (m_codeBlock->isConstantRegisterIndex(index)) {
665 WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
666 loadDouble(&inConstantPool, value);
667 } else
668 loadDouble(addressFor(index), value);
669 }
670
671 inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
672 {
673 if (m_codeBlock->isConstantRegisterIndex(index)) {
674 WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
675 char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
676 convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
677 } else
678 convertInt32ToDouble(payloadFor(index), value);
679 }
680
681 inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
682 {
683 store32(payload, payloadFor(index, base));
684 store32(tag, tagFor(index, base));
685 }
686
687 inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
688 {
689 store32(payload, payloadFor(index, callFrameRegister));
690 if (!indexIsInt32)
691 store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
692 }
693
694 inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
695 {
696 emitStoreInt32(index, payload, indexIsInt32);
697 map(m_bytecodeOffset + opcodeLength, index, tag, payload);
698 }
699
700 inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
701 {
702 store32(payload, payloadFor(index, callFrameRegister));
703 if (!indexIsInt32)
704 store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
705 }
706
707 inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
708 {
709 store32(payload, payloadFor(index, callFrameRegister));
710 if (!indexIsCell)
711 store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
712 }
713
714 inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
715 {
716 store32(payload, payloadFor(index, callFrameRegister));
717 if (!indexIsBool)
718 store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
719 }
720
721 inline void JIT::emitStoreDouble(int index, FPRegisterID value)
722 {
723 storeDouble(value, addressFor(index));
724 }
725
726 inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
727 {
728 store32(Imm32(constant.payload()), payloadFor(index, base));
729 store32(Imm32(constant.tag()), tagFor(index, base));
730 }
731
732 ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
733 {
734 emitStore(dst, jsUndefined());
735 }
736
737 inline bool JIT::isLabeled(unsigned bytecodeOffset)
738 {
739 for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
740 unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
741 if (jumpTarget == bytecodeOffset)
742 return true;
743 if (jumpTarget > bytecodeOffset)
744 return false;
745 }
746 return false;
747 }
748
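// The mapping cache: remembers that, at a given bytecode offset, the tag and payload of
// one virtual register are live in the given machine registers, so a following
// emitLoadTag/emitLoadPayload can reuse them instead of reloading from the stack frame.
// The cache is deliberately not established at jump targets, since the registers
// presumably cannot be assumed to hold anything when control arrives from elsewhere.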
749 inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
750 {
751 if (isLabeled(bytecodeOffset))
752 return;
753
754 m_mappedBytecodeOffset = bytecodeOffset;
755 m_mappedVirtualRegisterIndex = virtualRegisterIndex;
756 m_mappedTag = tag;
757 m_mappedPayload = payload;
758
759 ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
760 ASSERT(!canBeOptimized() || m_mappedTag == regT1);
761 }
762
763 inline void JIT::unmap(RegisterID registerID)
764 {
765 if (m_mappedTag == registerID)
766 m_mappedTag = (RegisterID)-1;
767 else if (m_mappedPayload == registerID)
768 m_mappedPayload = (RegisterID)-1;
769 }
770
771 inline void JIT::unmap()
772 {
773 m_mappedBytecodeOffset = (unsigned)-1;
774 m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
775 m_mappedTag = (RegisterID)-1;
776 m_mappedPayload = (RegisterID)-1;
777 }
778
779 inline bool JIT::isMapped(int virtualRegisterIndex)
780 {
781 if (m_mappedBytecodeOffset != m_bytecodeOffset)
782 return false;
783 if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
784 return false;
785 return true;
786 }
787
788 inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
789 {
790 if (m_mappedBytecodeOffset != m_bytecodeOffset)
791 return false;
792 if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
793 return false;
794 if (m_mappedPayload == (RegisterID)-1)
795 return false;
796 payload = m_mappedPayload;
797 return true;
798 }
799
800 inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
801 {
802 if (m_mappedBytecodeOffset != m_bytecodeOffset)
803 return false;
804 if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
805 return false;
806 if (m_mappedTag == (RegisterID)-1)
807 return false;
808 tag = m_mappedTag;
809 return true;
810 }
811
812 inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
813 {
814 if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
815 if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
816 addSlowCase(jump());
817 else
818 addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
819 }
820 }
821
822 inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
823 {
824 if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
825 if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
826 addSlowCase(jump());
827 else
828 addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
829 }
830 }
831
832 ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
833 {
834 return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
835 }
836
837 ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
838 {
839 if (isOperandConstantImmediateInt(op1)) {
840 constant = getConstantOperand(op1).asInt32();
841 op = op2;
842 return true;
843 }
844
845 if (isOperandConstantImmediateInt(op2)) {
846 constant = getConstantOperand(op2).asInt32();
847 op = op1;
848 return true;
849 }
850
851 return false;
852 }
853
854 #else // USE(JSVALUE32_64)
855
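// In the JSVALUE64 representation a JSValue fits in a single 64-bit register. The JIT
// keeps a one-deep cache of the last result: m_lastResultBytecodeRegister names the
// virtual register whose value is still live in cachedResultRegister, and
// killLastResultRegister() invalidates that cache.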
856 ALWAYS_INLINE void JIT::killLastResultRegister()
857 {
858 m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
859 }
860
861 // Loads an operand from the register file (the stack frame's register array) into a hardware register.
862 ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
863 {
864 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
865
866 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
867 if (m_codeBlock->isConstantRegisterIndex(src)) {
868 JSValue value = m_codeBlock->getConstant(src);
869 if (!value.isNumber())
870 move(TrustedImmPtr(JSValue::encode(value)), dst);
871 else
872 move(ImmPtr(JSValue::encode(value)), dst);
873 killLastResultRegister();
874 return;
875 }
876
877 if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
878 // The value we want is already in the cached result register.
879 if (dst != cachedResultRegister)
880 move(cachedResultRegister, dst);
881 killLastResultRegister();
882 return;
883 }
884
885 loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
886 killLastResultRegister();
887 }
888
889 ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
890 {
891 if (src2 == m_lastResultBytecodeRegister) {
892 emitGetVirtualRegister(src2, dst2);
893 emitGetVirtualRegister(src1, dst1);
894 } else {
895 emitGetVirtualRegister(src1, dst1);
896 emitGetVirtualRegister(src2, dst2);
897 }
898 }
899
900 ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
901 {
902 return getConstantOperand(src).asInt32();
903 }
904
905 ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
906 {
907 return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
908 }
909
910 ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
911 {
912 storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
913 m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
914 }
915
916 ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
917 {
918 storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
919 }
920
921 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
922 {
923 #if USE(JSVALUE64)
924 return branchTestPtr(Zero, reg, tagMaskRegister);
925 #else
926 return branchTest32(Zero, reg, TrustedImm32(TagMask));
927 #endif
928 }
929
930 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
931 {
932 move(reg1, scratch);
933 orPtr(reg2, scratch);
934 return emitJumpIfJSCell(scratch);
935 }
936
937 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
938 {
939 addSlowCase(emitJumpIfJSCell(reg));
940 }
941
942 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
943 {
944 #if USE(JSVALUE64)
945 return branchTestPtr(NonZero, reg, tagMaskRegister);
946 #else
947 return branchTest32(NonZero, reg, TrustedImm32(TagMask));
948 #endif
949 }
950
951 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
952 {
953 addSlowCase(emitJumpIfNotJSCell(reg));
954 }
955
956 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
957 {
958 if (!m_codeBlock->isKnownNotImmediate(vReg))
959 emitJumpSlowCaseIfNotJSCell(reg);
960 }
961
962 #if USE(JSVALUE64)
963
964 inline void JIT::emitLoadDouble(int index, FPRegisterID value)
965 {
966 if (m_codeBlock->isConstantRegisterIndex(index)) {
967 WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
968 loadDouble(&inConstantPool, value);
969 } else
970 loadDouble(addressFor(index), value);
971 }
972
973 inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
974 {
975 if (m_codeBlock->isConstantRegisterIndex(index)) {
976 ASSERT(isOperandConstantImmediateInt(index));
977 convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
978 } else
979 convertInt32ToDouble(addressFor(index), value);
980 }
981 #endif
982
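// With the JSVALUE64 encoding, int32 immediates are stored with the upper TagTypeNumber
// bits set, so an unsigned comparison against tagTypeNumberRegister (which holds
// TagTypeNumber) distinguishes them: AboveOrEqual means "is an int32 immediate",
// Below means it is not.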
983 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
984 {
985 #if USE(JSVALUE64)
986 return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
987 #else
988 return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
989 #endif
990 }
991
992 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
993 {
994 #if USE(JSVALUE64)
995 return branchPtr(Below, reg, tagTypeNumberRegister);
996 #else
997 return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
998 #endif
999 }
1000
1001 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
1002 {
1003 move(reg1, scratch);
1004 andPtr(reg2, scratch);
1005 return emitJumpIfNotImmediateInteger(scratch);
1006 }
1007
1008 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
1009 {
1010 addSlowCase(emitJumpIfNotImmediateInteger(reg));
1011 }
1012
1013 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
1014 {
1015 addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
1016 }
1017
1018 ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
1019 {
1020 addSlowCase(emitJumpIfNotImmediateNumber(reg));
1021 }
1022
1023 #if USE(JSVALUE32_64)
1024 ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
1025 {
1026 subPtr(TrustedImm32(TagTypeNumber), reg);
1027 }
1028
1029 ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
1030 {
1031 return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
1032 }
1033 #endif
1034
1035 ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
1036 {
1037 #if USE(JSVALUE64)
1038 emitFastArithIntToImmNoCheck(src, dest);
1039 #else
1040 if (src != dest)
1041 move(src, dest);
1042 addPtr(TrustedImm32(TagTypeNumber), dest);
1043 #endif
1044 }
1045
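// Boxing an int32 under JSVALUE64 just ORs in the TagTypeNumber bits held in
// tagTypeNumberRegister. The other path sign-extends, doubles the value (a shift left by
// one, done via addPtr) and then re-tags it as an immediate number.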
1046 // operand is int32_t, must have been zero-extended if register is 64-bit.
1047 ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
1048 {
1049 #if USE(JSVALUE64)
1050 if (src != dest)
1051 move(src, dest);
1052 orPtr(tagTypeNumberRegister, dest);
1053 #else
1054 signExtend32ToPtr(src, dest);
1055 addPtr(dest, dest);
1056 emitFastArithReTagImmediate(dest, dest);
1057 #endif
1058 }
1059
1060 ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
1061 {
1062 or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
1063 }
1064
1065 #endif // USE(JSVALUE32_64)
1066
1067 } // namespace JSC
1068
1069 #endif // ENABLE(JIT)
1070
1071 #endif