/*
 * Copyright (C) 2008, 2009, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/StringPrintStream.h>

namespace JSC {

#if USE(JSVALUE64)
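// Generates a shared thunk that performs get_by_val on a JSString receiver:
// it bounds-checks the index against the string length, loads the 8-bit or
// 16-bit character, and returns the corresponding entry from the VM's
// single-character string cache. On any failure it returns 0 so the caller
// can fall through to the generic slow path.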
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);
    JSInterfaceJIT::JumpList failures;
    failures.append(JSC::branchStructure(jit,
        NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        vm->stringStructure.get()));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    JSInterfaceJIT::JumpList is16Bit;
    JSInterfaceJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // the number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    if (!ASSERT_DISABLED) {
        Jump resultOK = branchTest64(NonZero, regT0);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
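// Double arrays store raw IEEE doubles in the butterfly. A hole reads back
// as NaN and is rejected by the self-inequality (DoubleNotEqualOrUnordered)
// check; a real double is then re-tagged by moving its bits to a GPR and
// subtracting tagTypeNumberRegister (the inverse of the add64 used when
// unboxing a double).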
JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
    moveDoubleTo64(fpRegT0, regT0);
    sub64(tagTypeNumberRegister, regT0);

    return slowCases;
}
JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}
JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}
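// Slow path for get_by_val. The linkSlowCase() calls below must be made in
// exactly the order the corresponding slow cases were added on the fast
// path. String subscripts get a chance to hit the stringGetByValStubGenerator
// thunk before falling back to operationGetByValDefault.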
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchStructure(NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        m_vm->stringStructure.get());
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    Jump skipProfiling = jump();

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    emitArrayProfileOutOfBoundsSpecialCase(profile);

    skipProfiling.link(this);

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}
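// Loads a property given a dynamic offset register. Inline properties live
// directly in the object cell after the header; out-of-line properties live
// in the butterfly, indexed backwards from its base. When the object may be
// a final object both layouts are possible, so we branch on whether the
// offset is below firstOutOfLineOffset.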
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);

    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
        done.link(this);
    } else {
        if (!ASSERT_DISABLED) {
            Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
            abortWithReason(JITOffsetIsNotOutOfLine);
            isOutOfLine.link(this);
        }
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
    }
    signExtend32ToPtr(offset, offset);
    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    int iter = currentInstruction[5].u.operand;
    int i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    emitLoadStructure(regT0, regT2, regT3);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
    add32(TrustedImm32(firstOutOfLineOffset), regT3);
    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
    inlineProperty.link(this);
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    callOperation(operationGetByValGeneric, dst, regT0, regT1);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}
JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Jump skipProfiling = jump();
    linkSlowCase(iter); // out of bounds
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    skipProfiling.link(this);

    Label slowPath = label();

    emitGetVirtualRegister(property, regT1);
    emitGetVirtualRegister(value, regT2);
    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}
void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    emitGetVirtualRegister(base, regT0);
    callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int baseVReg = currentInstruction[1].u.operand;
    int valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
        direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    m_putByIds.append(gen);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int baseVReg = currentInstruction[1].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    Label coldPathBegin(this);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(
        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}
// Compile a store into an object's property storage. May overwrite the
// value in objectReg.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), base);
    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), result);
    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(base->locationForOffset(cachedOffset), result);
        return;
    }

    loadPtr(base->butterflyAddress(), result);
    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}
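// Scope-access helpers. Var injection (for example via eval) invalidates the
// global object's varInjectionWatchpoint; scope accesses compiled under the
// no-injection assumption guard on that watchpoint's state and take the slow
// path once it has been invalidated.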
void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
    if (!needsVarInjectionChecks)
        return;
    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
{
    emitVarInjectionCheck(needsVarInjectionChecks);
    emitGetVirtualRegister(JSStack::ScopeChain, regT0);
    if (m_codeBlock->needsActivation()) {
        emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
        Jump noActivation = branchTestPtr(Zero, regT1);
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
        noActivation.link(this);
    }
    for (unsigned i = 0; i < depth; ++i)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    emitPutVirtualRegister(dst);
}
void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
    unsigned depth = currentInstruction[4].u.operand;

    switch (resolveType) {
    case GlobalProperty:
    case GlobalVar:
    case GlobalPropertyWithVarInjectionChecks:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
        emitPutVirtualRegister(dst);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
}
void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);

    if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    linkSlowCase(iter);
    int32_t identifierIndex = currentInstruction[2].u.operand;
    callOperation(operationResolveScope, dst, identifierIndex);
}
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(structureSlot, regT1);
    addSlowCase(branchTestPtr(Zero, regT1));
    load32(Address(regT1, Structure::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}
void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
{
    load32(operandSlot, regT1);
    compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
}
void JIT::emitGetGlobalVar(uintptr_t operand)
{
    loadPtr(reinterpret_cast<void*>(operand), regT0);
}
void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
}
void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitGetGlobalProperty(operandSlot);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetGlobalVar(*operandSlot);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetClosureVar(scope, *operandSlot);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
    emitPutVirtualRegister(dst);
    emitValueProfilingSite();
}
void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

    if (resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkSlowCase(iter);
    linkSlowCase(iter);
    callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
{
    emitGetVirtualRegister(value, regT2);

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    loadPtr(operandSlot, regT1);
    negPtr(regT1);
    storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
}
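// emitNotifyWrite guards stores to global variables that carry an inferred
// value. If the watchpoint set is already invalidated there is nothing to
// check; otherwise a store of any value other than the inferred one must
// take the slow path so the watchpoint can be fired.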
void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    load8(set->addressOfState(), scratch);
    Jump isDone = branch32(Equal, scratch, TrustedImm32(IsInvalidated));
    addSlowCase(branch64(NotEqual, AbsoluteAddress(set->addressOfInferredValue()), value));
    isDone.link(this);
}
void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
{
    emitGetVirtualRegister(value, regT0);
    emitNotifyWrite(regT0, regT1, set);
    storePtr(regT0, reinterpret_cast<void*>(operand));
}
void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
{
    emitGetVirtualRegister(value, regT1);
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    storePtr(regT1, Address(regT0, operand * sizeof(Register)));
}
void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
    int scope = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitPutGlobalProperty(operandSlot, value);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitWriteBarrier(scope, value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutClosureVar(scope, *operandSlot, value);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
}
void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    unsigned linkCount = 0;
    if (resolveType != GlobalVar && resolveType != ClosureVar)
        linkCount++;
    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
        && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
        linkCount++;
    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkCount++;
    if (!linkCount)
        return;
    while (linkCount--)
        linkSlowCase(iter);
    callOperation(operationPutToScope, currentInstruction);
}
void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();
    emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    store64(regT0, currentInstruction[1].u.registerPointer);
}

#endif // USE(JSVALUE64)
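// GGC write barriers. Each barrier first filters out non-cell values and/or
// bases (per WriteBarrierMode), then consults the owner's mark byte via
// checkMarkByte and skips the operationUnconditionalWriteBarrier call when
// the mark state shows the barrier is unnecessary.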
#if USE(JSVALUE64)
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitGetVirtualRegister(value, regT0);
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
    }

    emitGetVirtualRegister(owner, regT0);
    Jump ownerNotCell;
    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT0);
    callOperation(operationUnconditionalWriteBarrier, regT0);
    ownerNotMarkedOrAlreadyRemembered.link(this);

    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    emitGetVirtualRegister(value, regT0);
    Jump valueNotCell;
    if (mode == ShouldFilterValue)
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}
#else // USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitLoad(owner, regT0, regT1);
    Jump ownerNotCell;
    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));

    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT1);
    callOperation(operationUnconditionalWriteBarrier, regT1);
    ownerNotMarkedOrAlreadyRemembered.link(this);

    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

#endif // USE(JSVALUE64)
void JIT::emitWriteBarrier(JSCell* owner)
{
#if ENABLE(GGC)
    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
        Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(owner);
        callOperation(operationUnconditionalWriteBarrier, owner);
        ownerNotMarkedOrAlreadyRemembered.link(this);
    } else
        callOperation(operationUnconditionalWriteBarrier, owner);
#else
    UNUSED_PARAM(owner);
#endif // ENABLE(GGC)
}
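// Compiles a fresh get_by_val stub for the observed array mode and patches
// it into the original code: badType and the slow cases are linked back to
// the op's existing slow-path return address, the stub's exit jumps back to
// the done label, and the patchable jump in the original fast path is
// relinked to enter the new stub.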
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default: {
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
        break;
    }
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
}
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    bool needsLinkForWriteBarrier = false;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    default: {
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
        break;
    }
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
    if (needsLinkForWriteBarrier) {
        ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
        patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
    }

    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    if (!isDirect) {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    } else {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    }
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}
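// Integer typed array loads. Uint32 needs care: a loaded value with the sign
// bit set does not fit in an int32 JSValue, so it is converted to a double
// (by adding 2^32 to the sign-extended value) and boxed as a double instead.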
JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isInt(type));

    // The best way to test the array type is to use the classInfo. We need to do so without
    // clobbering the register that holds the indexing type, base, and property.
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(base, property, TimesOne), resultPayload);
        break;
    case 2:
        if (isSigned(type))
            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(base, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(base, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

    Jump done;
    if (type == TypeUint32) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}
JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isFloat(type));

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 4:
        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8:
        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
        break;
    default:
        CRASH();
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

    return slowCases;
}
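// Integer typed array stores. For clamped types (Uint8ClampedArray) the
// int32 value is clamped to [0, 255] inline: negative values become 0 and
// values above 0xff become 0xff; non-int32 values take the slow path.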
JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isInt(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}
JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isFloat(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}

} // namespace JSC

#endif // ENABLE(JIT)