jit/JITPropertyAccess.cpp (from JavaScriptCore-7600.1.4.16.1)
/*
 * Copyright (C) 2008, 2009, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/StringPrintStream.h>


namespace JSC {
#if USE(JSVALUE64)

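// Register contract for the generated stub: on entry regT0 holds the JSString
// cell and regT1 holds an int32 index. On success the single-character JSString
// is returned in regT0; on any failure regT0 is set to 0 so the caller can fall
// through to the generic slow path.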
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);
    JumpList failures;
    failures.append(JSC::branchStructure(jit,
        NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        vm->stringStructure.get()));

    // Load string length to regT2, and start the process of loading the data pointer into regT0.
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character.
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags.
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
    // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the value
    // to 64 bits is necessary, since it's used in the address calculation). We zero-extend rather than sign-
    // extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
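    // For example, an index of -1 arrives here as 0xFFFFFFFF; since m_vectorLength can
    // never reach 2^31, the unsigned bounds check in the shape-specific emitter below
    // routes such an index to the slow case, as intended.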

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    if (!ASSERT_DISABLED) {
        Jump resultOK = branchTest64(NonZero, regT0);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
    moveDoubleTo64(fpRegT0, regT0);
    sub64(tagTypeNumberRegister, regT0);

    return slowCases;
}

JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchStructure(NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        m_vm->stringStructure.get());
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    Jump skipProfiling = jump();

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    emitArrayProfileOutOfBoundsSpecialCase(profile);

    skipProfiling.link(this);

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}

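// Property offsets below firstOutOfLineOffset address the object's inline
// storage; larger offsets live out-of-line at negative indices off the
// butterfly pointer, which is why the offset is negated before being used in
// the scaled address computation below.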
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);

    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
        done.link(this);
    } else {
        if (!ASSERT_DISABLED) {
            Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
            abortWithReason(JITOffsetIsNotOutOfLine);
            isOutOfLine.link(this);
        }
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
    }
    signExtend32ToPtr(offset, offset);
    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}

void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    int iter = currentInstruction[5].u.operand;
    int i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure.
    emitLoadStructure(regT0, regT2, regT3);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
    add32(TrustedImm32(firstOutOfLineOffset), regT3);
    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
    inlineProperty.link(this);
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    callOperation(operationGetByValGeneric, dst, regT0, regT1);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

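// Shared fast path for the Int32, Double, and Contiguous shapes of put_by_val.
// A store that lands one past the public length but still inside the vector is
// treated as an append: the store-to-hole case is profiled, the public length
// is bumped, and control loops back to the store.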
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}

JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Jump skipProfiling = jump();
    linkSlowCase(iter); // out of bounds
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    skipProfiling.link(this);

    Label slowPath = label();

    emitGetVirtualRegister(property, regT1);
    emitGetVirtualRegister(value, regT2);
    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}

void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    emitGetVirtualRegister(base, regT0);
    callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int baseVReg = currentInstruction[1].u.operand;
    int valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);

    // In order to be able to patch both the Structure and the object offset, we store one
    // pointer ('hotPathBegin') to just after the point where the arguments have been loaded
    // into registers, and we generate code such that the Structure and offset are always at
    // the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
        direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    m_putByIds.append(gen);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int baseVReg = currentInstruction[1].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    Label coldPathBegin(this);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(
        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), base);
    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), result);
    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(base->locationForOffset(cachedOffset), result);
        return;
    }

    loadPtr(base->butterflyAddress(), result);
    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}

void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
    if (!needsVarInjectionChecks)
        return;
    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}

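// Resolves a closure scope at a statically known depth: if the function's
// activation has been created, step over it first, then walk 'depth' links
// down the scope chain.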
void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
{
    emitVarInjectionCheck(needsVarInjectionChecks);
    emitGetVirtualRegister(JSStack::ScopeChain, regT0);
    if (m_codeBlock->needsActivation()) {
        emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
        Jump noActivation = branchTestPtr(Zero, regT1);
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
        noActivation.link(this);
    }
    for (unsigned i = 0; i < depth; ++i)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
    unsigned depth = currentInstruction[4].u.operand;

    switch (resolveType) {
    case GlobalProperty:
    case GlobalVar:
    case GlobalPropertyWithVarInjectionChecks:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
        emitPutVirtualRegister(dst);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
}

void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);

    if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    linkSlowCase(iter);
    int32_t identifierIndex = currentInstruction[2].u.operand;
    callOperation(operationResolveScope, dst, identifierIndex);
}

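// Loads the scope object and guards it against the Structure cached in the
// instruction stream. A null slot (nothing cached yet) or a mismatched
// structure takes the slow path; the structure check also covers var injection.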
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(structureSlot, regT1);
    addSlowCase(branchTestPtr(Zero, regT1));
    load32(Address(regT1, Structure::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}

void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
{
    load32(operandSlot, regT1);
    compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
}

void JIT::emitGetGlobalVar(uintptr_t operand)
{
    loadPtr(reinterpret_cast<void*>(operand), regT0);
}

void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
}

void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitGetGlobalProperty(operandSlot);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetGlobalVar(*operandSlot);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetClosureVar(scope, *operandSlot);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
    emitPutVirtualRegister(dst);
    emitValueProfilingSite();
}

void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

    if (resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkSlowCase(iter);
    linkSlowCase(iter);
    callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}

void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
{
    emitGetVirtualRegister(value, regT2);

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    loadPtr(operandSlot, regT1);
    negPtr(regT1);
    storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
}

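// While the variable's watchpoint set is still validly watching an inferred
// value, any store of a different value must take the slow path so that the
// set can be invalidated.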
void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated)
        return;

    load8(set->addressOfState(), scratch);
    Jump isDone = branch32(Equal, scratch, TrustedImm32(IsInvalidated));
    addSlowCase(branch64(NotEqual, AbsoluteAddress(set->addressOfInferredValue()), value));
    isDone.link(this);
}

void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
{
    emitGetVirtualRegister(value, regT0);
    emitNotifyWrite(regT0, regT1, set);
    storePtr(regT0, reinterpret_cast<void*>(operand));
}

void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
{
    emitGetVirtualRegister(value, regT1);
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    storePtr(regT1, Address(regT0, operand * sizeof(Register)));
}

void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
    int scope = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitPutGlobalProperty(operandSlot, value);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitWriteBarrier(scope, value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutClosureVar(scope, *operandSlot, value);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
}

void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    unsigned linkCount = 0;
    if (resolveType != GlobalVar && resolveType != ClosureVar)
        linkCount++;
    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
        && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
        linkCount++;
    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkCount++;
    if (!linkCount)
        return;
    while (linkCount--)
        linkSlowCase(iter);
    callOperation(operationPutToScope, currentInstruction);
}

void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();
    emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    store64(regT0, currentInstruction[1].u.registerPointer);
}

#endif // USE(JSVALUE64)

#if USE(JSVALUE64)
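// Generational write barrier for a virtual-register owner. Depending on the
// mode, non-cell values and/or non-cell owners are filtered out first; the
// mark byte is then checked so that only owners that actually need to be
// remembered take the out-of-line barrier call.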
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitGetVirtualRegister(value, regT0);
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
    }

    emitGetVirtualRegister(owner, regT0);
    Jump ownerNotCell;
    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT0);
    callOperation(operationUnconditionalWriteBarrier, regT0);
    ownerNotMarkedOrAlreadyRemembered.link(this);

    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    emitGetVirtualRegister(value, regT0);
    Jump valueNotCell;
    if (mode == ShouldFilterValue)
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

#else // USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitLoad(owner, regT0, regT1);
    Jump ownerNotCell;
    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));

    Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(regT1);
    callOperation(operationUnconditionalWriteBarrier, regT1);
    ownerNotMarkedOrAlreadyRemembered.link(this);

    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

#endif // USE(JSVALUE64)

void JIT::emitWriteBarrier(JSCell* owner)
{
#if ENABLE(GGC)
    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
        Jump ownerNotMarkedOrAlreadyRemembered = checkMarkByte(owner);
        callOperation(operationUnconditionalWriteBarrier, owner);
        ownerNotMarkedOrAlreadyRemembered.link(this);
    } else
        callOperation(operationUnconditionalWriteBarrier, owner);
#else
    UNUSED_PARAM(owner);
#endif // ENABLE(GGC)
}

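// Called when a get_by_val site's array-mode guess turned out to be wrong:
// generate a fresh stub specialized for the newly observed mode, repatch the
// site's badTypeJump to enter it, and retarget the slow-path call to the
// generic operation.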
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
}

void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

#if ENABLE(GGC)
    bool needsLinkForWriteBarrier = false;
#endif

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
#if ENABLE(GGC)
        needsLinkForWriteBarrier = true;
#endif
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
#if ENABLE(GGC)
        needsLinkForWriteBarrier = true;
#endif
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
#if ENABLE(GGC)
    if (needsLinkForWriteBarrier) {
        ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
        patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
    }
#endif

    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    if (!isDirect) {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    } else {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    }
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isInt(type));

    // We test the array type via the JSType byte in the cell header; we must do so
    // without clobbering the registers that hold the base and the property.

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(base, property, TimesOne), resultPayload);
        break;
    case 2:
        if (isSigned(type))
            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(base, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(base, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

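    // A Uint32 value with the sign bit set cannot be boxed as an int32 immediate,
    // so it is rematerialized as a double by adding 2^32 to the (signed) loaded
    // value before boxing.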
    Jump done;
    if (type == TypeUint32) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}

JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isFloat(type));

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 4:
        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8: {
        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
        break;
    }
    default:
        CRASH();
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
    return slowCases;
}

JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isInt(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

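    // Uint8Clamped stores saturate rather than wrap: in-range values pass the
    // unsigned compare straight through, negative values are clamped to 0, and
    // values above 0xff are clamped to 0xff.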
    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}

JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isFloat(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}

} // namespace JSC

#endif // ENABLE(JIT)