1 /*
2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27
28 #if !USE(JSVALUE32_64)
29
30 #include "JIT.h"
31
32 #if ENABLE(JIT)
33
34 #include "CodeBlock.h"
35 #include "GetterSetter.h"
36 #include "JITInlineMethods.h"
37 #include "JITStubCall.h"
38 #include "JSArray.h"
39 #include "JSFunction.h"
40 #include "JSPropertyNameIterator.h"
41 #include "Interpreter.h"
42 #include "LinkBuffer.h"
43 #include "RepatchBuffer.h"
44 #include "ResultType.h"
45 #include "SamplingTool.h"
46
47 #ifndef NDEBUG
48 #include <stdio.h>
49 #endif
50
51 using namespace std;
52
53 namespace JSC {
54
55 PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
56 {
57 JSInterfaceJIT jit;
58 JumpList failures;
59 failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
60 failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
61 #if USE(JSVALUE64)
62 jit.zeroExtend32ToPtr(regT1, regT1);
63 #else
64 jit.emitFastArithImmToInt(regT1);
65 #endif
66
67 // Load string length to regT2, and start the process of loading the data pointer into regT0
68 jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
69 jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
70 jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
71
72 // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
73 failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
74
75 // Load the character
76 jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
77
78 failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
79 jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
80 jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
81 jit.ret();
82
83 failures.link(&jit);
84 jit.move(Imm32(0), regT0);
85 jit.ret();
86
87 LinkBuffer patchBuffer(&jit, pool);
88 return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
89 }
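// For reference, the stub generated above has the following semantics, written
// out as ordinary C++ (a sketch only: the fiberCount/length/characters
// parameters stand in for the raw offset-based loads the stub performs, and
// returning 0 asks the caller to take the slow path).
static JSString* stringGetByValSemantics(JSGlobalData* globalData, uint32_t fiberCount, uint32_t length, const UChar* characters, uint32_t index)
{
    if (fiberCount)      // the string is a rope, so its characters are not flat in memory
        return 0;
    if (index >= length) // one unsigned compare rejects negative and too-large indices alike
        return 0;
    UChar c = characters[index];
    if (c >= 0x100)      // only single-character Latin-1 strings are cached
        return 0;
    return globalData->smallStrings.singleCharacterStrings()[c];
}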
90
91 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
92 {
93 unsigned dst = currentInstruction[1].u.operand;
94 unsigned base = currentInstruction[2].u.operand;
95 unsigned property = currentInstruction[3].u.operand;
96
97 emitGetVirtualRegisters(base, regT0, property, regT1);
98 emitJumpSlowCaseIfNotImmediateInteger(regT1);
99 #if USE(JSVALUE64)
100 // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
101 // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
102 // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
103 // size is always less than 4GB). As such zero extending will have been correct (and extending the value
104 // to 64 bits is necessary since it's used in the address calculation). We zero extend rather than sign
105 // extend since it makes it easier to re-tag the value in the slow case. See the illustrative helper after this function.
106 zeroExtend32ToPtr(regT1, regT1);
107 #else
108 emitFastArithImmToInt(regT1);
109 #endif
110 emitJumpSlowCaseIfNotJSCell(regT0, base);
111 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
112
113 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
114 addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
115
116 loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
117 addSlowCase(branchTestPtr(Zero, regT0));
118
119 emitPutVirtualRegister(dst);
120 }
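// The zero-extend-plus-unsigned-compare trick described in the comment above,
// expressed as plain C++ (an illustrative helper, not part of the original source):
static inline bool vectorIndexInBounds(int32_t index, uint32_t vectorLength)
{
    // m_vectorLength is always below 2^31 (the total allocation is under 4GB),
    // so a negative index, viewed as uint32_t, is guaranteed to compare
    // >= vectorLength and correctly fall through to the slow case.
    return static_cast<uint32_t>(index) < vectorLength;
}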
121
122 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
123 {
124 unsigned dst = currentInstruction[1].u.operand;
125 unsigned base = currentInstruction[2].u.operand;
126 unsigned property = currentInstruction[3].u.operand;
127
128 linkSlowCase(iter); // property int32 check
129 linkSlowCaseIfNotJSCell(iter, base); // base cell check
130 Jump nonCell = jump();
131 linkSlowCase(iter); // base array check
132 Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
133 emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
134 Jump failed = branchTestPtr(Zero, regT0);
135 emitPutVirtualRegister(dst, regT0);
136 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
137 failed.link(this);
138 notString.link(this);
139 nonCell.link(this);
140
141 linkSlowCase(iter); // vector length check
142 linkSlowCase(iter); // empty value
143
144 JITStubCall stubCall(this, cti_op_get_by_val);
145 stubCall.addArgument(base, regT2);
146 stubCall.addArgument(property, regT2);
147 stubCall.call(dst);
148 }
149
150 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
151 {
152 ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
153 ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
154
155 Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
156 loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
157 Jump finishedLoad = jump();
158 notUsingInlineStorage.link(this);
159 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
160 loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
161 finishedLoad.link(this);
162 }
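// What the register-based load above computes, as C++ (a sketch; the storage
// members are private to JSObject, which the JIT reaches via raw offsets):
//
//     result = (structure->m_propertyStorageCapacity == JSObject::inlineStorageCapacity)
//         ? base->m_inlineStorage[offset]      // storage lives inline in the cell
//         : base->m_externalStorage[offset];   // storage is an out-of-line allocation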
163
164 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
165 {
166 unsigned dst = currentInstruction[1].u.operand;
167 unsigned base = currentInstruction[2].u.operand;
168 unsigned property = currentInstruction[3].u.operand;
169 unsigned expected = currentInstruction[4].u.operand;
170 unsigned iter = currentInstruction[5].u.operand;
171 unsigned i = currentInstruction[6].u.operand;
172
173 emitGetVirtualRegister(property, regT0);
174 addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
175 emitGetVirtualRegisters(base, regT0, iter, regT1);
176 emitJumpSlowCaseIfNotJSCell(regT0, base);
177
178 // Test base's structure
179 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
180 addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
181 load32(addressFor(i), regT3);
182 sub32(Imm32(1), regT3);
183 addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
184 compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);
185
186 emitPutVirtualRegister(dst, regT0);
187 }
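// Fast-path semantics of get_by_pname, as pseudo-C++ (a sketch; the cached
// lookup is only valid while the for..in iterator's Structure still matches):
//
//     if (property == expected
//             && base->structure() == iter->m_cachedStructure
//             && uint32_t(i - 1) < iter->m_numCacheableSlots)
//         dst = /* direct inline/external storage load at offset i - 1 */;
//     else
//         dst = cti_op_get_by_val(base, property);   // generic slow path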
188
189 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
190 {
191 unsigned dst = currentInstruction[1].u.operand;
192 unsigned base = currentInstruction[2].u.operand;
193 unsigned property = currentInstruction[3].u.operand;
194
195 linkSlowCase(iter);
196 linkSlowCaseIfNotJSCell(iter, base);
197 linkSlowCase(iter);
198 linkSlowCase(iter);
199
200 JITStubCall stubCall(this, cti_op_get_by_val);
201 stubCall.addArgument(base, regT2);
202 stubCall.addArgument(property, regT2);
203 stubCall.call(dst);
204 }
205
206 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
207 {
208 unsigned base = currentInstruction[1].u.operand;
209 unsigned property = currentInstruction[2].u.operand;
210 unsigned value = currentInstruction[3].u.operand;
211
212 emitGetVirtualRegisters(base, regT0, property, regT1);
213 emitJumpSlowCaseIfNotImmediateInteger(regT1);
214 #if USE(JSVALUE64)
215 // See comment in op_get_by_val.
216 zeroExtend32ToPtr(regT1, regT1);
217 #else
218 emitFastArithImmToInt(regT1);
219 #endif
220 emitJumpSlowCaseIfNotJSCell(regT0, base);
221 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
222 addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
223
224 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
225
226 Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
227
228 Label storeResult(this);
229 emitGetVirtualRegister(value, regT0);
230 storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
231 Jump end = jump();
232
233 empty.link(this);
234 add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
235 branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
236
237 move(regT1, regT0);
238 add32(Imm32(1), regT0);
239 store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
240 jump().linkTo(storeResult, this);
241
242 end.link(this);
243 }
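// The hole-filling path above implements the following, written as C++ (a
// sketch; the fast path has already bailed out for indices beyond
// m_vectorLength, and an empty vector slot - JSValue() - tests false):
static void putByValFastSemantics(ArrayStorage* storage, uint32_t index, JSValue value)
{
    if (!storage->m_vector[index]) {        // writing into a hole
        ++storage->m_numValuesInVector;
        if (index >= storage->m_length)     // writing past the reported length,
            storage->m_length = index + 1;  // so grow m_length to cover it
    }
    storage->m_vector[index] = value;
}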
244
245 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
246 {
247 JITStubCall stubCall(this, cti_op_put_by_index);
248 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
249 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
250 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
251 stubCall.call();
252 }
253
254 void JIT::emit_op_put_getter(Instruction* currentInstruction)
255 {
256 JITStubCall stubCall(this, cti_op_put_getter);
257 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
258 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
259 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
260 stubCall.call();
261 }
262
263 void JIT::emit_op_put_setter(Instruction* currentInstruction)
264 {
265 JITStubCall stubCall(this, cti_op_put_setter);
266 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
267 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
268 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
269 stubCall.call();
270 }
271
272 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
273 {
274 JITStubCall stubCall(this, cti_op_del_by_id);
275 stubCall.addArgument(currentInstruction[2].u.operand, regT2);
276 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
277 stubCall.call(currentInstruction[1].u.operand);
278 }
279
280
281 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
282
283 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
284
285 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
286 void JIT::emit_op_method_check(Instruction*) {}
287 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
288 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
289 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
290 #endif
291
292 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
293 {
294 unsigned resultVReg = currentInstruction[1].u.operand;
295 unsigned baseVReg = currentInstruction[2].u.operand;
296 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
297
298 emitGetVirtualRegister(baseVReg, regT0);
299 JITStubCall stubCall(this, cti_op_get_by_id_generic);
300 stubCall.addArgument(regT0);
301 stubCall.addArgument(ImmPtr(ident));
302 stubCall.call(resultVReg);
303
304 m_propertyAccessInstructionIndex++;
305 }
306
307 void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
308 {
309 ASSERT_NOT_REACHED();
310 }
311
312 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
313 {
314 unsigned baseVReg = currentInstruction[1].u.operand;
315 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
316 unsigned valueVReg = currentInstruction[3].u.operand;
317 unsigned direct = currentInstruction[8].u.operand;
318
319 emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
320
321 JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
322 stubCall.addArgument(regT0);
323 stubCall.addArgument(ImmPtr(ident));
324 stubCall.addArgument(regT1);
325 stubCall.call();
326
327 m_propertyAccessInstructionIndex++;
328 }
329
330 void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
331 {
332 ASSERT_NOT_REACHED();
333 }
334
335 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
336
337 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
338
339 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
340
341 void JIT::emit_op_method_check(Instruction* currentInstruction)
342 {
343 // Assert that the following instruction is a get_by_id.
344 ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
345
346 currentInstruction += OPCODE_LENGTH(op_method_check);
347 unsigned resultVReg = currentInstruction[1].u.operand;
348 unsigned baseVReg = currentInstruction[2].u.operand;
349 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
350
351 emitGetVirtualRegister(baseVReg, regT0);
352
353 // Do the method check - check the object & its prototype's structure inline (this is the common case).
354 m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
355 MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
356
357 Jump notCell = emitJumpIfNotJSCell(regT0);
358
359 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
360
361 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
362 DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
363 Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
364
365 // This will be relinked to load the cached function as an immediate, rather than doing a load.
366 DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
367
368 END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
369
370 Jump match = jump();
371
372 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
373 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
374 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
375
376 // Link the failure cases here.
377 notCell.link(this);
378 structureCheck.link(this);
379 protoStructureCheck.link(this);
380
381 // Do a regular(ish) get_by_id (the slow case will be linked to
382 // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
383 compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
384
385 match.link(this);
386 emitPutVirtualRegister(resultVReg);
387
388 // We've already generated the following get_by_id, so make sure it's skipped over.
389 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
390 }
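// Once patchMethodCallProto (below) has filled in the patchable constants, the
// inline sequence above behaves like this (a sketch; cachedStructure,
// cachedPrototypeStructure and cachedFunction are values burned into the code
// by the repatch, not real variables):
//
//     if (base.isCell()
//             && base->structure() == cachedStructure
//             && cachedPrototype->structure() == cachedPrototypeStructure)
//         result = cachedFunction;            // the method, with no property load at all
//     else
//         result = get_by_id(base, ident);    // the regular(ish) path compiled above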
391
392 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
393 {
394 currentInstruction += OPCODE_LENGTH(op_method_check);
395 unsigned resultVReg = currentInstruction[1].u.operand;
396 unsigned baseVReg = currentInstruction[2].u.operand;
397 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
398
399 compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
400
401 // We've already generated the following get_by_id, so make sure it's skipped over.
402 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
403 }
404
405 #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
406
407 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
408 void JIT::emit_op_method_check(Instruction*) {}
409 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
410
411 #endif
412
413 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
414 {
415 unsigned resultVReg = currentInstruction[1].u.operand;
416 unsigned baseVReg = currentInstruction[2].u.operand;
417 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
418
419 emitGetVirtualRegister(baseVReg, regT0);
420 compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
421 emitPutVirtualRegister(resultVReg);
422 }
423
424 void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
425 {
426 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
427 // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
428 // to the array-length / prototype access trampolines), and finally we also use the property-map access offset as
429 // a label to jump back to if one of these trampolines finds a match.
430
431 emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
432
433 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
434
435 Label hotPathBegin(this);
436 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
437
438 DataLabelPtr structureToCompare;
439 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
440 addSlowCase(structureCheck);
441 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
442 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
443
444 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
445 Label externalLoadComplete(this);
446 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
447 ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);
448
449 DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
450 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
451
452 Label putResult(this);
453
454 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
455
456 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
457 }
458
459 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
460 {
461 unsigned resultVReg = currentInstruction[1].u.operand;
462 unsigned baseVReg = currentInstruction[2].u.operand;
463 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
464
465 compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
466 }
467
468 void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
469 {
470 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
471 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
472 // of the call (which we can use to look up the patch information), but should an array-length or
473 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
474 // the distance from the call to the head of the slow case.
475
476 linkSlowCaseIfNotJSCell(iter, baseVReg);
477 linkSlowCase(iter);
478
479 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
480
481 #ifndef NDEBUG
482 Label coldPathBegin(this);
483 #endif
484 JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
485 stubCall.addArgument(regT0);
486 stubCall.addArgument(ImmPtr(ident));
487 Call call = stubCall.call(resultVReg);
488
489 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
490
491 ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
492
493 // Track the location of the call; this will be used to recover patch information.
494 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
495 m_propertyAccessInstructionIndex++;
496 }
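// Concretely, the "subtract back" described above is how every stub in this
// file finds the slow case again, e.g.:
//
//     CodeLocationLabel slowCaseBegin =
//         stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
//
// which is why this sequence must be uninterrupted and of a fixed, known length.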
497
498 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
499 {
500 unsigned baseVReg = currentInstruction[1].u.operand;
501 unsigned valueVReg = currentInstruction[3].u.operand;
502
503 unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
504
505 // In order to be able to patch both the Structure and the object offset, we store one pointer,
506 // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
507 // generate code such that the Structure & offset are always at the same distance from it.
508
509 emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
510
511 // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
512 emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
513
514 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
515
516 Label hotPathBegin(this);
517 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
518
519 // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
520 DataLabelPtr structureToCompare;
521 addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
522 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
523
524 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
525 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
526 Label externalLoadComplete(this);
527 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
528 ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);
529
530 DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
531
532 END_UNINTERRUPTED_SEQUENCE(sequencePutById);
533
534 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
535 }
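// Three parts of the sequence above are patchable, and patchPutByIdReplace
// (below) rewrites all of them once a Structure has been cached (a sketch):
//
//     repatch(structureToCompare, structure);                        // the inline guard
//     if (structure->isUsingInlineStorage())
//         repatchLoadPtrToLEA(externalLoad);                         // skip the storage load
//     repatch(displacementLabel, cachedOffset * sizeof(JSValue));    // the store offset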
536
537 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
538 {
539 unsigned baseVReg = currentInstruction[1].u.operand;
540 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
541 unsigned direct = currentInstruction[8].u.operand;
542
543 unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
544
545 linkSlowCaseIfNotJSCell(iter, baseVReg);
546 linkSlowCase(iter);
547
548 JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
549 stubCall.addArgument(regT0);
550 stubCall.addArgument(ImmPtr(ident));
551 stubCall.addArgument(regT1);
552 Call call = stubCall.call();
553
554 // Track the location of the call; this will be used to recover patch information.
555 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
556 }
557
558 // Compile a store into an object's property storage. May overwrite the
559 // value in objectReg.
560 void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
561 {
562 int offset = cachedOffset * sizeof(JSValue);
563 if (structure->isUsingInlineStorage())
564 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
565 else
566 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
567 storePtr(value, Address(base, offset));
568 }
569
570 // Compile a load from an object's property storage. May overwrite base.
571 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
572 {
573 int offset = cachedOffset * sizeof(JSValue);
574 if (structure->isUsingInlineStorage())
575 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
576 else
577 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
578 loadPtr(Address(base, offset), result);
579 }
580
581 void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
582 {
583 if (base->isUsingInlineStorage())
584 loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
585 else {
586 PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
587 loadPtr(static_cast<void*>(protoPropertyStorage), temp);
588 loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
589 }
590 }
591
592 void JIT::testPrototype(Structure* structure, JumpList& failureCases)
593 {
594 if (structure->m_prototype.isNull())
595 return;
596
597 move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
598 move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
599 failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
600 }
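// Note that both the address of the prototype's Structure field and its value
// at compile time are baked into the code as immediates, so the check above
// amounts to (sketch):
//
//     if (asCell(structure->m_prototype)->m_structure != structureSeenAtCompileTime)
//         fail;   // any prototype transition after compilation invalidates the stub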
601
602 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
603 {
604 JumpList failureCases;
605 // Check eax is an object of the right Structure.
606 failureCases.append(emitJumpIfNotJSCell(regT0));
607 failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
608 testPrototype(oldStructure, failureCases);
609
610 // If this is not a direct put, check that the Structures along the prototype chain have not changed.
611 if (!direct) {
612 for (RefPtr<Structure>* it = chain->head(); *it; ++it)
613 testPrototype(it->get(), failureCases);
614 }
615
616 Call callTarget;
617
618 // emit a call only if storage realloc is needed
619 bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
620 if (willNeedStorageRealloc) {
621 // This trampoline was called like a JIT stub; before we can call again we need to
622 // remove the return address from the stack, to prevent the stack from becoming misaligned.
623 preserveReturnAddressAfterCall(regT3);
624
625 JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
626 stubCall.skipArgument(); // base
627 stubCall.skipArgument(); // ident
628 stubCall.skipArgument(); // value
629 stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
630 stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
631 stubCall.call(regT0);
632 emitGetJITStubArg(2, regT1);
633
634 restoreReturnAddressBeforeReturn(regT3);
635 }
636
637 // Assumes m_refCount can be decremented directly; the refcount decrement is safe, as the
638 // CodeBlock should ensure oldStructure->m_refCount > 0.
639 sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
640 add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
641 storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
642
643 // write the value
644 compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
645
646 ret();
647
648 ASSERT(!failureCases.empty());
649 failureCases.link(this);
650 restoreArgumentReferenceForTrampoline();
651 Call failureCall = tailRecursiveCall();
652
653 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
654
655 patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
656
657 if (willNeedStorageRealloc) {
658 ASSERT(m_calls.size() == 1);
659 patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
660 }
661
662 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
663 stubInfo->stubRoutine = entryLabel;
664 RepatchBuffer repatchBuffer(m_codeBlock);
665 repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
666 }
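// End-to-end, the transition stub assembled above performs (a sketch):
//
//     if (!base.isCell() || base->structure() != oldStructure)
//         fail;                                      // structure guard
//     if (/* any Structure along the prototype chain changed */)
//         fail;                                      // testPrototype checks
//     if (oldCapacity != newCapacity)
//         cti_op_put_by_id_transition_realloc(...);  // grow the property storage
//     oldStructure->deref(); newStructure->ref();
//     base->setStructure(newStructure);              // the storePtr above
//     base->propertyStorage()[cachedOffset] = value; // compilePutDirectOffset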
667
668 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
669 {
670 RepatchBuffer repatchBuffer(codeBlock);
671
672 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
673 // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
674 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
675
676 int offset = sizeof(JSValue) * cachedOffset;
677
678 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
679 // and makes the subsequent load's offset automatically correct
680 if (structure->isUsingInlineStorage())
681 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
682
683 // Patch the offset into the property map to load from, then patch the Structure to look for.
684 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
685 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
686 }
687
688 void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
689 {
690 RepatchBuffer repatchBuffer(codeBlock);
691
692 ASSERT(!methodCallLinkInfo.cachedStructure);
693 methodCallLinkInfo.cachedStructure = structure;
694 structure->ref();
695
696 Structure* prototypeStructure = proto->structure();
697 methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
698 prototypeStructure->ref();
699
700 repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
701 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
702 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
703 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
704
705 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
706 }
707
708 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
709 {
710 RepatchBuffer repatchBuffer(codeBlock);
711
712 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
713 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
714 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
715
716 int offset = sizeof(JSValue) * cachedOffset;
717
718 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
719 // and makes the subsequent load's offset automatically correct
720 if (structure->isUsingInlineStorage())
721 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
722
723 // Patch the offset into the property map to load from, then patch the Structure to look for.
724 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
725 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
726 }
727
728 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
729 {
730 StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
731
732 // Check eax is an array
733 Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
734
735 // Checks out okay! - get the length from the storage
736 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
737 load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
738
739 Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
740
741 emitFastArithIntToImmNoCheck(regT2, regT0);
742 Jump success = jump();
743
744 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
745
746 // Use the patch information to link the failure cases back to the original slow case routine.
747 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
748 patchBuffer.link(failureCases1, slowCaseBegin);
749 patchBuffer.link(failureCases2, slowCaseBegin);
750
751 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
752 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
753
754 // Track the stub we have created so that it will be deleted later.
755 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
756 stubInfo->stubRoutine = entryLabel;
757
758 // Finally patch the jump to slow case back in the hot path to jump here instead.
759 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
760 RepatchBuffer repatchBuffer(m_codeBlock);
761 repatchBuffer.relink(jumpLocation, entryLabel);
762
763 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
764 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
765 }
766
767 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
768 {
769 // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
770 // referencing the prototype object - let's speculatively load its table nice and early!)
771 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
772
773 // Check eax is an object of the right Structure.
774 Jump failureCases1 = checkStructure(regT0, structure);
775
776 // Check that the prototype object's Structure has not changed.
777 Structure** prototypeStructureAddress = &(protoObject->m_structure);
778 #if CPU(X86_64)
779 move(ImmPtr(prototypeStructure), regT3);
780 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
781 #else
782 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
783 #endif
784
785 bool needsStubLink = false;
786
787 // Checks out okay!
788 if (slot.cachedPropertyType() == PropertySlot::Getter) {
789 needsStubLink = true;
790 compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
791 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
792 stubCall.addArgument(regT1);
793 stubCall.addArgument(regT0);
794 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
795 stubCall.call();
796 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
797 needsStubLink = true;
798 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
799 stubCall.addArgument(ImmPtr(protoObject));
800 stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
801 stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
802 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
803 stubCall.call();
804 } else
805 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
806 Jump success = jump();
807 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
808
809 // Use the patch information to link the failure cases back to the original slow case routine.
810 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
811 patchBuffer.link(failureCases1, slowCaseBegin);
812 patchBuffer.link(failureCases2, slowCaseBegin);
813
814 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
815 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
816
817 if (needsStubLink) {
818 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
819 if (iter->to)
820 patchBuffer.link(iter->from, FunctionPtr(iter->to));
821 }
822 }
823 // Track the stub we have created so that it will be deleted later.
824 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
825 stubInfo->stubRoutine = entryLabel;
826
827 // Finally patch the jump to slow case back in the hot path to jump here instead.
828 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
829 RepatchBuffer repatchBuffer(m_codeBlock);
830 repatchBuffer.relink(jumpLocation, entryLabel);
831
832 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
833 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
834 }
835
836 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
837 {
838 Jump failureCase = checkStructure(regT0, structure);
839 bool needsStubLink = false;
840 if (slot.cachedPropertyType() == PropertySlot::Getter) {
841 needsStubLink = true;
842 if (!structure->isUsingInlineStorage()) {
843 move(regT0, regT1);
844 compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
845 } else
846 compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
847 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
848 stubCall.addArgument(regT1);
849 stubCall.addArgument(regT0);
850 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
851 stubCall.call();
852 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
853 needsStubLink = true;
854 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
855 stubCall.addArgument(regT0);
856 stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
857 stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
858 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
859 stubCall.call();
860 } else
861 compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
862 Jump success = jump();
863
864 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
865
866 if (needsStubLink) {
867 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
868 if (iter->to)
869 patchBuffer.link(iter->from, FunctionPtr(iter->to));
870 }
871 }
872
873 // Use the patch information to link the failure cases back to the original slow case routine.
874 CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
875 if (!lastProtoBegin)
876 lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
877
878 patchBuffer.link(failureCase, lastProtoBegin);
879
880 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
881 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
882
883 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
884
885 structure->ref();
886 polymorphicStructures->list[currentIndex].set(entryLabel, structure);
887
888 // Finally patch the jump to slow case back in the hot path to jump here instead.
889 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
890 RepatchBuffer repatchBuffer(m_codeBlock);
891 repatchBuffer.relink(jumpLocation, entryLabel);
892 }
893
894 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
895 {
896 // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
897 // referencing the prototype object - let's speculatively load its table nice and early!)
898 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
899
900 // Check eax is an object of the right Structure.
901 Jump failureCases1 = checkStructure(regT0, structure);
902
903 // Check that the prototype object's Structure has not changed.
904 Structure** prototypeStructureAddress = &(protoObject->m_structure);
905 #if CPU(X86_64)
906 move(ImmPtr(prototypeStructure), regT3);
907 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
908 #else
909 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
910 #endif
911
912 // Checks out okay!
913 bool needsStubLink = false;
914 if (slot.cachedPropertyType() == PropertySlot::Getter) {
915 needsStubLink = true;
916 compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
917 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
918 stubCall.addArgument(regT1);
919 stubCall.addArgument(regT0);
920 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
921 stubCall.call();
922 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
923 needsStubLink = true;
924 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
925 stubCall.addArgument(ImmPtr(protoObject));
926 stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
927 stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
928 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
929 stubCall.call();
930 } else
931 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
932
933 Jump success = jump();
934
935 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
936
937 if (needsStubLink) {
938 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
939 if (iter->to)
940 patchBuffer.link(iter->from, FunctionPtr(iter->to));
941 }
942 }
943
944 // Use the patch information to link the failure cases back to the original slow case routine.
945 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
946 patchBuffer.link(failureCases1, lastProtoBegin);
947 patchBuffer.link(failureCases2, lastProtoBegin);
948
949 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
950 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
951
952 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
953
954 structure->ref();
955 prototypeStructure->ref();
956 prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
957
958 // Finally patch the jump to slow case back in the hot path to jump here instead.
959 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
960 RepatchBuffer repatchBuffer(m_codeBlock);
961 repatchBuffer.relink(jumpLocation, entryLabel);
962 }
963
964 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
965 {
966 ASSERT(count);
967 JumpList bucketsOfFail;
968
969 // Check eax is an object of the right Structure.
970 Jump baseObjectCheck = checkStructure(regT0, structure);
971 bucketsOfFail.append(baseObjectCheck);
972
973 Structure* currStructure = structure;
974 RefPtr<Structure>* chainEntries = chain->head();
975 JSObject* protoObject = 0;
976 for (unsigned i = 0; i < count; ++i) {
977 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
978 currStructure = chainEntries[i].get();
979
980 // Check that the prototype object's Structure has not changed.
981 Structure** prototypeStructureAddress = &(protoObject->m_structure);
982 #if CPU(X86_64)
983 move(ImmPtr(currStructure), regT3);
984 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
985 #else
986 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
987 #endif
988 }
989 ASSERT(protoObject);
990
991 bool needsStubLink = false;
992 if (slot.cachedPropertyType() == PropertySlot::Getter) {
993 needsStubLink = true;
994 compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
995 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
996 stubCall.addArgument(regT1);
997 stubCall.addArgument(regT0);
998 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
999 stubCall.call();
1000 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
1001 needsStubLink = true;
1002 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
1003 stubCall.addArgument(ImmPtr(protoObject));
1004 stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
1005 stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
1006 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
1007 stubCall.call();
1008 } else
1009 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1010 Jump success = jump();
1011
1012 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1013
1014 if (needsStubLink) {
1015 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1016 if (iter->to)
1017 patchBuffer.link(iter->from, FunctionPtr(iter->to));
1018 }
1019 }
1020
1021 // Use the patch information to link the failure cases back to the original slow case routine.
1022 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
1023
1024 patchBuffer.link(bucketsOfFail, lastProtoBegin);
1025
1026 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
1027 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1028
1029 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1030
1031 // Track the stub we have created so that it will be deleted later.
1032 structure->ref();
1033 chain->ref();
1034 prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
1035
1036 // Finally patch the jump to slow case back in the hot path to jump here instead.
1037 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1038 RepatchBuffer repatchBuffer(m_codeBlock);
1039 repatchBuffer.relink(jumpLocation, entryLabel);
1040 }
1041
1042 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
1043 {
1044 ASSERT(count);
1045
1046 JumpList bucketsOfFail;
1047
1048 // Check eax is an object of the right Structure.
1049 bucketsOfFail.append(checkStructure(regT0, structure));
1050
1051 Structure* currStructure = structure;
1052 RefPtr<Structure>* chainEntries = chain->head();
1053 JSObject* protoObject = 0;
1054 for (unsigned i = 0; i < count; ++i) {
1055 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1056 currStructure = chainEntries[i].get();
1057
1058 // Check that the prototype object's Structure has not changed.
1059 Structure** prototypeStructureAddress = &(protoObject->m_structure);
1060 #if CPU(X86_64)
1061 move(ImmPtr(currStructure), regT3);
1062 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
1063 #else
1064 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
1065 #endif
1066 }
1067 ASSERT(protoObject);
1068
1069 bool needsStubLink = false;
1070 if (slot.cachedPropertyType() == PropertySlot::Getter) {
1071 needsStubLink = true;
1072 compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
1073 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
1074 stubCall.addArgument(regT1);
1075 stubCall.addArgument(regT0);
1076 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
1077 stubCall.call();
1078 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
1079 needsStubLink = true;
1080 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
1081 stubCall.addArgument(ImmPtr(protoObject));
1082 stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
1083 stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
1084 stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
1085 stubCall.call();
1086 } else
1087 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1088 Jump success = jump();
1089
1090 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1091
1092 if (needsStubLink) {
1093 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1094 if (iter->to)
1095 patchBuffer.link(iter->from, FunctionPtr(iter->to));
1096 }
1097 }
1098
1099 // Use the patch information to link the failure cases back to the original slow case routine.
1100 patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
1101
1102 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
1103 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1104
1105 // Track the stub we have created so that it will be deleted later.
1106 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1107 stubInfo->stubRoutine = entryLabel;
1108
1109 // Finally patch the jump to slow case back in the hot path to jump here instead.
1110 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1111 RepatchBuffer repatchBuffer(m_codeBlock);
1112 repatchBuffer.relink(jumpLocation, entryLabel);
1113
1114 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1115 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1116 }
1117
1118 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1119
1120 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1121
1122 } // namespace JSC
1123
1124 #endif // ENABLE(JIT)
1125
1126 #endif // !USE(JSVALUE32_64)