[apple/javascriptcore.git] / jit / JITPropertyAccess.cpp
9dae56ea 1/*
ba379fdc 2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
9dae56ea 3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
ba379fdc 33#include "JITStubCall.h"
9dae56ea 34#include "JSArray.h"
35#include "JSFunction.h"
f9bf01c6 36#include "JSPropertyNameIterator.h"
9dae56ea 37#include "Interpreter.h"
ba379fdc 38#include "LinkBuffer.h"
39#include "RepatchBuffer.h"
9dae56ea 40#include "ResultType.h"
41#include "SamplingTool.h"
42
43#ifndef NDEBUG
44#include <stdio.h>
45#endif
46
47using namespace std;
48
49namespace JSC {
50
ba379fdc 51#if USE(JSVALUE32_64)
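// Under JSVALUE32_64 a JSValue occupies eight bytes, split into two 32-bit words: a
// payload and a tag describing how to interpret it. A minimal sketch of the idea
// (illustrative only - not the real JSC declarations):
//
//     struct SketchValue32_64 {
//         int32_t payload; // an int32, a JSCell*, a bool, or half of a double
//         int32_t tag;     // e.g. JSValue::Int32Tag, CellTag, EmptyValueTag
//     };
//
// Hence the code below moves values around as (tag, payload) register pairs, by
// convention (regT1, regT0).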
52
53void JIT::emit_op_put_by_index(Instruction* currentInstruction)
54{
55 unsigned base = currentInstruction[1].u.operand;
56 unsigned property = currentInstruction[2].u.operand;
57 unsigned value = currentInstruction[3].u.operand;
58
59 JITStubCall stubCall(this, cti_op_put_by_index);
60 stubCall.addArgument(base);
61 stubCall.addArgument(Imm32(property));
62 stubCall.addArgument(value);
63 stubCall.call();
64}
65
66void JIT::emit_op_put_getter(Instruction* currentInstruction)
67{
68 unsigned base = currentInstruction[1].u.operand;
69 unsigned property = currentInstruction[2].u.operand;
70 unsigned function = currentInstruction[3].u.operand;
71
72 JITStubCall stubCall(this, cti_op_put_getter);
73 stubCall.addArgument(base);
74 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
75 stubCall.addArgument(function);
76 stubCall.call();
77}
78
79void JIT::emit_op_put_setter(Instruction* currentInstruction)
80{
81 unsigned base = currentInstruction[1].u.operand;
82 unsigned property = currentInstruction[2].u.operand;
83 unsigned function = currentInstruction[3].u.operand;
84
85 JITStubCall stubCall(this, cti_op_put_setter);
86 stubCall.addArgument(base);
87 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
88 stubCall.addArgument(function);
89 stubCall.call();
90}
91
92void JIT::emit_op_del_by_id(Instruction* currentInstruction)
93{
94 unsigned dst = currentInstruction[1].u.operand;
95 unsigned base = currentInstruction[2].u.operand;
96 unsigned property = currentInstruction[3].u.operand;
97
98 JITStubCall stubCall(this, cti_op_del_by_id);
99 stubCall.addArgument(base);
100 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
101 stubCall.call(dst);
102}
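// The ops above share one shape: no fast path, just a JITStubCall into the C++
// runtime. addArgument(vreg) passes a virtual register's tag and payload,
// addArgument(ImmPtr(...)) passes a constant, and call(dst) emits the call and writes
// the returned value back into dst (call() with no argument discards the result).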
103
104
9dae56ea 105#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
106
ba379fdc 107/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
108
109// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
110void JIT::emit_op_method_check(Instruction*) {}
111void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
112#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
113#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
114#endif
115
116void JIT::emit_op_get_by_val(Instruction* currentInstruction)
117{
118 unsigned dst = currentInstruction[1].u.operand;
119 unsigned base = currentInstruction[2].u.operand;
120 unsigned property = currentInstruction[3].u.operand;
121
122 JITStubCall stubCall(this, cti_op_get_by_val);
123 stubCall.addArgument(base);
124 stubCall.addArgument(property);
125 stubCall.call(dst);
126}
127
128void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
129{
130 ASSERT_NOT_REACHED();
131}
132
133void JIT::emit_op_put_by_val(Instruction* currentInstruction)
134{
135 unsigned base = currentInstruction[1].u.operand;
136 unsigned property = currentInstruction[2].u.operand;
137 unsigned value = currentInstruction[3].u.operand;
138
139 JITStubCall stubCall(this, cti_op_put_by_val);
140 stubCall.addArgument(base);
141 stubCall.addArgument(property);
142 stubCall.addArgument(value);
143 stubCall.call();
144}
145
146void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
147{
148 ASSERT_NOT_REACHED();
149}
150
151void JIT::emit_op_get_by_id(Instruction* currentInstruction)
152{
153 int dst = currentInstruction[1].u.operand;
154 int base = currentInstruction[2].u.operand;
155 int ident = currentInstruction[3].u.operand;
156
157 JITStubCall stubCall(this, cti_op_get_by_id_generic);
158 stubCall.addArgument(base);
159 stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
160 stubCall.call(dst);
161
162 m_propertyAccessInstructionIndex++;
163}
164
165void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
166{
167 m_propertyAccessInstructionIndex++;
168 ASSERT_NOT_REACHED();
169}
170
171void JIT::emit_op_put_by_id(Instruction* currentInstruction)
172{
173 int base = currentInstruction[1].u.operand;
174 int ident = currentInstruction[2].u.operand;
175 int value = currentInstruction[3].u.operand;
176
177 JITStubCall stubCall(this, cti_op_put_by_id_generic);
178 stubCall.addArgument(base);
179 stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
180 stubCall.addArgument(value);
181 stubCall.call();
182
183 m_propertyAccessInstructionIndex++;
184}
185
186void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
187{
188 m_propertyAccessInstructionIndex++;
189 ASSERT_NOT_REACHED();
190}
191
192#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
193
194/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
195
196#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
197
198void JIT::emit_op_method_check(Instruction* currentInstruction)
199{
200 // Assert that the following instruction is a get_by_id.
201 ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
202
203 currentInstruction += OPCODE_LENGTH(op_method_check);
204
205 // Do the method check - check the object & its prototype's structure inline (this is the common case).
206 m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
207 MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
208
209 int dst = currentInstruction[1].u.operand;
210 int base = currentInstruction[2].u.operand;
211
212 emitLoad(base, regT1, regT0);
213 emitJumpSlowCaseIfNotJSCell(base, regT1);
214
f9bf01c6 215 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
216
ba379fdc 217 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
218 DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
219 Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
220
221 // This will be repatched to plant the function pointer as an immediate, avoiding the load.
222 DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
f9bf01c6 223
224 END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
225
ba379fdc 226 move(Imm32(JSValue::CellTag), regT1);
227 Jump match = jump();
228
229 ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
230 ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
231 ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
232
233 // Link the failure cases here.
234 structureCheck.link(this);
235 protoStructureCheck.link(this);
236
237 // Do a regular(ish) get_by_id (the slow case will be linked to
238 // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
239 compileGetByIdHotPath();
240
241 match.link(this);
242 emitStore(dst, regT1, regT0);
243 map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
244
245 // We've already generated the following get_by_id, so make sure it's skipped over.
246 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
247}
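// The four patchable constants planted above (info.structureToCompare, protoObj,
// protoStructureToCompare, putFunction) start out as dummies, so the inline checks
// always fail and the first execution takes the plain get_by_id route. Once the slow
// path sees a successful method lookup, patchMethodCallProto() (later in this file)
// rewrites the constants in place, and thereafter the hot path materializes the
// cached function with no property lookup at all.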
248
249void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
250{
251 currentInstruction += OPCODE_LENGTH(op_method_check);
252
253 int dst = currentInstruction[1].u.operand;
254 int base = currentInstruction[2].u.operand;
255 int ident = currentInstruction[3].u.operand;
256
257 compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
258
259 // We've already generated the following get_by_id, so make sure it's skipped over.
260 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
261}
262
263#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
264
265// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
266void JIT::emit_op_method_check(Instruction*) {}
267void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
268
269#endif
270
271void JIT::emit_op_get_by_val(Instruction* currentInstruction)
272{
273 unsigned dst = currentInstruction[1].u.operand;
274 unsigned base = currentInstruction[2].u.operand;
275 unsigned property = currentInstruction[3].u.operand;
276
277 emitLoad2(base, regT1, regT0, property, regT3, regT2);
278
279 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
280 emitJumpSlowCaseIfNotJSCell(base, regT1);
281 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
ba379fdc 282
f9bf01c6 283 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
284 addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
285
286 load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
287 load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
288 addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
289
ba379fdc 290 emitStore(dst, regT1, regT0);
291 map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
292}
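// The BaseIndex operands above compute, roughly:
//
//     storage + index * 8 + OBJECT_OFFSETOF(ArrayStorage, m_vector[0])       // payload
//     storage + index * 8 + OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4   // tag
//
// i.e. each vector slot is one eight-byte JSValue, payload in the low word and tag in
// the high word (a little-endian layout assumption). A hole reads back as
// EmptyValueTag, which is exactly what the final slow-case check above catches.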
293
294void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
295{
296 unsigned dst = currentInstruction[1].u.operand;
297 unsigned base = currentInstruction[2].u.operand;
298 unsigned property = currentInstruction[3].u.operand;
299
ba379fdc 300 linkSlowCase(iter); // property int32 check
301 linkSlowCaseIfNotJSCell(iter, base); // base cell check
302 linkSlowCase(iter); // base array check
f9bf01c6 303 linkSlowCase(iter); // vector length check
304 linkSlowCase(iter); // empty value
ba379fdc 305
306 JITStubCall stubCall(this, cti_op_get_by_val);
307 stubCall.addArgument(base);
308 stubCall.addArgument(property);
309 stubCall.call(dst);
ba379fdc 310}
311
312void JIT::emit_op_put_by_val(Instruction* currentInstruction)
313{
314 unsigned base = currentInstruction[1].u.operand;
315 unsigned property = currentInstruction[2].u.operand;
316 unsigned value = currentInstruction[3].u.operand;
317
318 emitLoad2(base, regT1, regT0, property, regT3, regT2);
319
320 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
321 emitJumpSlowCaseIfNotJSCell(base, regT1);
322 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
f9bf01c6 323 addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
ba379fdc 324
f9bf01c6 325 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
ba379fdc 326
f9bf01c6 327 Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
ba379fdc 328
f9bf01c6 329 Label storeResult(this);
ba379fdc 330 emitLoad(value, regT1, regT0);
331 store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
332 store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
f9bf01c6 333 Jump end = jump();
334
335 empty.link(this);
336 add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
337 branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
338
339 add32(Imm32(1), regT2, regT0);
340 store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
341 jump().linkTo(storeResult, this);
342
343 end.link(this);
ba379fdc 344}
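// The two tails above handle writes into holes: if the slot was empty we bump
// m_numValuesInVector, and if the index is at or beyond m_length we grow m_length to
// index + 1 before looping back to storeResult. For example, storing to index 5 of a
// length-3 array (with sufficient vector capacity) leaves m_length == 6. Writes
// beyond m_vectorLength never reach this code; they were diverted to the slow case
// up front.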
345
346void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
347{
348 unsigned base = currentInstruction[1].u.operand;
349 unsigned property = currentInstruction[2].u.operand;
350 unsigned value = currentInstruction[3].u.operand;
351
352 linkSlowCase(iter); // property int32 check
353 linkSlowCaseIfNotJSCell(iter, base); // base cell check
354 linkSlowCase(iter); // base not array check
f9bf01c6 355 linkSlowCase(iter); // in vector check
ba379fdc 356
357 JITStubCall stubPutByValCall(this, cti_op_put_by_val);
358 stubPutByValCall.addArgument(base);
359 stubPutByValCall.addArgument(property);
360 stubPutByValCall.addArgument(value);
361 stubPutByValCall.call();
ba379fdc 362}
363
364void JIT::emit_op_get_by_id(Instruction* currentInstruction)
365{
366 int dst = currentInstruction[1].u.operand;
367 int base = currentInstruction[2].u.operand;
368
369 emitLoad(base, regT1, regT0);
370 emitJumpSlowCaseIfNotJSCell(base, regT1);
371 compileGetByIdHotPath();
372 emitStore(dst, regT1, regT0);
373 map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
374}
375
376void JIT::compileGetByIdHotPath()
377{
378 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
379 // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
380 // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
381 // to jump back to if one of these trampolines finds a match.
f9bf01c6 382
383 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
384
ba379fdc 385 Label hotPathBegin(this);
386 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
387 m_propertyAccessInstructionIndex++;
388
389 DataLabelPtr structureToCompare;
390 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
391 addSlowCase(structureCheck);
392 ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
393 ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
394
395 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
396 Label externalLoadComplete(this);
397 ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
398 ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
399
400 DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
401 ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
402 DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
403 ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
404
405 Label putResult(this);
406 ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
f9bf01c6 407
408 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
ba379fdc 409}
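// Schematically, the hot path just emitted behaves like this (illustrative only):
//
//     if (base->m_structure != patchableStructure) // starts as a guaranteed miss
//         goto slowCase;
//     storage = base->m_externalStorage;           // repatched to a lea for objects
//                                                  // using inline storage
//     payload = storage[patchableOffset];
//     tag     = storage[patchableOffset + 4];
//
// The ASSERTs pin each patchable site at a fixed distance from hotPathBegin, so that
// patchGetByIdSelf() and friends can locate them from the stub info alone.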
410
411void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
412{
413 int dst = currentInstruction[1].u.operand;
414 int base = currentInstruction[2].u.operand;
415 int ident = currentInstruction[3].u.operand;
416
417 compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
418}
419
420void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
421{
422 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
423 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
424 // of the call (which we can use to look up the patch information), but should an array-length or
425 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
426 // the distance from the call to the head of the slow case.
427 linkSlowCaseIfNotJSCell(iter, base);
428 linkSlowCase(iter);
429
f9bf01c6 430 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
ba379fdc 431
f9bf01c6 432#ifndef NDEBUG
433 Label coldPathBegin(this);
434#endif
ba379fdc 435 JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
436 stubCall.addArgument(regT1, regT0);
437 stubCall.addArgument(ImmPtr(ident));
438 Call call = stubCall.call(dst);
439
f9bf01c6 440 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
441
ba379fdc 442 ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
443
444 // Track the location of the call; this will be used to recover patch information.
445 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
446 m_propertyAccessInstructionIndex++;
447}
448
449void JIT::emit_op_put_by_id(Instruction* currentInstruction)
450{
451 // In order to be able to patch both the Structure, and the object offset, we store one pointer,
452 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
453 // such that the Structure & offset are always at the same distance from this.
454
455 int base = currentInstruction[1].u.operand;
456 int value = currentInstruction[3].u.operand;
457
458 emitLoad2(base, regT1, regT0, value, regT3, regT2);
459
460 emitJumpSlowCaseIfNotJSCell(base, regT1);
461
f9bf01c6 462 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
463
ba379fdc 464 Label hotPathBegin(this);
465 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
466 m_propertyAccessInstructionIndex++;
467
468 // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
469 DataLabelPtr structureToCompare;
470 addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
471 ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
472
473 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
474 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
475 Label externalLoadComplete(this);
476 ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
477 ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
478
479 DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
480 DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
f9bf01c6 481
482 END_UNINTERRUPTED_SEQUENCE(sequencePutById);
483
ba379fdc 484 ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
485 ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
486}
487
488void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
489{
490 int base = currentInstruction[1].u.operand;
491 int ident = currentInstruction[2].u.operand;
492
493 linkSlowCaseIfNotJSCell(iter, base);
494 linkSlowCase(iter);
495
496 JITStubCall stubCall(this, cti_op_put_by_id);
497 stubCall.addArgument(regT1, regT0);
498 stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
499 stubCall.addArgument(regT3, regT2);
500 Call call = stubCall.call();
501
502 // Track the location of the call; this will be used to recover patch information.
503 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
504 m_propertyAccessInstructionIndex++;
505}
506
507// Compile a store into an object's property storage. May overwrite base.
508void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
509{
510 int offset = cachedOffset;
511 if (structure->isUsingInlineStorage())
512 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
513 else
514 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
515 emitStore(offset, valueTag, valuePayload, base);
516}
517
518// Compile a load from an object's property storage. May overwrite base.
519void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
520{
521 int offset = cachedOffset;
522 if (structure->isUsingInlineStorage())
523 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
524 else
525 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
526 emitLoad(offset, resultTag, resultPayload, base);
527}
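// In the two helpers above the offset is counted in Register-sized (eight-byte)
// units: emitLoad/emitStore scale by sizeof(Register), so an inline property at
// cachedOffset 2 ends up at byte offset OBJECT_OFFSETOF(JSObject, m_inlineStorage)
// + 2 * 8 from the cell, while external storage costs one extra pointer load first.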
528
529void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
530{
531 if (base->isUsingInlineStorage()) {
532 load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
533 load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
534 return;
535 }
536
537 size_t offset = cachedOffset * sizeof(JSValue);
538
539 PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
540 loadPtr(static_cast<void*>(protoPropertyStorage), temp);
541 load32(Address(temp, offset), resultPayload);
542 load32(Address(temp, offset + 4), resultTag);
543}
544
f9bf01c6 545void JIT::testPrototype(Structure* structure, JumpList& failureCases)
546{
547 if (structure->m_prototype.isNull())
548 return;
549
550 failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
551}
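// testPrototype() bakes the prototype cell's current Structure pointer into the code
// as an immediate and compares it against the Structure slot at a fixed absolute
// address. This is cheap and self-invalidating: if the prototype's Structure ever
// changes, the comparison fails and execution falls back to the slow case.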
552
ba379fdc 553void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
554{
555 // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
556
557 JumpList failureCases;
558 failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
f9bf01c6 559 failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
560 testPrototype(oldStructure, failureCases);
ba379fdc 561
562 // Verify that nothing in the prototype chain has a setter for this property.
f9bf01c6 563 for (RefPtr<Structure>* it = chain->head(); *it; ++it)
564 testPrototype(it->get(), failureCases);
ba379fdc 565
566 // Reallocate property storage if needed.
567 Call callTarget;
568 bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
569 if (willNeedStorageRealloc) {
570 // This trampoline was called like a JIT stub; before we can call again we need to
571 // remove the return address from the stack, to prevent the stack from becoming misaligned.
572 preserveReturnAddressAfterCall(regT3);
573
574 JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
575 stubCall.skipArgument(); // base
576 stubCall.skipArgument(); // ident
577 stubCall.skipArgument(); // value
578 stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
579 stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
580 stubCall.call(regT0);
581
582 restoreReturnAddressBeforeReturn(regT3);
583 }
584
585 sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
586 add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
587 storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
588
589 load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
590 load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
591
592 // Write the value
593 compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
594
595 ret();
596
597 ASSERT(!failureCases.empty());
598 failureCases.link(this);
599 restoreArgumentReferenceForTrampoline();
600 Call failureCall = tailRecursiveCall();
601
602 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
603
604 patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
605
606 if (willNeedStorageRealloc) {
607 ASSERT(m_calls.size() == 1);
608 patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
609 }
610
611 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
612 stubInfo->stubRoutine = entryLabel;
613 RepatchBuffer repatchBuffer(m_codeBlock);
614 repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
615}
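// The transition stub above proceeds in order: guard the old Structure and every
// Structure on the prototype chain; call the realloc stub if the new Structure needs
// larger property storage; adjust the two Structures' refcounts and write the new
// Structure pointer into the cell; finally reload the value from the stub call frame
// on the stack and store it at the new property offset.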
616
617void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
618{
619 RepatchBuffer repatchBuffer(codeBlock);
620
621 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
622 // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
623 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
624
625 int offset = sizeof(JSValue) * cachedOffset;
626
627 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
628 // and makes the subsequent load's offset automatically correct
629 if (structure->isUsingInlineStorage())
630 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
631
632 // Patch the offset into the property map to load from, then patch the Structure to look for.
633 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
634 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
635 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
636}
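// The patched displacements are plain byte offsets into the property storage:
// offset = cachedOffset * sizeof(JSValue), payload at offset, tag at offset + 4.
// For example, cachedOffset 2 patches the two loads to use offsets 16 and 20.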
637
f9bf01c6 638void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
ba379fdc 639{
640 RepatchBuffer repatchBuffer(codeBlock);
641
642 ASSERT(!methodCallLinkInfo.cachedStructure);
643 methodCallLinkInfo.cachedStructure = structure;
644 structure->ref();
645
646 Structure* prototypeStructure = proto->structure();
647 ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
648 methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
649 prototypeStructure->ref();
650
651 repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
652 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
653 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
654 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
f9bf01c6 655
656 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
ba379fdc 657}
658
659void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
660{
661 RepatchBuffer repatchBuffer(codeBlock);
662
663 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
664 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
665 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
666
667 int offset = sizeof(JSValue) * cachedOffset;
668
669 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
670 // and makes the subsequent load's offset automatically correct
671 if (structure->isUsingInlineStorage())
672 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
673
674 // Patch the offset into the property map to load from, then patch the Structure to look for.
675 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
676 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
677 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
678}
679
680void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
681{
682 StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
683
684 // regT0 holds a JSCell*
685
686 // Check for array
687 Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
688
689 // Checks out okay! - get the length from the storage
690 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
691 load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
692
693 Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
694 move(regT2, regT0);
695 move(Imm32(JSValue::Int32Tag), regT1);
696 Jump success = jump();
697
698 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
699
700 // Use the patch information to link the failure cases back to the original slow case routine.
701 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
702 patchBuffer.link(failureCases1, slowCaseBegin);
703 patchBuffer.link(failureCases2, slowCaseBegin);
704
705 // On success return to the hot path code, at a point where it will perform the store to dest for us.
706 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
707
708 // Track the stub we have created so that it will be deleted later.
709 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
710 stubInfo->stubRoutine = entryLabel;
711
712 // Finally patch the jump to slow case back in the hot path to jump here instead.
713 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
714 RepatchBuffer repatchBuffer(m_codeBlock);
715 repatchBuffer.relink(jumpLocation, entryLabel);
716
717 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
718 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
719}
720
721void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
722{
723 // regT0 holds a JSCell*
724
725 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
726 // referencing the prototype object - let's speculatively load its table nice and early!)
727 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
728
729 Jump failureCases1 = checkStructure(regT0, structure);
730
731 // Check the prototype object's Structure has not changed.
732 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 733#if CPU(X86_64)
ba379fdc 734 move(ImmPtr(prototypeStructure), regT3);
735 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
736#else
737 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
738#endif
739
740 // Checks out okay! - getDirectOffset
741 compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
742
743 Jump success = jump();
744
745 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
746
747 // Use the patch information to link the failure cases back to the original slow case routine.
748 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
749 patchBuffer.link(failureCases1, slowCaseBegin);
750 patchBuffer.link(failureCases2, slowCaseBegin);
751
753 // On success return to the hot path code, at a point where it will perform the store to dest for us.
753 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
754
755 // Track the stub we have created so that it will be deleted later.
756 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
757 stubInfo->stubRoutine = entryLabel;
758
759 // Finally patch the jump to slow case back in the hot path to jump here instead.
760 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
761 RepatchBuffer repatchBuffer(m_codeBlock);
762 repatchBuffer.relink(jumpLocation, entryLabel);
763
764 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
765 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
766}
767
768
769void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
770{
771 // regT0 holds a JSCell*
772
773 Jump failureCase = checkStructure(regT0, structure);
774 compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
775 Jump success = jump();
776
777 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
778
779 // Use the patch information to link the failure cases back to the original slow case routine.
780 CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
781 if (!lastProtoBegin)
782 lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
783
784 patchBuffer.link(failureCase, lastProtoBegin);
785
787 // On success return to the hot path code, at a point where it will perform the store to dest for us.
787 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
788
789 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
790
791 structure->ref();
792 polymorphicStructures->list[currentIndex].set(entryLabel, structure);
793
794 // Finally patch the jump to slow case back in the hot path to jump here instead.
795 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
796 RepatchBuffer repatchBuffer(m_codeBlock);
797 repatchBuffer.relink(jumpLocation, entryLabel);
798}
799
800void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
801{
802 // regT0 holds a JSCell*
803
804 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
805 // referencing the prototype object - let's speculatively load its table nice and early!)
806 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
807
808 // Check that regT0 holds an object of the right Structure.
809 Jump failureCases1 = checkStructure(regT0, structure);
810
811 // Check the prototype object's Structure has not changed.
812 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 813#if CPU(X86_64)
ba379fdc 814 move(ImmPtr(prototypeStructure), regT3);
815 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
816#else
817 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
818#endif
819
820 compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
821
822 Jump success = jump();
823
824 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
825
826 // Use the patch information to link the failure cases back to the original slow case routine.
827 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
828 patchBuffer.link(failureCases1, lastProtoBegin);
829 patchBuffer.link(failureCases2, lastProtoBegin);
830
832 // On success return to the hot path code, at a point where it will perform the store to dest for us.
832 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
833
834 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
835
836 structure->ref();
837 prototypeStructure->ref();
838 prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
839
840 // Finally patch the jump to slow case back in the hot path to jump here instead.
841 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
842 RepatchBuffer repatchBuffer(m_codeBlock);
843 repatchBuffer.relink(jumpLocation, entryLabel);
844}
845
846void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
9dae56ea 847{
ba379fdc 848 // regT0 holds a JSCell*
849
850 ASSERT(count);
851
852 JumpList bucketsOfFail;
9dae56ea 853
ba379fdc 854 // Check that regT0 holds an object of the right Structure.
855 bucketsOfFail.append(checkStructure(regT0, structure));
9dae56ea 856
ba379fdc 857 Structure* currStructure = structure;
858 RefPtr<Structure>* chainEntries = chain->head();
859 JSObject* protoObject = 0;
860 for (unsigned i = 0; i < count; ++i) {
861 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
862 currStructure = chainEntries[i].get();
863
864 // Check the prototype object's Structure has not changed.
865 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 866#if CPU(X86_64)
ba379fdc 867 move(ImmPtr(currStructure), regT3);
868 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
869#else
870 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
871#endif
872 }
873 ASSERT(protoObject);
874
875 compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
876 Jump success = jump();
877
878 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
879
880 // Use the patch information to link the failure cases back to the original slow case routine.
881 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
882
883 patchBuffer.link(bucketsOfFail, lastProtoBegin);
884
886 // On success return to the hot path code, at a point where it will perform the store to dest for us.
886 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
887
888 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
889
890 // Track the stub we have created so that it will be deleted later.
891 structure->ref();
892 chain->ref();
893 prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
894
895 // Finally patch the jump to slow case back in the hot path to jump here instead.
896 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
897 RepatchBuffer repatchBuffer(m_codeBlock);
898 repatchBuffer.relink(jumpLocation, entryLabel);
899}
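// The loop above walks the prototype chain one hop per iteration, guarding each
// prototype's Structure just as the single-prototype case does: a chain of length N
// costs one structure check on the base plus N absolute-address checks before the
// final direct-offset load from the last prototype.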
900
901void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
902{
903 // regT0 holds a JSCell*
904
905 ASSERT(count);
906
907 JumpList bucketsOfFail;
908
909 // Check that regT0 holds an object of the right Structure.
910 bucketsOfFail.append(checkStructure(regT0, structure));
911
912 Structure* currStructure = structure;
913 RefPtr<Structure>* chainEntries = chain->head();
914 JSObject* protoObject = 0;
915 for (unsigned i = 0; i < count; ++i) {
916 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
917 currStructure = chainEntries[i].get();
918
919 // Check the prototype object's Structure has not changed.
920 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 921#if CPU(X86_64)
ba379fdc 922 move(ImmPtr(currStructure), regT3);
923 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
924#else
925 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
926#endif
927 }
928 ASSERT(protoObject);
929
930 compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
931 Jump success = jump();
932
933 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
934
935 // Use the patch information to link the failure cases back to the original slow case routine.
936 patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
937
939 // On success return to the hot path code, at a point where it will perform the store to dest for us.
939 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
940
941 // Track the stub we have created so that it will be deleted later.
942 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
943 stubInfo->stubRoutine = entryLabel;
944
945 // Finally patch the jump to slow case back in the hot path to jump here instead.
946 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
947 RepatchBuffer repatchBuffer(m_codeBlock);
948 repatchBuffer.relink(jumpLocation, entryLabel);
949
950 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
951 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
952}
953
954/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
955
956#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
957
f9bf01c6 958void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
959{
960 ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
961 ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
962 ASSERT(sizeof(JSValue) == 8);
963
964 Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
965 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
966 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
967 Jump finishedLoad = jump();
968 notUsingInlineStorage.link(this);
969 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
970 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
971 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
972 finishedLoad.link(this);
973}
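// Unlike the compile-time variants earlier in the file, this helper picks between
// inline and external storage at runtime, by comparing the Structure's
// m_propertyStorageCapacity with JSObject::inlineStorageCapacity - equality means
// the object has never outgrown its inline slots. get_by_pname needs this form
// because the property offset is only known at runtime, in a register.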
974
975void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
976{
977 unsigned dst = currentInstruction[1].u.operand;
978 unsigned base = currentInstruction[2].u.operand;
979 unsigned property = currentInstruction[3].u.operand;
980 unsigned expected = currentInstruction[4].u.operand;
981 unsigned iter = currentInstruction[5].u.operand;
982 unsigned i = currentInstruction[6].u.operand;
983
984 emitLoad2(property, regT1, regT0, base, regT3, regT2);
985 emitJumpSlowCaseIfNotJSCell(property, regT1);
986 addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
987 // Property registers are now available as the property is known
988 emitJumpSlowCaseIfNotJSCell(base, regT3);
989 emitLoadPayload(iter, regT1);
990
991 // Test base's structure
992 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
993 addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
994 load32(addressFor(i), regT3);
995 sub32(Imm32(1), regT3);
996 addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
997 compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
998
999 emitStore(dst, regT1, regT0);
1000 map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
1001}
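// get_by_pname is the fast path for for-in loops: the check against expected
// verifies we are still iterating the property the iterator cached, and the loop
// counter i is offset by one at this point, hence the sub32(Imm32(1)) before the
// bounds check against m_numCacheableSlots and the indexed load.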
1002
1003void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1004{
1005 unsigned dst = currentInstruction[1].u.operand;
1006 unsigned base = currentInstruction[2].u.operand;
1007 unsigned property = currentInstruction[3].u.operand;
1008
1009 linkSlowCaseIfNotJSCell(iter, property);
1010 linkSlowCase(iter);
1011 linkSlowCaseIfNotJSCell(iter, base);
1012 linkSlowCase(iter);
1013 linkSlowCase(iter);
1014
1015 JITStubCall stubCall(this, cti_op_get_by_val);
1016 stubCall.addArgument(base);
1017 stubCall.addArgument(property);
1018 stubCall.call(dst);
1019}
1020
ba379fdc 1021#else // USE(JSVALUE32_64)
1022
1023void JIT::emit_op_get_by_val(Instruction* currentInstruction)
1024{
f9bf01c6 1025 unsigned dst = currentInstruction[1].u.operand;
1026 unsigned base = currentInstruction[2].u.operand;
1027 unsigned property = currentInstruction[3].u.operand;
1028
1029 emitGetVirtualRegisters(base, regT0, property, regT1);
ba379fdc 1030 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1031#if USE(JSVALUE64)
1032 // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
f9bf01c6 1033 // We check the value as if it were a uint32 against m_vectorLength - which will always fail if
1034 // the number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
ba379fdc 1035 // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
1036 // value to 64 bits is necessary since it is used in the address calculation). We zero-extend rather
1037 // than sign-extend since it makes it easier to re-tag the value in the slow case.
1038 zeroExtend32ToPtr(regT1, regT1);
1039#else
1040 emitFastArithImmToInt(regT1);
1041#endif
f9bf01c6 1042 emitJumpSlowCaseIfNotJSCell(regT0, base);
ba379fdc 1043 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
1044
ba379fdc 1045 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
f9bf01c6 1046 addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
ba379fdc 1047
ba379fdc 1048 loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
f9bf01c6 1049 addSlowCase(branchTestPtr(Zero, regT0));
1050
1051 emitPutVirtualRegister(dst);
1052}
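// Under JSVALUE64 a JSValue is a single pointer-sized word and an empty array slot
// is the all-zero encoding, so branchTestPtr(Zero, regT0) doubles as the hole check
// here - there is no separate tag word to inspect, unlike the 32_64 path above.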
1053
1054void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
1055{
1056 ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
1057 ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
1058
1059 Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
1060 loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
1061 Jump finishedLoad = jump();
1062 notUsingInlineStorage.link(this);
1063 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
1064 loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
1065 finishedLoad.link(this);
1066}
1067
1068void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
1069{
1070 unsigned dst = currentInstruction[1].u.operand;
1071 unsigned base = currentInstruction[2].u.operand;
1072 unsigned property = currentInstruction[3].u.operand;
1073 unsigned expected = currentInstruction[4].u.operand;
1074 unsigned iter = currentInstruction[5].u.operand;
1075 unsigned i = currentInstruction[6].u.operand;
1076
1077 emitGetVirtualRegister(property, regT0);
1078 addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
1079 emitGetVirtualRegisters(base, regT0, iter, regT1);
1080 emitJumpSlowCaseIfNotJSCell(regT0, base);
1081
1082 // Test base's structure
1083 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
1084 addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
1085 load32(addressFor(i), regT3);
1086 sub32(Imm32(1), regT3);
1087 addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
1088 compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);
1089
1090 emitPutVirtualRegister(dst, regT0);
1091}
1092
1093void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1094{
1095 unsigned dst = currentInstruction[1].u.operand;
1096 unsigned base = currentInstruction[2].u.operand;
1097 unsigned property = currentInstruction[3].u.operand;
1098
1099 linkSlowCase(iter);
1100 linkSlowCaseIfNotJSCell(iter, base);
1101 linkSlowCase(iter);
1102 linkSlowCase(iter);
1103
1104 JITStubCall stubCall(this, cti_op_get_by_val);
1105 stubCall.addArgument(base, regT2);
1106 stubCall.addArgument(property, regT2);
1107 stubCall.call(dst);
ba379fdc 1108}
1109
1110void JIT::emit_op_put_by_val(Instruction* currentInstruction)
1111{
f9bf01c6 1112 unsigned base = currentInstruction[1].u.operand;
1113 unsigned property = currentInstruction[2].u.operand;
1114 unsigned value = currentInstruction[3].u.operand;
1115
1116 emitGetVirtualRegisters(base, regT0, property, regT1);
ba379fdc 1117 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1118#if USE(JSVALUE64)
1119 // See comment in op_get_by_val.
1120 zeroExtend32ToPtr(regT1, regT1);
1121#else
1122 emitFastArithImmToInt(regT1);
1123#endif
f9bf01c6 1124 emitJumpSlowCaseIfNotJSCell(regT0, base);
ba379fdc 1125 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
f9bf01c6 1126 addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
ba379fdc 1127
ba379fdc 1128 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
ba379fdc 1129
f9bf01c6 1130 Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
ba379fdc 1131
f9bf01c6 1132 Label storeResult(this);
1133 emitGetVirtualRegister(value, regT0);
ba379fdc 1134 storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
f9bf01c6 1135 Jump end = jump();
1136
1137 empty.link(this);
1138 add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
1139 branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
1140
1141 move(regT1, regT0);
1142 add32(Imm32(1), regT0);
1143 store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
1144 jump().linkTo(storeResult, this);
1145
1146 end.link(this);
ba379fdc 1147}
1148
1149void JIT::emit_op_put_by_index(Instruction* currentInstruction)
1150{
1151 JITStubCall stubCall(this, cti_op_put_by_index);
1152 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1153 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
1154 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1155 stubCall.call();
1156}
1157
1158void JIT::emit_op_put_getter(Instruction* currentInstruction)
1159{
1160 JITStubCall stubCall(this, cti_op_put_getter);
1161 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1162 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
1163 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1164 stubCall.call();
1165}
1166
1167void JIT::emit_op_put_setter(Instruction* currentInstruction)
1168{
1169 JITStubCall stubCall(this, cti_op_put_setter);
1170 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1171 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
1172 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1173 stubCall.call();
1174}
1175
1176void JIT::emit_op_del_by_id(Instruction* currentInstruction)
1177{
1178 JITStubCall stubCall(this, cti_op_del_by_id);
1179 stubCall.addArgument(currentInstruction[2].u.operand, regT2);
1180 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
1181 stubCall.call(currentInstruction[1].u.operand);
9dae56ea 1182}
1183
1184
ba379fdc 1185#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1186
1187/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1188
1189// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1190void JIT::emit_op_method_check(Instruction*) {}
1191void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1192#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1193#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
1194#endif
1195
1196void JIT::emit_op_get_by_id(Instruction* currentInstruction)
1197{
1198 unsigned resultVReg = currentInstruction[1].u.operand;
1199 unsigned baseVReg = currentInstruction[2].u.operand;
1200 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1201
1202 emitGetVirtualRegister(baseVReg, regT0);
1203 JITStubCall stubCall(this, cti_op_get_by_id_generic);
1204 stubCall.addArgument(regT0);
1205 stubCall.addArgument(ImmPtr(ident));
1206 stubCall.call(resultVReg);
1207
1208 m_propertyAccessInstructionIndex++;
1209}
1210
1211void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
9dae56ea 1212{
1213 ASSERT_NOT_REACHED();
1214}
1215
ba379fdc 1216void JIT::emit_op_put_by_id(Instruction* currentInstruction)
9dae56ea 1217{
ba379fdc 1218 unsigned baseVReg = currentInstruction[1].u.operand;
1219 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
1220 unsigned valueVReg = currentInstruction[3].u.operand;
1221
1222 emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
9dae56ea 1223
1224 JITStubCall stubCall(this, cti_op_put_by_id_generic);
1225 stubCall.addArgument(regT0);
1226 stubCall.addArgument(ImmPtr(ident));
1227 stubCall.addArgument(regT1);
1228 stubCall.call();
9dae56ea 1229
ba379fdc 1230 m_propertyAccessInstructionIndex++;
1231}
1232
ba379fdc 1233void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
1234{
1235 ASSERT_NOT_REACHED();
1236}
1237
1238#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1239
1240/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1241
1242#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1243
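// A method check fuses op_method_check with the get_by_id that follows it.
// The hot path compares the base object's Structure and its prototype's
// Structure against patchable inline constants, and plants a patchable
// immediate that patchMethodCallProto() later fills in with the cached
// JSFunction, so a cache hit materializes the callee without any load.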
1244void JIT::emit_op_method_check(Instruction* currentInstruction)
1245{
1246 // Assert that the following instruction is a get_by_id.
1247 ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
1248
1249 currentInstruction += OPCODE_LENGTH(op_method_check);
1250 unsigned resultVReg = currentInstruction[1].u.operand;
1251 unsigned baseVReg = currentInstruction[2].u.operand;
1252 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1253
1254 emitGetVirtualRegister(baseVReg, regT0);
1255
1256 // Do the method check - check the object & its prototype's structure inline (this is the common case).
1257 m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
1258 MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
f9bf01c6 1259
ba379fdc 1260 Jump notCell = emitJumpIfNotJSCell(regT0);
1261
1262 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
1263
1264 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1265 DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
1266 Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1267
1268 // This site will be repatched to materialize the function as an immediate, without doing a load.
1269 DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
1270
1271 END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
1272
1273 Jump match = jump();
1274
1275 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
1276 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
1277 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
1278
1279 // Link the failure cases here.
1280 notCell.link(this);
1281 structureCheck.link(this);
1282 protoStructureCheck.link(this);
1283
1284 // Do a regular(ish) get_by_id (the slow case will be linked to
1285 // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
1286 compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
1287
1288 match.link(this);
1289 emitPutVirtualRegister(resultVReg);
1290
1291 // We've already generated the following get_by_id, so make sure it's skipped over.
1292 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
1293}
9dae56ea 1294
1295void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1296{
1297 currentInstruction += OPCODE_LENGTH(op_method_check);
1298 unsigned resultVReg = currentInstruction[1].u.operand;
1299 unsigned baseVReg = currentInstruction[2].u.operand;
1300 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1301
1302 compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
1303
1304 // We've already generated the following get_by_id, so make sure it's skipped over.
1305 m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
1306}
1307
1308#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1309
1310// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1311void JIT::emit_op_method_check(Instruction*) {}
1312void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1313
1314#endif
1315
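// The optimized get_by_id emits an inline cache: a patchable Structure
// check followed by a load from a patchable offset (see
// compileGetByIdHotPath), specialized later by the repatching code once a
// concrete access has been observed.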
1316void JIT::emit_op_get_by_id(Instruction* currentInstruction)
1317{
1318 unsigned resultVReg = currentInstruction[1].u.operand;
1319 unsigned baseVReg = currentInstruction[2].u.operand;
1320 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1321
1322 emitGetVirtualRegister(baseVReg, regT0);
1323 compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
1324 emitPutVirtualRegister(resultVReg);
1325}
1326
1327void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
1328{
1329 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
1330 // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
1331 // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
1332 // to jump back to if one of these trampolines finds a match.
1333
ba379fdc 1334 emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
9dae56ea 1335
1336 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
1337
1338 Label hotPathBegin(this);
1339 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1340
1341 DataLabelPtr structureToCompare;
ba379fdc 1342 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
9dae56ea 1343 addSlowCase(structureCheck);
1344 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
1345 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
9dae56ea 1346
1347 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1348 Label externalLoadComplete(this);
1349 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
1350 ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);
1351
1352 DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
f9bf01c6 1353 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
1354
1355 Label putResult(this);
1356
1357 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
1358
1359 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
1360}
1361
1362void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1363{
1364 unsigned resultVReg = currentInstruction[1].u.operand;
1365 unsigned baseVReg = currentInstruction[2].u.operand;
1366 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1367
1368 compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
1369}
9dae56ea 1370
ba379fdc 1371void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
1372{
1373 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1374 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
1375 // of the call (which we can use to look up the patch information), but should an array-length or
1376 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1377 // the distance from the call to the head of the slow case.
1378
1379 linkSlowCaseIfNotJSCell(iter, baseVReg);
1380 linkSlowCase(iter);
1381
1382 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
1383
1384#ifndef NDEBUG
1385 Label coldPathBegin(this);
1386#endif
1387 JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
1388 stubCall.addArgument(regT0);
1389 stubCall.addArgument(ImmPtr(ident));
1390 Call call = stubCall.call(resultVReg);
9dae56ea 1391
1392 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
1393
1394 ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
1395
1396 // Track the location of the call; this will be used to recover patch information.
1397 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
1398 m_propertyAccessInstructionIndex++;
1399}
1400
ba379fdc 1401void JIT::emit_op_put_by_id(Instruction* currentInstruction)
9dae56ea 1402{
1403 unsigned baseVReg = currentInstruction[1].u.operand;
1404 unsigned valueVReg = currentInstruction[3].u.operand;
1405
1406 unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
1407
1408 // In order to be able to patch both the Structure and the object offset, we store a single pointer
1409 // ('hotPathBegin'), taken just after the arguments have been loaded into registers, and we generate code
1410 // such that the Structure & offset are always at the same distance from it.
1411
ba379fdc 1412 emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
1413
1414 // Jump to a slow case if either the base object is an immediate or the Structure does not match.
ba379fdc 1415 emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
9dae56ea 1416
1417 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
1418
1419 Label hotPathBegin(this);
1420 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1421
1422 // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
1423 DataLabelPtr structureToCompare;
ba379fdc 1424 addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
f9bf01c6 1425 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
1426
1427 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1428 Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1429 Label externalLoadComplete(this);
1430 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
1431 ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);
1432
1433 DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
1434
1435 END_UNINTERRUPTED_SEQUENCE(sequencePutById);
1436
1437 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
1438}
1439
ba379fdc 1440void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
9dae56ea 1441{
1442 unsigned baseVReg = currentInstruction[1].u.operand;
1443 Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
1444
1445 unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
1446
1447 linkSlowCaseIfNotJSCell(iter, baseVReg);
1448 linkSlowCase(iter);
1449
1450 JITStubCall stubCall(this, cti_op_put_by_id);
1451 stubCall.addArgument(regT0);
1452 stubCall.addArgument(ImmPtr(ident));
1453 stubCall.addArgument(regT1);
1454 Call call = stubCall.call();
1455
1456 // Track the location of the call; this will be used to recover patch information.
1457 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
1458}
1459
1460// Compile a store into an object's property storage. May overwrite the
1461// value in the base register.
1462void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
1463{
1464 int offset = cachedOffset * sizeof(JSValue);
1465 if (structure->isUsingInlineStorage())
1466 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1467 else
1468 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1469 storePtr(value, Address(base, offset));
1470}
1471
1472// Compile a load from an object's property storage. May overwrite base.
1473void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
9dae56ea 1474{
1475 int offset = cachedOffset * sizeof(JSValue);
1476 if (structure->isUsingInlineStorage())
1477 offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1478 else
1479 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1480 loadPtr(Address(base, offset), result);
1481}
1482
ba379fdc 1483void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
9dae56ea 1484{
1485 if (base->isUsingInlineStorage())
1486 loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
1487 else {
1488 PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
1489 loadPtr(static_cast<void*>(protoPropertyStorage), temp);
1490 loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
1491 }
1492}
1493
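// Emit a guard that a Structure's prototype still has the Structure it had
// at compile time: compare the prototype cell's m_structure field against
// the cached pointer, appending a failure jump on mismatch. A null
// prototype terminates the chain, so no check is needed.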
1494void JIT::testPrototype(Structure* structure, JumpList& failureCases)
1495{
1496 if (structure->m_prototype.isNull())
1497 return;
1498
1499 move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
1500 move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
1501 failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
1502}
1503
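// Compile the stub for a put_by_id that transitions the base object to a
// new Structure: verify the old Structure and every Structure along the
// prototype chain, reallocate the property storage out-of-line if its
// capacity changed, swap the Structure pointers (fixing up refcounts), and
// finally store the value.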
ba379fdc 1504void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
1505{
1506 JumpList failureCases;
1507 // Check regT0 is an object of the right Structure.
1508 failureCases.append(emitJumpIfNotJSCell(regT0));
1509 failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
f9bf01c6 1510 testPrototype(oldStructure, failureCases);
9dae56ea 1511
9dae56ea 1512 // Check the Structures of the objects along the prototype chain have not changed.
1513 for (RefPtr<Structure>* it = chain->head(); *it; ++it)
1514 testPrototype(it->get(), failureCases);
9dae56ea 1515
ba379fdc 1516 Call callTarget;
1517
1518 // emit a call only if storage realloc is needed
1519 bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
1520 if (willNeedStorageRealloc) {
1521 // This trampoline was called like a JIT stub; before we can call again we need to
1522 // remove the return address from the stack, to prevent the stack from becoming misaligned.
1523 preserveReturnAddressAfterCall(regT3);
1524
1525 JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
1526 stubCall.skipArgument(); // base
1527 stubCall.skipArgument(); // ident
1528 stubCall.skipArgument(); // value
1529 stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
1530 stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
1531 stubCall.call(regT0);
f9bf01c6 1532 emitGetJITStubArg(2, regT1);
1533
1534 restoreReturnAddressBeforeReturn(regT3);
1535 }
1536
1537 // Assumes m_refCount can be decremented easily; the refcount decrement is safe as
1538 // the codeblock should ensure oldStructure->m_refCount > 0
1539 sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
1540 add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
ba379fdc 1541 storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
1542
1543 // write the value
ba379fdc 1544 compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
1545
1546 ret();
1547
1548 ASSERT(!failureCases.empty());
1549 failureCases.link(this);
1550 restoreArgumentReferenceForTrampoline();
1551 Call failureCall = tailRecursiveCall();
9dae56ea 1552
ba379fdc 1553 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
9dae56ea 1554
ba379fdc 1555 patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
9dae56ea 1556
1557 if (willNeedStorageRealloc) {
1558 ASSERT(m_calls.size() == 1);
1559 patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
1560 }
9dae56ea 1561
1562 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1563 stubInfo->stubRoutine = entryLabel;
1564 RepatchBuffer repatchBuffer(m_codeBlock);
1565 repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
1566}
1567
ba379fdc 1568void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
9dae56ea 1569{
1570 RepatchBuffer repatchBuffer(codeBlock);
1571
9dae56ea 1572 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1573 // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
1574 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
1575
1576 int offset = sizeof(JSValue) * cachedOffset;
1577
1578 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
1579 // and makes the subsequent load's offset automatically correct
1580 if (structure->isUsingInlineStorage())
1581 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
1582
1583 // Patch the offset into the property map to load from, then patch the Structure to look for.
1584 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
1585 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
1586}
1587
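// Complete a successful method check: cache (and ref) the Structure and
// prototype Structure so they outlive the patch, plant the prototype,
// Structure and function constants into the inline check sequence, and
// relink the slow-path call to plain cti_op_get_by_id.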
f9bf01c6 1588void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
1589{
1590 RepatchBuffer repatchBuffer(codeBlock);
1591
1592 ASSERT(!methodCallLinkInfo.cachedStructure);
1593 methodCallLinkInfo.cachedStructure = structure;
1594 structure->ref();
1595
1596 Structure* prototypeStructure = proto->structure();
1597 ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
1598 methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
1599 prototypeStructure->ref();
1600
1601 repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
1602 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
1603 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
1604 repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
1605
1606 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
1607}
1608
ba379fdc 1609void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
9dae56ea 1610{
1611 RepatchBuffer repatchBuffer(codeBlock);
1612
9dae56ea 1613 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
1614 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
1615 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
1616
1617 int offset = sizeof(JSValue) * cachedOffset;
1618
1619 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
1620 // and makes the subsequent load's offset automatically correct
1621 if (structure->isUsingInlineStorage())
1622 repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
1623
1624 // Patch the offset into the property map to load from, then patch the Structure to look for.
1625 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
1626 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
1627}
1628
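// Compile the array-length trampoline: verify the cell is a JSArray via its
// vptr, load m_length out of the ArrayStorage, and bail to the slow case if
// the length cannot be represented as an immediate integer.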
ba379fdc 1629void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
1630{
1631 StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
1632
9dae56ea 1633 // Check regT0 is an array
ba379fdc 1634 Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
1635
1636 // Checks out okay! - get the length from the storage
1637 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
1638 load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
9dae56ea 1639
ba379fdc 1640 Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
9dae56ea 1641
ba379fdc 1642 emitFastArithIntToImmNoCheck(regT2, regT0);
1643 Jump success = jump();
1644
ba379fdc 1645 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1646
1647 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1648 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1649 patchBuffer.link(failureCases1, slowCaseBegin);
1650 patchBuffer.link(failureCases2, slowCaseBegin);
1651
1652 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
ba379fdc 1653 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1654
1655 // Track the stub we have created so that it will be deleted later.
1656 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1657 stubInfo->stubRoutine = entryLabel;
9dae56ea 1658
1659 // Finally patch the jump to slow case back in the hot path to jump here instead.
1660 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1661 RepatchBuffer repatchBuffer(m_codeBlock);
1662 repatchBuffer.relink(jumpLocation, entryLabel);
9dae56ea 1663
1664 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1665 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
1666}
1667
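// Compile a stub for a get_by_id that finds the property on the direct
// prototype: check the base and prototype Structures, then load straight
// out of the prototype object, whose address is baked into the stub.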
ba379fdc 1668void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
9dae56ea 1669{
1670 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
1671 // referencing the prototype object - let's speculatively load its table nice and early!)
1672 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1673
1674 // Check regT0 is an object of the right Structure.
ba379fdc 1675 Jump failureCases1 = checkStructure(regT0, structure);
1676
1677 // Check the prototype object's Structure has not changed.
1678 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 1679#if CPU(X86_64)
1680 move(ImmPtr(prototypeStructure), regT3);
1681 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
9dae56ea 1682#else
ba379fdc 1683 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
1684#endif
1685
1686 // Checks out okay! - getDirectOffset
ba379fdc 1687 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1688
1689 Jump success = jump();
1690
ba379fdc 1691 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1692
1693 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1694 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1695 patchBuffer.link(failureCases1, slowCaseBegin);
1696 patchBuffer.link(failureCases2, slowCaseBegin);
1697
1698 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
ba379fdc 1699 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1700
1701 // Track the stub we have created so that it will be deleted later.
1702 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1703 stubInfo->stubRoutine = entryLabel;
1704
1705 // Finally patch the jump to slow case back in the hot path to jump here instead.
1706 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1707 RepatchBuffer repatchBuffer(m_codeBlock);
1708 repatchBuffer.relink(jumpLocation, entryLabel);
9dae56ea 1709
1710 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1711 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1712}
1713
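// Polymorphic variant of the self-access stub: each Structure seen gets a
// stub of its own, and a failed check falls through to the previously
// generated stub (or to the slow case for the first entry in the list).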
1714void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
1715{
1716 Jump failureCase = checkStructure(regT0, structure);
1717 compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
1718 Jump success = jump();
1719
ba379fdc 1720 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1721
1722 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1723 CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
9dae56ea 1724 if (!lastProtoBegin)
ba379fdc 1725 lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1726
1727 patchBuffer.link(failureCase, lastProtoBegin);
1728
1729 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
1730 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1731
1732 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1733
1734 structure->ref();
ba379fdc 1735 polymorphicStructures->list[currentIndex].set(entryLabel, structure);
1736
1737 // Finally patch the jump to slow case back in the hot path to jump here instead.
1738 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1739 RepatchBuffer repatchBuffer(m_codeBlock);
1740 repatchBuffer.relink(jumpLocation, entryLabel);
1741}
1742
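// As above, but for prototype accesses: each (base Structure, prototype
// Structure) pair gets a stub chained onto the previous entry in the
// polymorphic list.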
1743void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
1744{
1745 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
1746 // referencing the prototype object - let's speculatively load its table nice and early!)
1747 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1748
1749 // Check regT0 is an object of the right Structure.
ba379fdc 1750 Jump failureCases1 = checkStructure(regT0, structure);
1751
1752 // Check the prototype object's Structure has not changed.
1753 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 1754#if CPU(X86_64)
1755 move(ImmPtr(prototypeStructure), regT3);
1756 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
9dae56ea 1757#else
ba379fdc 1758 Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
1759#endif
1760
1761 // Checks out okay! - getDirectOffset
ba379fdc 1762 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1763
1764 Jump success = jump();
1765
ba379fdc 1766 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1767
1768 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1769 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
1770 patchBuffer.link(failureCases1, lastProtoBegin);
1771 patchBuffer.link(failureCases2, lastProtoBegin);
1772
1773 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
1774 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1775
1776 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1777
1778 structure->ref();
1779 prototypeStructure->ref();
ba379fdc 1780 prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
1781
1782 // Finally patch the jump to slow case back in the hot path to jump here instead.
1783 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1784 RepatchBuffer repatchBuffer(m_codeBlock);
1785 repatchBuffer.relink(jumpLocation, entryLabel);
1786}
1787
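// Polymorphic stub for accesses that walk a prototype chain: check the base
// Structure, then every Structure along the cached StructureChain, before
// loading from the final prototype object.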
1788void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
1789{
1790 ASSERT(count);
1791
1792 JumpList bucketsOfFail;
1793
1794 // Check regT0 is an object of the right Structure.
ba379fdc 1795 Jump baseObjectCheck = checkStructure(regT0, structure);
1796 bucketsOfFail.append(baseObjectCheck);
1797
1798 Structure* currStructure = structure;
1799 RefPtr<Structure>* chainEntries = chain->head();
1800 JSObject* protoObject = 0;
1801 for (unsigned i = 0; i < count; ++i) {
1802 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1803 currStructure = chainEntries[i].get();
1804
1805 // Check the prototype object's Structure has not changed.
1806 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 1807#if CPU(X86_64)
1808 move(ImmPtr(currStructure), regT3);
1809 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
9dae56ea 1810#else
ba379fdc 1811 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
1812#endif
1813 }
1814 ASSERT(protoObject);
1815
ba379fdc 1816 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1817 Jump success = jump();
1818
ba379fdc 1819 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1820
1821 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1822 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
1823
1824 patchBuffer.link(bucketsOfFail, lastProtoBegin);
1825
1826 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
1827 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1828
1829 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1830
1831 // Track the stub we have created so that it will be deleted later.
1832 structure->ref();
1833 chain->ref();
ba379fdc 1834 prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
1835
1836 // Finally patch the jump to slow case back in the hot path to jump here instead.
1837 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1838 RepatchBuffer repatchBuffer(m_codeBlock);
1839 repatchBuffer.relink(jumpLocation, entryLabel);
9dae56ea 1840}
9dae56ea 1841
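// Monomorphic version of the chain walk above: failures jump back to the
// original slow case rather than to an earlier stub in a polymorphic list.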
ba379fdc 1842void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
9dae56ea 1843{
1844 ASSERT(count);
1845
1846 JumpList bucketsOfFail;
1847
1848 // Check regT0 is an object of the right Structure.
ba379fdc 1849 bucketsOfFail.append(checkStructure(regT0, structure));
1850
1851 Structure* currStructure = structure;
1852 RefPtr<Structure>* chainEntries = chain->head();
1853 JSObject* protoObject = 0;
1854 for (unsigned i = 0; i < count; ++i) {
1855 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1856 currStructure = chainEntries[i].get();
1857
1858 // Check the prototype object's Structure has not changed.
1859 Structure** prototypeStructureAddress = &(protoObject->m_structure);
f9bf01c6 1860#if CPU(X86_64)
1861 move(ImmPtr(currStructure), regT3);
1862 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
9dae56ea 1863#else
ba379fdc 1864 bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
1865#endif
1866 }
1867 ASSERT(protoObject);
1868
ba379fdc 1869 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1870 Jump success = jump();
1871
ba379fdc 1872 LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1873
1874 // Use the patch information to link the failure cases back to the original slow case routine.
ba379fdc 1875 patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
1876
1877 // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
ba379fdc 1878 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1879
1880 // Track the stub we have created so that it will be deleted later.
1881 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1882 stubInfo->stubRoutine = entryLabel;
1883
1884 // Finally patch the jump to slow case back in the hot path to jump here instead.
1885 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1886 RepatchBuffer repatchBuffer(m_codeBlock);
1887 repatchBuffer.relink(jumpLocation, entryLabel);
9dae56ea 1888
1889 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1890 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1891}
1892
ba379fdc 1893/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
9dae56ea 1894
ba379fdc 1895#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
9dae56ea 1896
ba379fdc 1897#endif // USE(JSVALUE32_64)
1898
1899} // namespace JSC
1900
1901#endif // ENABLE(JIT)