jit/JITPropertyAccess32_64.cpp (apple/javascriptcore.git, JavaScriptCore-903.5)
1 /*
2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #if USE(JSVALUE32_64)
30 #include "JIT.h"
31
32 #include "CodeBlock.h"
33 #include "JITInlineMethods.h"
34 #include "JITStubCall.h"
35 #include "JSArray.h"
36 #include "JSFunction.h"
37 #include "JSPropertyNameIterator.h"
38 #include "Interpreter.h"
39 #include "LinkBuffer.h"
40 #include "RepatchBuffer.h"
41 #include "ResultType.h"
42 #include "SamplingTool.h"
43
44 #ifndef NDEBUG
45 #include <stdio.h>
46 #endif
47
48 using namespace std;
49
50 namespace JSC {
51
52 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
53 {
54 unsigned base = currentInstruction[1].u.operand;
55 unsigned property = currentInstruction[2].u.operand;
56 unsigned value = currentInstruction[3].u.operand;
57
58 JITStubCall stubCall(this, cti_op_put_by_index);
59 stubCall.addArgument(base);
60 stubCall.addArgument(Imm32(property));
61 stubCall.addArgument(value);
62 stubCall.call();
63 }
64
65 void JIT::emit_op_put_getter(Instruction* currentInstruction)
66 {
67 unsigned base = currentInstruction[1].u.operand;
68 unsigned property = currentInstruction[2].u.operand;
69 unsigned function = currentInstruction[3].u.operand;
70
71 JITStubCall stubCall(this, cti_op_put_getter);
72 stubCall.addArgument(base);
73 stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
74 stubCall.addArgument(function);
75 stubCall.call();
76 }
77
78 void JIT::emit_op_put_setter(Instruction* currentInstruction)
79 {
80 unsigned base = currentInstruction[1].u.operand;
81 unsigned property = currentInstruction[2].u.operand;
82 unsigned function = currentInstruction[3].u.operand;
83
84 JITStubCall stubCall(this, cti_op_put_setter);
85 stubCall.addArgument(base);
86 stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
87 stubCall.addArgument(function);
88 stubCall.call();
89 }
90
91 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
92 {
93 unsigned dst = currentInstruction[1].u.operand;
94 unsigned base = currentInstruction[2].u.operand;
95 unsigned property = currentInstruction[3].u.operand;
96
97 JITStubCall stubCall(this, cti_op_del_by_id);
98 stubCall.addArgument(base);
99 stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
100 stubCall.call(dst);
101 }
102
103
104 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
105
106 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
107
108 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
109 void JIT::emit_op_method_check(Instruction*) {}
110 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
111 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
112 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
113 #endif
114
115 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
116 {
117 unsigned dst = currentInstruction[1].u.operand;
118 unsigned base = currentInstruction[2].u.operand;
119 unsigned property = currentInstruction[3].u.operand;
120
121 JITStubCall stubCall(this, cti_op_get_by_val);
122 stubCall.addArgument(base);
123 stubCall.addArgument(property);
124 stubCall.call(dst);
125 }
126
127 void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
128 {
129 ASSERT_NOT_REACHED();
130 }
131
132 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
133 {
134 unsigned base = currentInstruction[1].u.operand;
135 unsigned property = currentInstruction[2].u.operand;
136 unsigned value = currentInstruction[3].u.operand;
137
138 JITStubCall stubCall(this, cti_op_put_by_val);
139 stubCall.addArgument(base);
140 stubCall.addArgument(property);
141 stubCall.addArgument(value);
142 stubCall.call();
143 }
144
145 void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
146 {
147 ASSERT_NOT_REACHED();
148 }
149
150 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
151 {
152 int dst = currentInstruction[1].u.operand;
153 int base = currentInstruction[2].u.operand;
154 int ident = currentInstruction[3].u.operand;
155
156 JITStubCall stubCall(this, cti_op_get_by_id_generic);
157 stubCall.addArgument(base);
158 stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
159 stubCall.call(dst);
160
161 m_propertyAccessInstructionIndex++;
162 }
163
164 void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
165 {
166 m_propertyAccessInstructionIndex++;
167 ASSERT_NOT_REACHED();
168 }
169
170 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
171 {
172 int base = currentInstruction[1].u.operand;
173 int ident = currentInstruction[2].u.operand;
174 int value = currentInstruction[3].u.operand;
175
176 JITStubCall stubCall(this, cti_op_put_by_id_generic);
177 stubCall.addArgument(base);
178 stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
179 stubCall.addArgument(value);
180 stubCall.call();
181
182 m_propertyAccessInstructionIndex++;
183 }
184
185 void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
186 {
187 m_propertyAccessInstructionIndex++;
188 ASSERT_NOT_REACHED();
189 }
190
191 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
192
193 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
194
195 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
196
197 void JIT::emit_op_method_check(Instruction* currentInstruction)
198 {
199 // Assert that the following instruction is a get_by_id.
200 ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
201
202 currentInstruction += OPCODE_LENGTH(op_method_check);
203
204 // Do the method check - check the object & its prototype's structure inline (this is the common case).
205 m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
206 MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
207
208 int dst = currentInstruction[1].u.operand;
209 int base = currentInstruction[2].u.operand;
210
211 emitLoad(base, regT1, regT0);
212 emitJumpSlowCaseIfNotJSCell(base, regT1);
213
214 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
215
216 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
217 DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
218 Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
219
220 // This will be repatched so that the cached function is planted as an immediate - no memory load is needed.
221 DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
222
223 END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
224
225 move(TrustedImm32(JSValue::CellTag), regT1);
226 Jump match = jump();
227
228 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
229 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
230 ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
231
232 // Link the failure cases here.
233 structureCheck.link(this);
234 protoStructureCheck.link(this);
235
236 // Do a regular(ish) get_by_id (the slow case will be linked to
237 // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
238 compileGetByIdHotPath();
239
240 match.link(this);
241 emitStore(dst, regT1, regT0);
242 map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
243
244 // We've already generated the following get_by_id, so make sure it's skipped over.
245 m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
246 }
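// A sketch of what the patched method check buys us (names here are illustrative, not the real
// patching API): once this stub has been linked for a call site like o.f(...), the emitted
// checks are morally
//
//     if (o->structure() == cachedStructure
//             && cachedPrototype->structure() == cachedPrototypeStructure)
//         callee = cachedFunction;           // planted directly by the 'putFunction' immediate
//     else
//         callee = /* fall back to the get_by_id compiled just above */;
//
// so a monomorphic method call skips the property lookup entirely and hands the known
// JSFunction straight to the following op_call.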
247
248 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
249 {
250 currentInstruction += OPCODE_LENGTH(op_method_check);
251
252 int dst = currentInstruction[1].u.operand;
253 int base = currentInstruction[2].u.operand;
254 int ident = currentInstruction[3].u.operand;
255
256 compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
257
258 // We've already generated the following get_by_id, so make sure it's skipped over.
259 m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
260 }
261
262 #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
263
264 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
265 void JIT::emit_op_method_check(Instruction*) {}
266 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
267
268 #endif
269
270 JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
271 {
272 JSInterfaceJIT jit;
273 JumpList failures;
274 failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
275 failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
276
277 // Load string length to regT1, and start the process of loading the data pointer into regT0
278 jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
279 jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
280 jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
281
282 // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
283 failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
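// (Treating the index as unsigned folds the two range checks into one: a negative int32 index
// reinterpreted as uint32_t becomes a huge value, e.g. (uint32_t)-1 == 0xFFFFFFFF, so it also
// fails the AboveOrEqual-length test.)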
284
285 // Load the character
286 jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
287
288 failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
289 jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
290 jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
291 jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
292 jit.ret();
293
294 failures.link(&jit);
295 jit.move(TrustedImm32(0), regT0);
296 jit.ret();
297
298 LinkBuffer patchBuffer(*globalData, &jit, pool);
299 return patchBuffer.finalizeCode().m_code;
300 }
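// In outline, the generated stub above is equivalent to the following (a sketch built from the
// fields the code actually reads; the helper name is illustrative):
//
//     JSString* getCharacterString(JSGlobalData* globalData, JSString* str, uint32_t index)
//     {
//         if (str->m_fiberCount)             // a rope: bail out to the slow case
//             return 0;
//         if (index >= str->length())        // unsigned compare also rejects negative indices
//             return 0;
//         const UChar* data = /* StringImpl character data, reached via the two loadPtrs above */;
//         UChar c = data[index];
//         if (c >= 0x100)
//             return 0;                      // only the first 256 characters are cached
//         return globalData->smallStrings.singleCharacterStrings()[c];
//     }
//
// A zero (null payload) return makes the caller fall through to cti_op_get_by_val.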
301
302 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
303 {
304 unsigned dst = currentInstruction[1].u.operand;
305 unsigned base = currentInstruction[2].u.operand;
306 unsigned property = currentInstruction[3].u.operand;
307
308 emitLoad2(base, regT1, regT0, property, regT3, regT2);
309
310 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
311 emitJumpSlowCaseIfNotJSCell(base, regT1);
312 addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
313
314 loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
315 addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
316
317 load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
318 load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
319 addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
320
321 emitStore(dst, regT1, regT0);
322 map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
323 }
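// For reference, in the JSVALUE32_64 representation each JSValue is an 8-byte tag/payload pair
// (see JSValue.h; this is a simplified, little-endian view):
//
//     union EncodedValueDescriptor {
//         double asDouble;
//         struct { int32_t payload; int32_t tag; } asBits;
//     };
//
// so the fast path above is, in outline:
//
//     if (property.tag == Int32Tag && base is a JSArray
//             && index < base->vectorLength()
//             && storage->m_vector[index].tag != EmptyValueTag)
//         dst = storage->m_vector[index];    // two 32-bit loads: tag and payload
//     else
//         /* one of the addSlowCase branches fires and the slow case runs */;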
324
325 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
326 {
327 unsigned dst = currentInstruction[1].u.operand;
328 unsigned base = currentInstruction[2].u.operand;
329 unsigned property = currentInstruction[3].u.operand;
330
331 linkSlowCase(iter); // property int32 check
332 linkSlowCaseIfNotJSCell(iter, base); // base cell check
333
334 Jump nonCell = jump();
335 linkSlowCase(iter); // base array check
336 Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
337 emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
338 Jump failed = branchTestPtr(Zero, regT0);
339 emitStore(dst, regT1, regT0);
340 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
341 failed.link(this);
342 notString.link(this);
343 nonCell.link(this);
344
345 linkSlowCase(iter); // vector length check
346 linkSlowCase(iter); // empty value
347
348 JITStubCall stubCall(this, cti_op_get_by_val);
349 stubCall.addArgument(base);
350 stubCall.addArgument(property);
351 stubCall.call(dst);
352 }
353
354 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
355 {
356 unsigned base = currentInstruction[1].u.operand;
357 unsigned property = currentInstruction[2].u.operand;
358 unsigned value = currentInstruction[3].u.operand;
359
360 emitLoad2(base, regT1, regT0, property, regT3, regT2);
361
362 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
363 emitJumpSlowCaseIfNotJSCell(base, regT1);
364 addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
365 addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
366
367 loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
368
369 Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
370
371 Label storeResult(this);
372 emitLoad(value, regT1, regT0);
373 store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
374 store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
375 Jump end = jump();
376
377 empty.link(this);
378 add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
379 branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
380
381 add32(TrustedImm32(1), regT2, regT0);
382 store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
383 jump().linkTo(storeResult, this);
384
385 end.link(this);
386 }
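// The hole handling above mirrors this logic (a sketch; vector growth and sparse maps are left
// to the cti_op_put_by_val slow case, not handled here):
//
//     if (storage->m_vector[index].tag == EmptyValueTag) {   // writing into a hole
//         ++storage->m_numValuesInVector;
//         if (index >= storage->m_length)
//             storage->m_length = index + 1;                 // keep the reported length in sync
//     }
//     storage->m_vector[index] = value;                      // payload then tag, two 32-bit stores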
387
388 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
389 {
390 unsigned base = currentInstruction[1].u.operand;
391 unsigned property = currentInstruction[2].u.operand;
392 unsigned value = currentInstruction[3].u.operand;
393
394 linkSlowCase(iter); // property int32 check
395 linkSlowCaseIfNotJSCell(iter, base); // base cell check
396 linkSlowCase(iter); // base not array check
397 linkSlowCase(iter); // in vector check
398
399 JITStubCall stubPutByValCall(this, cti_op_put_by_val);
400 stubPutByValCall.addArgument(base);
401 stubPutByValCall.addArgument(property);
402 stubPutByValCall.addArgument(value);
403 stubPutByValCall.call();
404 }
405
406 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
407 {
408 int dst = currentInstruction[1].u.operand;
409 int base = currentInstruction[2].u.operand;
410
411 emitLoad(base, regT1, regT0);
412 emitJumpSlowCaseIfNotJSCell(base, regT1);
413 compileGetByIdHotPath();
414 emitStore(dst, regT1, regT0);
415 map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
416 }
417
418 void JIT::compileGetByIdHotPath()
419 {
420 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
421 // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
422 // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
423 // to jump back to if one of these trampolines finds a match.
424
425 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
426
427 Label hotPathBegin(this);
428 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
429 m_propertyAccessInstructionIndex++;
430
431 DataLabelPtr structureToCompare;
432 Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
433 addSlowCase(structureCheck);
434 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
435 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
436
437 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT2);
438 DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
439 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
440 DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
441 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);
442
443 Label putResult(this);
444 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
445
446 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
447 }
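// Conceptually the hot path just emitted is (PATCHED_* marks the constants that get rewritten
// later, e.g. by patchGetByIdSelf() below; little-endian payload/tag order assumed):
//
//     if (base->structure() != PATCHED_STRUCTURE)            // structureToCompare / structureCheck
//         goto slowCase;
//     storage = base->m_propertyStorage;
//     payload = *(int32_t*)(storage + PATCHED_OFFSET + 0);   // displacementLabel1
//     tag     = *(int32_t*)(storage + PATCHED_OFFSET + 4);   // displacementLabel2
//     putResult:                                             // trampolines jump back here with the result in regT1:regT0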
448
449 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
450 {
451 int dst = currentInstruction[1].u.operand;
452 int base = currentInstruction[2].u.operand;
453 int ident = currentInstruction[3].u.operand;
454
455 compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
456 }
457
458 void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
459 {
460 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
461 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
462 // of the call (which we can use to look up the patch information), but should an array-length or
463 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
464 // the distance from the call to the head of the slow case.
465 linkSlowCaseIfNotJSCell(iter, base);
466 linkSlowCase(iter);
467
468 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
469
470 #ifndef NDEBUG
471 Label coldPathBegin(this);
472 #endif
473 JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
474 stubCall.addArgument(regT1, regT0);
475 stubCall.addArgument(TrustedImmPtr(ident));
476 Call call = stubCall.call(dst);
477
478 END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
479
480 ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
481
482 // Track the location of the call; this will be used to recover patch information.
483 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
484 m_propertyAccessInstructionIndex++;
485 }
486
487 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
488 {
489 // In order to be able to patch both the Structure and the object offset, we store a single pointer
490 // ('hotPathBegin') to just after the point where the arguments have been loaded into registers, and we
491 // generate code such that the Structure and offset are always at the same distance from it.
492
493 int base = currentInstruction[1].u.operand;
494 int value = currentInstruction[3].u.operand;
495
496 emitLoad2(base, regT1, regT0, value, regT3, regT2);
497
498 emitJumpSlowCaseIfNotJSCell(base, regT1);
499
500 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
501
502 Label hotPathBegin(this);
503 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
504 m_propertyAccessInstructionIndex++;
505
506 // It is important that the following instruction plants a 32-bit immediate, so that it can be patched over.
507 DataLabelPtr structureToCompare;
508 addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
509 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
510
511 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
512 DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchPutByIdDefaultOffset)); // payload
513 DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchPutByIdDefaultOffset)); // tag
514
515 END_UNINTERRUPTED_SEQUENCE(sequencePutById);
516
517 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
518 ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
519 }
520
521 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
522 {
523 int base = currentInstruction[1].u.operand;
524 int ident = currentInstruction[2].u.operand;
525 int direct = currentInstruction[8].u.operand;
526
527 linkSlowCaseIfNotJSCell(iter, base);
528 linkSlowCase(iter);
529
530 JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
531 stubCall.addArgument(regT1, regT0);
532 stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
533 stubCall.addArgument(regT3, regT2);
534 Call call = stubCall.call();
535
536 // Track the location of the call; this will be used to recover patch information.
537 m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
538 m_propertyAccessInstructionIndex++;
539 }
540
541 // Compile a store into an object's property storage. May overwrite base.
542 void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
543 {
544 int offset = cachedOffset;
545 if (structure->isUsingInlineStorage())
546 offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
547 else
548 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
549 emitStore(offset, valueTag, valuePayload, base);
550 }
551
552 // Compile a load from an object's property storage. May overwrite base.
553 void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
554 {
555 int offset = cachedOffset;
556 if (structure->isUsingInlineStorage()) {
557 offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
558 emitLoad(offset, resultTag, resultPayload, base);
559 } else {
560 RegisterID temp = resultPayload;
561 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), temp);
562 emitLoad(offset, resultTag, resultPayload, temp);
563 }
564 }
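// Two layouts are possible for the property slots (a simplified view; see JSObject.h):
//
//   - inline storage: the first few slots live directly inside the JSObject cell, starting
//     offsetOfInlineStorage() bytes from the object pointer;
//   - out-of-line storage: m_propertyStorage points at a separately allocated array of slots.
//
// emitLoad/emitStore scale their index by sizeof(Register) (8 bytes, one tag/payload pair), so
// for inline storage we simply bias the cached offset:
//
//     base + (cachedOffset + offsetOfInlineStorage() / sizeof(Register)) * 8
//         == base + offsetOfInlineStorage() + cachedOffset * 8
//
// which is exactly the inline slot's address; otherwise we first load m_propertyStorage and
// index off that.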
565
566 void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
567 {
568 load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
569 load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
570 }
571
572 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
573 {
574 // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
575 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
576 // On MIPS, SH4 and ARM we don't add sizeof(void*) to the stack offset.
577 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
578 // On MIPS, SH4 and ARM we don't add sizeof(void*) to the stack offset.
579 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
580 #else
581 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
582 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
583 #endif
584
585 JumpList failureCases;
586 failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
587 failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
588 testPrototype(oldStructure->storedPrototype(), failureCases);
589
590 if (!direct) {
591 // Verify that nothing in the prototype chain has a setter for this property.
592 for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
593 testPrototype((*it)->storedPrototype(), failureCases);
594 }
595
596 // Reallocate property storage if needed.
597 Call callTarget;
598 bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
599 if (willNeedStorageRealloc) {
600 // This trampoline was called like a JIT stub; before we can call again we need to
601 // remove the return address from the stack, to prevent the stack from becoming misaligned.
602 preserveReturnAddressAfterCall(regT3);
603
604 JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
605 stubCall.skipArgument(); // base
606 stubCall.skipArgument(); // ident
607 stubCall.skipArgument(); // value
608 stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
609 stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
610 stubCall.call(regT0);
611
612 restoreReturnAddressBeforeReturn(regT3);
613
614 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
615 // On MIPS, SH4 and ARM we don't add sizeof(void*) to the stack offset.
616 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
617 // On MIPS, SH4 and ARM we don't add sizeof(void*) to the stack offset.
618 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
619 #else
620 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
621 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
622 #endif
623 }
624
625 storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));
626
627 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
628 // On MIPS, SH4 and ARM we don't add sizeof(void*) to the stack offset.
629 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
630 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
631 #else
632 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
633 load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
634 #endif
635
636 // Write the value
637 compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
638
639 ret();
640
641 ASSERT(!failureCases.empty());
642 failureCases.link(this);
643 restoreArgumentReferenceForTrampoline();
644 Call failureCall = tailRecursiveCall();
645
646 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
647
648 patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
649
650 if (willNeedStorageRealloc) {
651 ASSERT(m_calls.size() == 1);
652 patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
653 }
654
655 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
656 stubInfo->stubRoutine = entryLabel;
657 RepatchBuffer repatchBuffer(m_codeBlock);
658 repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
659 }
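// In outline, the transition stub generated above performs (a sketch; the reallocation itself
// is done by the cti_op_put_by_id_transition_realloc stub, not by inline code):
//
//     if (base->structure() != oldStructure) goto fail;
//     if (any Structure on the prototype chain has changed) goto fail;
//     if (newStructure->propertyStorageCapacity() != oldStructure->propertyStorageCapacity())
//         /* call the realloc stub to grow base's property storage */;
//     base->setStructure(newStructure);                  // the storePtrWithWriteBarrier above
//     base's property slot [cachedOffset] = value;       // compilePutDirectOffset
//     return;
//   fail:
//     tail-call cti_op_put_by_id_fail (or _direct_fail) to take the generic path.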
660
661 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
662 {
663 RepatchBuffer repatchBuffer(codeBlock);
664
665 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
666 // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
667 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
668
669 int offset = sizeof(JSValue) * cachedOffset;
670
671 // Patch the offset into the property map to load from, then patch the Structure to look for.
672 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
673 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
674 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
675 }
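// Worked example: with cachedOffset == 3, offset = sizeof(JSValue) * 3 = 24 in this 32_64
// layout, so the payload load above is repatched to read [storage + 24 + 0] and the tag load
// to read [storage + 24 + 4] (little-endian payload/tag order assumed).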
676
677 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
678 {
679 RepatchBuffer repatchBuffer(codeBlock);
680
681 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
682 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
683 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
684
685 int offset = sizeof(JSValue) * cachedOffset;
686
687 // Patch the offset into the property map to load from, then patch the Structure to look for.
688 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
689 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
690 repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
691 }
692
693 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
694 {
695 StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
696
697 // regT0 holds a JSCell*
698
699 // Check for array
700 Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
701
702 // Checks out okay! - get the length from the storage
703 loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
704 load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
705
706 Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
707 move(regT2, regT0);
708 move(TrustedImm32(JSValue::Int32Tag), regT1);
709 Jump success = jump();
710
711 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
712
713 // Use the patch information to link the failure cases back to the original slow case routine.
714 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
715 patchBuffer.link(failureCases1, slowCaseBegin);
716 patchBuffer.link(failureCases2, slowCaseBegin);
717
718 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
719 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
720
721 // Track the stub we have created so that it will be deleted later.
722 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
723 stubInfo->stubRoutine = entryLabel;
724
725 // Finally patch the jump to slow case back in the hot path to jump here instead.
726 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
727 RepatchBuffer repatchBuffer(m_codeBlock);
728 repatchBuffer.relink(jumpLocation, entryLabel);
729
730 // We don't want to patch more than once - in the future go to cti_op_get_by_id_generic.
731 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
732 }
733
734 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
735 {
736 // regT0 holds a JSCell*
737
738 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
739 // referencing the prototype object - let's speculatively load its table nice and early!)
740 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
741
742 Jump failureCases1 = checkStructure(regT0, structure);
743
744 // Check that the prototype object's Structure has not changed.
745 move(TrustedImmPtr(protoObject), regT3);
746 Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
747
748 bool needsStubLink = false;
749 // Checks out okay!
750 if (slot.cachedPropertyType() == PropertySlot::Getter) {
751 needsStubLink = true;
752 compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
753 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
754 stubCall.addArgument(regT1);
755 stubCall.addArgument(regT0);
756 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
757 stubCall.call();
758 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
759 needsStubLink = true;
760 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
761 stubCall.addArgument(TrustedImmPtr(protoObject));
762 stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
763 stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
764 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
765 stubCall.call();
766 } else
767 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
768
769 Jump success = jump();
770
771 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
772
773 // Use the patch information to link the failure cases back to the original slow case routine.
774 CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
775 patchBuffer.link(failureCases1, slowCaseBegin);
776 patchBuffer.link(failureCases2, slowCaseBegin);
777
778 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
779 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
780
781 if (needsStubLink) {
782 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
783 if (iter->to)
784 patchBuffer.link(iter->from, FunctionPtr(iter->to));
785 }
786 }
787
788 // Track the stub we have created so that it will be deleted later.
789 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
790 stubInfo->stubRoutine = entryLabel;
791
792 // Finally patch the jump to slow case back in the hot path to jump here instead.
793 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
794 RepatchBuffer repatchBuffer(m_codeBlock);
795 repatchBuffer.relink(jumpLocation, entryLabel);
796
797 // We don't want to patch more than once - in the future go to cti_op_get_by_id_generic.
798 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
799 }
800
801
802 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
803 {
804 // regT0 holds a JSCell*
805 Jump failureCase = checkStructure(regT0, structure);
806 bool needsStubLink = false;
807 if (slot.cachedPropertyType() == PropertySlot::Getter) {
808 needsStubLink = true;
809 compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
810 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
811 stubCall.addArgument(regT1);
812 stubCall.addArgument(regT0);
813 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
814 stubCall.call();
815 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
816 needsStubLink = true;
817 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
818 stubCall.addArgument(regT0);
819 stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
820 stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
821 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
822 stubCall.call();
823 } else
824 compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
825
826 Jump success = jump();
827
828 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
829 if (needsStubLink) {
830 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
831 if (iter->to)
832 patchBuffer.link(iter->from, FunctionPtr(iter->to));
833 }
834 }
835 // Use the patch information to link the failure cases back to the original slow case routine.
836 CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
837 if (!lastProtoBegin)
838 lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
839
840 patchBuffer.link(failureCase, lastProtoBegin);
841
842 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
843 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
844
845 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
846
847 polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);
848
849 // Finally patch the jump to slow case back in the hot path to jump here instead.
850 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
851 RepatchBuffer repatchBuffer(m_codeBlock);
852 repatchBuffer.relink(jumpLocation, entryLabel);
853 }
854
855 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
856 {
857 // regT0 holds a JSCell*
858
859 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
860 // referencing the prototype object - let's speculatively load its table nice and early!)
861 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
862
863 // Check that regT0 holds an object with the right Structure.
864 Jump failureCases1 = checkStructure(regT0, structure);
865
866 // Check that the prototype object's Structure has not changed.
867 move(TrustedImmPtr(protoObject), regT3);
868 Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
869
870 bool needsStubLink = false;
871 if (slot.cachedPropertyType() == PropertySlot::Getter) {
872 needsStubLink = true;
873 compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
874 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
875 stubCall.addArgument(regT1);
876 stubCall.addArgument(regT0);
877 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
878 stubCall.call();
879 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
880 needsStubLink = true;
881 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
882 stubCall.addArgument(TrustedImmPtr(protoObject));
883 stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
884 stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
885 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
886 stubCall.call();
887 } else
888 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
889
890 Jump success = jump();
891
892 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
893 if (needsStubLink) {
894 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
895 if (iter->to)
896 patchBuffer.link(iter->from, FunctionPtr(iter->to));
897 }
898 }
899 // Use the patch information to link the failure cases back to the original slow case routine.
900 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
901 patchBuffer.link(failureCases1, lastProtoBegin);
902 patchBuffer.link(failureCases2, lastProtoBegin);
903
904 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
905 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
906
907 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
908
909 prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);
910
911 // Finally patch the jump to slow case back in the hot path to jump here instead.
912 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
913 RepatchBuffer repatchBuffer(m_codeBlock);
914 repatchBuffer.relink(jumpLocation, entryLabel);
915 }
916
917 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
918 {
919 // regT0 holds a JSCell*
920 ASSERT(count);
921
922 JumpList bucketsOfFail;
923
924 // Check that regT0 holds an object with the right Structure.
925 bucketsOfFail.append(checkStructure(regT0, structure));
926
927 Structure* currStructure = structure;
928 WriteBarrier<Structure>* it = chain->head();
929 JSObject* protoObject = 0;
930 for (unsigned i = 0; i < count; ++i, ++it) {
931 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
932 currStructure = it->get();
933 testPrototype(protoObject, bucketsOfFail);
934 }
935 ASSERT(protoObject);
936
937 bool needsStubLink = false;
938 if (slot.cachedPropertyType() == PropertySlot::Getter) {
939 needsStubLink = true;
940 compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
941 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
942 stubCall.addArgument(regT1);
943 stubCall.addArgument(regT0);
944 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
945 stubCall.call();
946 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
947 needsStubLink = true;
948 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
949 stubCall.addArgument(TrustedImmPtr(protoObject));
950 stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
951 stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
952 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
953 stubCall.call();
954 } else
955 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
956
957 Jump success = jump();
958
959 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
960 if (needsStubLink) {
961 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
962 if (iter->to)
963 patchBuffer.link(iter->from, FunctionPtr(iter->to));
964 }
965 }
966 // Use the patch information to link the failure cases back to the original slow case routine.
967 CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
968
969 patchBuffer.link(bucketsOfFail, lastProtoBegin);
970
971 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
972 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
973
974 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
975
976 // Track the stub we have created so that it will be deleted later.
977 prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
978
979 // Finally patch the jump to slow case back in the hot path to jump here instead.
980 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
981 RepatchBuffer repatchBuffer(m_codeBlock);
982 repatchBuffer.relink(jumpLocation, entryLabel);
983 }
984
985 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
986 {
987 // regT0 holds a JSCell*
988 ASSERT(count);
989
990 JumpList bucketsOfFail;
991
992 // Check that regT0 holds an object with the right Structure.
993 bucketsOfFail.append(checkStructure(regT0, structure));
994
995 Structure* currStructure = structure;
996 WriteBarrier<Structure>* it = chain->head();
997 JSObject* protoObject = 0;
998 for (unsigned i = 0; i < count; ++i, ++it) {
999 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1000 currStructure = it->get();
1001 testPrototype(protoObject, bucketsOfFail);
1002 }
1003 ASSERT(protoObject);
1004
1005 bool needsStubLink = false;
1006 if (slot.cachedPropertyType() == PropertySlot::Getter) {
1007 needsStubLink = true;
1008 compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
1009 JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
1010 stubCall.addArgument(regT1);
1011 stubCall.addArgument(regT0);
1012 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
1013 stubCall.call();
1014 } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
1015 needsStubLink = true;
1016 JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
1017 stubCall.addArgument(TrustedImmPtr(protoObject));
1018 stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
1019 stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
1020 stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
1021 stubCall.call();
1022 } else
1023 compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1024 Jump success = jump();
1025
1026 LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
1027 if (needsStubLink) {
1028 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1029 if (iter->to)
1030 patchBuffer.link(iter->from, FunctionPtr(iter->to));
1031 }
1032 }
1033 // Use the patch information to link the failure cases back to the original slow case routine.
1034 patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
1035
1036 // On success, return to the hot path code, at a point where it will perform the store to dst for us.
1037 patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1038
1039 // Track the stub we have created so that it will be deleted later.
1040 CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1041 stubInfo->stubRoutine = entryLabel;
1042
1043 // Finally patch the jump to slow case back in the hot path to jump here instead.
1044 CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1045 RepatchBuffer repatchBuffer(m_codeBlock);
1046 repatchBuffer.relink(jumpLocation, entryLabel);
1047
1048 // We don't want to patch more than once - in the future go to cti_op_get_by_id_generic.
1049 repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1050 }
1051
1052 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1053
1054 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1055
1056 void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
1057 {
1058 ASSERT(sizeof(JSValue) == 8);
1059
1060 loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
1061 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
1062 loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
1063 }
1064
1065 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
1066 {
1067 unsigned dst = currentInstruction[1].u.operand;
1068 unsigned base = currentInstruction[2].u.operand;
1069 unsigned property = currentInstruction[3].u.operand;
1070 unsigned expected = currentInstruction[4].u.operand;
1071 unsigned iter = currentInstruction[5].u.operand;
1072 unsigned i = currentInstruction[6].u.operand;
1073
1074 emitLoad2(property, regT1, regT0, base, regT3, regT2);
1075 emitJumpSlowCaseIfNotJSCell(property, regT1);
1076 addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
1077 // Property registers are now available as the property is known
1078 emitJumpSlowCaseIfNotJSCell(base, regT3);
1079 emitLoadPayload(iter, regT1);
1080
1081 // Test base's structure
1082 loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
1083 addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
1084 load32(addressFor(i), regT3);
1085 sub32(TrustedImm32(1), regT3);
1086 addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
1087 compileGetDirectOffset(regT2, regT1, regT0, regT3);
1088
1089 emitStore(dst, regT1, regT0);
1090 map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
1091 }
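// get_by_pname is the for-in fast case ("for (var p in o) use(o[p])"). When the property
// register still holds the exact string the iterator produced and o's Structure is still the
// one the JSPropertyNameIterator cached, slot i-1 can be read straight out of the property
// storage. In outline:
//
//     if (property == expected                               // same JSString cell
//             && o->structure() == iter->m_cachedStructure
//             && (i - 1) < iter->m_numCacheableSlots)
//         dst = property slot (i - 1) of o;                  // compileGetDirectOffset above
//     else
//         dst = cti_op_get_by_val(o, property);              // slow case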
1092
1093 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1094 {
1095 unsigned dst = currentInstruction[1].u.operand;
1096 unsigned base = currentInstruction[2].u.operand;
1097 unsigned property = currentInstruction[3].u.operand;
1098
1099 linkSlowCaseIfNotJSCell(iter, property);
1100 linkSlowCase(iter);
1101 linkSlowCaseIfNotJSCell(iter, base);
1102 linkSlowCase(iter);
1103 linkSlowCase(iter);
1104
1105 JITStubCall stubCall(this, cti_op_get_by_val);
1106 stubCall.addArgument(base);
1107 stubCall.addArgument(property);
1108 stubCall.call(dst);
1109 }
1110
1111 } // namespace JSC
1112
1113 #endif // USE(JSVALUE32_64)
1114 #endif // ENABLE(JIT)