]>
Commit | Line | Data |
---|---|---|
9dae56ea | 1 | /* |
ba379fdc | 2 | * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. |
9dae56ea A |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #include "config.h" | |
ba379fdc | 27 | |
4e4e5a6f | 28 | #if !USE(JSVALUE32_64) |
ba379fdc | 29 | |
4e4e5a6f | 30 | #include "JIT.h" |
ba379fdc | 31 | |
4e4e5a6f | 32 | #if ENABLE(JIT) |
ba379fdc | 33 | |
4e4e5a6f A |
34 | #include "CodeBlock.h" |
35 | #include "GetterSetter.h" | |
36 | #include "JITInlineMethods.h" | |
37 | #include "JITStubCall.h" | |
38 | #include "JSArray.h" | |
39 | #include "JSFunction.h" | |
40 | #include "JSPropertyNameIterator.h" | |
41 | #include "Interpreter.h" | |
42 | #include "LinkBuffer.h" | |
43 | #include "RepatchBuffer.h" | |
44 | #include "ResultType.h" | |
45 | #include "SamplingTool.h" | |
ba379fdc | 46 | |
4e4e5a6f A |
47 | #ifndef NDEBUG |
48 | #include <stdio.h> | |
ba379fdc A |
49 | #endif |
50 | ||
4e4e5a6f | 51 | using namespace std; |
ba379fdc | 52 | |
4e4e5a6f | 53 | namespace JSC { |
ba379fdc | 54 | |
// Builds a shared thunk implementing the fast case of string[index]:
// expects the string cell in regT0 and the (boxed) int index in regT1.
// On success, returns the cached single-character JSString* in regT0.
// On any failure (vptr is not jsStringVPtr, string is a rope
// (m_fiberCount != 0), index out of bounds, or char code >= 0x100) it
// returns 0 in regT0 so the caller can fall back to the generic path.
PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
#if USE(JSVALUE64)
    jit.zeroExtend32ToPtr(regT1, regT1);
#else
    // Strip the immediate tag to recover the raw int index.
    jit.emitFastArithImmToInt(regT1);
#endif

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character (UTF-16 code unit, hence TimesTwo scaling).
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    // Only chars < 0x100 are in the small-strings cache.
    failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
    jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    // Failure: return 0 so the caller's branchTestPtr(Zero, regT0) bails out.
    failures.link(&jit);
    jit.move(Imm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool);
    return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
90 | ||
// get_by_val fast path: load an element out of a JSArray's vector when the
// base is a JSArray, the property is an immediate int within m_vectorLength,
// and the slot is not a hole. Four slow cases are registered, in the order
// expected by emitSlow_op_get_by_val.
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    // A zero entry means the slot is a hole.
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}
121 | ||
// Slow path for get_by_val. Links the four slow cases registered by
// emit_op_get_by_val (in the same order). For string bases it first tries
// the shared stringGetByValStubGenerator thunk before falling back to the
// generic cti_op_get_by_val stub.
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
    // The thunk returns 0 in regT0 on failure; fall through to the stub then.
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
149 | ||
// Loads a property at a dynamic (register-held) offset, choosing at runtime
// between the object's inline storage and its external storage array by
// comparing the Structure's m_propertyStorageCapacity against
// JSObject::inlineStorageCapacity. 'base' holds the object, 'offset' the
// slot index; the value lands in 'result'. 'scratch' may be clobbered.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
    // Both fields are compared with branch32/Imm32 below, so they must be 32-bit.
    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));

    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
    finishedLoad.link(this);
}
163 | ||
// get_by_pname: optimized property load inside a for-in loop. Fast path
// requires that the property register still holds the value cached in
// 'expected', the base's Structure matches the iterator's cached Structure,
// and the loop counter 'i' (decremented by one before use, so presumably
// 1-based — confirm against the for-in bytecode) indexes a cacheable slot.
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
188 | ||
// Slow path for get_by_pname: link the four slow cases registered by the
// fast path (same order) and fall back to the generic cti_op_get_by_val stub.
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property != expected
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // structure mismatch
    linkSlowCase(iter); // index not a cacheable slot

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
205 | ||
// put_by_val fast path: store into a JSArray's vector when the base is a
// JSArray and the index is an immediate int within m_vectorLength. Writing
// into a hole bumps m_numValuesInVector and, if needed, grows m_length to
// index + 1 before storing.
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);

    // A zero entry means the slot is currently a hole.
    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    // index >= m_length: set the length to index + 1, then store.
    move(regT1, regT0);
    add32(Imm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
244 | ||
245 | void JIT::emit_op_put_by_index(Instruction* currentInstruction) | |
246 | { | |
247 | JITStubCall stubCall(this, cti_op_put_by_index); | |
248 | stubCall.addArgument(currentInstruction[1].u.operand, regT2); | |
249 | stubCall.addArgument(Imm32(currentInstruction[2].u.operand)); | |
250 | stubCall.addArgument(currentInstruction[3].u.operand, regT2); | |
251 | stubCall.call(); | |
252 | } | |
253 | ||
254 | void JIT::emit_op_put_getter(Instruction* currentInstruction) | |
255 | { | |
256 | JITStubCall stubCall(this, cti_op_put_getter); | |
257 | stubCall.addArgument(currentInstruction[1].u.operand, regT2); | |
258 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); | |
259 | stubCall.addArgument(currentInstruction[3].u.operand, regT2); | |
260 | stubCall.call(); | |
261 | } | |
262 | ||
263 | void JIT::emit_op_put_setter(Instruction* currentInstruction) | |
264 | { | |
265 | JITStubCall stubCall(this, cti_op_put_setter); | |
266 | stubCall.addArgument(currentInstruction[1].u.operand, regT2); | |
267 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand))); | |
268 | stubCall.addArgument(currentInstruction[3].u.operand, regT2); | |
269 | stubCall.call(); | |
270 | } | |
271 | ||
272 | void JIT::emit_op_del_by_id(Instruction* currentInstruction) | |
273 | { | |
274 | JITStubCall stubCall(this, cti_op_del_by_id); | |
275 | stubCall.addArgument(currentInstruction[2].u.operand, regT2); | |
276 | stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand))); | |
277 | stubCall.call(currentInstruction[1].u.operand); | |
9dae56ea A |
278 | } |
279 | ||
280 | ||
ba379fdc A |
281 | #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
282 | ||
283 | /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ | |
284 | ||
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
// The nop fast path registers no slow cases, so this must never be reached.
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
288 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) | |
289 | #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS" | |
290 | #endif | |
291 | ||
292 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) | |
293 | { | |
294 | unsigned resultVReg = currentInstruction[1].u.operand; | |
295 | unsigned baseVReg = currentInstruction[2].u.operand; | |
296 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); | |
297 | ||
298 | emitGetVirtualRegister(baseVReg, regT0); | |
299 | JITStubCall stubCall(this, cti_op_get_by_id_generic); | |
300 | stubCall.addArgument(regT0); | |
301 | stubCall.addArgument(ImmPtr(ident)); | |
302 | stubCall.call(resultVReg); | |
303 | ||
304 | m_propertyAccessInstructionIndex++; | |
305 | } | |
306 | ||
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // The generic fast path above registers no slow cases, so this is unreachable.
    ASSERT_NOT_REACHED();
}
311 | ||
ba379fdc | 312 | void JIT::emit_op_put_by_id(Instruction* currentInstruction) |
9dae56ea | 313 | { |
ba379fdc A |
314 | unsigned baseVReg = currentInstruction[1].u.operand; |
315 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); | |
316 | unsigned valueVReg = currentInstruction[3].u.operand; | |
4e4e5a6f | 317 | unsigned direct = currentInstruction[8].u.operand; |
ba379fdc A |
318 | |
319 | emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); | |
9dae56ea | 320 | |
4e4e5a6f | 321 | JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic, cti_op_put_by_id_generic); |
ba379fdc A |
322 | stubCall.addArgument(regT0); |
323 | stubCall.addArgument(ImmPtr(ident)); | |
324 | stubCall.addArgument(regT1); | |
325 | stubCall.call(); | |
9dae56ea | 326 | |
ba379fdc | 327 | m_propertyAccessInstructionIndex++; |
9dae56ea A |
328 | } |
329 | ||
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // The generic fast path above registers no slow cases, so this is unreachable.
    ASSERT_NOT_REACHED();
}
334 | ||
ba379fdc A |
335 | #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
336 | ||
337 | /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ | |
338 | ||
339 | #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) | |
340 | ||
// method_check: emits a patchable inline check of the base object's
// Structure, its prototype's Structure, and a patchable function constant.
// If all three match the values patched in later, the cached function is
// used directly; otherwise control falls through to a regular(ish)
// get_by_id. The uninterrupted sequence and ASSERT_JIT_OFFSETs keep the
// patch offsets at known constants for the repatching code.
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
9dae56ea | 391 | |
// Slow path for method_check: identical to the get_by_id slow case, except
// it calls cti_op_get_by_id_method_check (isMethodCheck = true) and skips
// the already-compiled trailing get_by_id.
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
404 | ||
405 | #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS) | |
406 | ||
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
// The nop fast path registers no slow cases, so this must never be reached.
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
410 | ||
411 | #endif | |
412 | ||
413 | void JIT::emit_op_get_by_id(Instruction* currentInstruction) | |
414 | { | |
415 | unsigned resultVReg = currentInstruction[1].u.operand; | |
416 | unsigned baseVReg = currentInstruction[2].u.operand; | |
417 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); | |
418 | ||
419 | emitGetVirtualRegister(baseVReg, regT0); | |
420 | compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++); | |
421 | emitPutVirtualRegister(resultVReg); | |
422 | } | |
423 | ||
424 | void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex) | |
9dae56ea A |
425 | { |
426 | // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched. | |
427 | // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump | |
428 | // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label | |
429 | // to jump back to if one of these trampolies finds a match. | |
430 | ||
ba379fdc | 431 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
9dae56ea | 432 | |
f9bf01c6 A |
433 | BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); |
434 | ||
9dae56ea A |
435 | Label hotPathBegin(this); |
436 | m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; | |
437 | ||
438 | DataLabelPtr structureToCompare; | |
ba379fdc | 439 | Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); |
9dae56ea | 440 | addSlowCase(structureCheck); |
f9bf01c6 A |
441 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure); |
442 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase) | |
9dae56ea | 443 | |
ba379fdc A |
444 | Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0); |
445 | Label externalLoadComplete(this); | |
f9bf01c6 A |
446 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad); |
447 | ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad); | |
ba379fdc A |
448 | |
449 | DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0); | |
f9bf01c6 | 450 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset); |
9dae56ea A |
451 | |
452 | Label putResult(this); | |
f9bf01c6 A |
453 | |
454 | END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); | |
455 | ||
456 | ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult); | |
9dae56ea A |
457 | } |
458 | ||
ba379fdc A |
459 | void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
460 | { | |
461 | unsigned resultVReg = currentInstruction[1].u.operand; | |
462 | unsigned baseVReg = currentInstruction[2].u.operand; | |
463 | Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); | |
464 | ||
465 | compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false); | |
466 | } | |
9dae56ea | 467 | |
// Shared slow case for get_by_id and method_check: links the two slow cases
// from the hot path, calls the appropriate stub, and records the call's
// return location so the repatching code can find this access later. The
// ASSERT_JIT_OFFSET pins the call at a fixed distance from coldPathBegin.
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should a array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    // Only needed for the ASSERT_JIT_OFFSET below.
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
497 | ||
// Emits the patchable inline-cache hot path for put_by_id: a Structure
// check (initially against a default structure, so it always takes the slow
// case until patched), an optionally-patched external-storage load, and a
// store to a placeholder property offset. All patch points sit at fixed
// offsets from hotPathBegin, enforced by the ASSERT_JIT_OFFSETs.
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}
536 | ||
// Slow path for put_by_id: links the two slow cases from the hot path,
// calls the put_by_id stub (direct variant when operand 8 requests a direct
// put), and records the call's return location for later repatching.
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
557 | ||
ba379fdc A |
558 | // Compile a store into an object's property storage. May overwrite the |
559 | // value in objectReg. | |
560 | void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset) | |
561 | { | |
562 | int offset = cachedOffset * sizeof(JSValue); | |
563 | if (structure->isUsingInlineStorage()) | |
564 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); | |
565 | else | |
566 | loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); | |
567 | storePtr(value, Address(base, offset)); | |
568 | } | |
569 | ||
570 | // Compile a load from an object's property storage. May overwrite base. | |
571 | void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset) | |
9dae56ea | 572 | { |
ba379fdc A |
573 | int offset = cachedOffset * sizeof(JSValue); |
574 | if (structure->isUsingInlineStorage()) | |
575 | offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); | |
576 | else | |
577 | loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); | |
578 | loadPtr(Address(base, offset), result); | |
9dae56ea A |
579 | } |
580 | ||
// Compile a load of a cached slot from a *known* object (e.g. a prototype):
// the storage decision is made at compile time from the object itself, and
// the storage address is baked in as an absolute pointer. 'temp' is only
// clobbered on the external-storage path.
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}
591 | ||
// Appends a failure jump taken if the given structure's prototype cell no
// longer has the Structure it had at compile time. A null prototype (end of
// the chain) needs no check. Clobbers regT2/regT3.
void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    // Compare the prototype's current m_structure (loaded via regT2) against
    // the compile-time Structure* baked into regT3.
    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
}
601 | ||
4e4e5a6f | 602 | void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) |
9dae56ea A |
603 | { |
604 | JumpList failureCases; | |
605 | // Check eax is an object of the right Structure. | |
ba379fdc A |
606 | failureCases.append(emitJumpIfNotJSCell(regT0)); |
607 | failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure))); | |
f9bf01c6 | 608 | testPrototype(oldStructure, failureCases); |
9dae56ea | 609 | |
9dae56ea | 610 | // ecx = baseObject->m_structure |
4e4e5a6f A |
611 | if (!direct) { |
612 | for (RefPtr<Structure>* it = chain->head(); *it; ++it) | |
613 | testPrototype(it->get(), failureCases); | |
614 | } | |
9dae56ea | 615 | |
ba379fdc | 616 | Call callTarget; |
9dae56ea A |
617 | |
618 | // emit a call only if storage realloc is needed | |
ba379fdc A |
619 | bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); |
620 | if (willNeedStorageRealloc) { | |
621 | // This trampoline was called to like a JIT stub; before we can can call again we need to | |
622 | // remove the return address from the stack, to prevent the stack from becoming misaligned. | |
623 | preserveReturnAddressAfterCall(regT3); | |
624 | ||
625 | JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc); | |
626 | stubCall.skipArgument(); // base | |
627 | stubCall.skipArgument(); // ident | |
628 | stubCall.skipArgument(); // value | |
629 | stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity())); | |
630 | stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity())); | |
631 | stubCall.call(regT0); | |
f9bf01c6 | 632 | emitGetJITStubArg(2, regT1); |
ba379fdc A |
633 | |
634 | restoreReturnAddressBeforeReturn(regT3); | |
9dae56ea A |
635 | } |
636 | ||
637 | // Assumes m_refCount can be decremented easily, refcount decrement is safe as | |
638 | // codeblock should ensure oldStructure->m_refCount > 0 | |
639 | sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount())); | |
640 | add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount())); | |
ba379fdc | 641 | storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); |
9dae56ea A |
642 | |
643 | // write the value | |
ba379fdc | 644 | compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset); |
9dae56ea A |
645 | |
646 | ret(); | |
647 | ||
ba379fdc A |
648 | ASSERT(!failureCases.empty()); |
649 | failureCases.link(this); | |
650 | restoreArgumentReferenceForTrampoline(); | |
651 | Call failureCall = tailRecursiveCall(); | |
9dae56ea | 652 | |
ba379fdc | 653 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea | 654 | |
4e4e5a6f | 655 | patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail)); |
9dae56ea | 656 | |
ba379fdc A |
657 | if (willNeedStorageRealloc) { |
658 | ASSERT(m_calls.size() == 1); | |
659 | patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc)); | |
660 | } | |
9dae56ea | 661 | |
ba379fdc A |
662 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
663 | stubInfo->stubRoutine = entryLabel; | |
664 | RepatchBuffer repatchBuffer(m_codeBlock); | |
665 | repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel); | |
9dae56ea A |
666 | } |
667 | ||
ba379fdc | 668 | void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) |
9dae56ea | 669 | { |
ba379fdc A |
670 | RepatchBuffer repatchBuffer(codeBlock); |
671 | ||
9dae56ea | 672 | // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. |
ba379fdc A |
673 | // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now. |
674 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail)); | |
675 | ||
676 | int offset = sizeof(JSValue) * cachedOffset; | |
677 | ||
678 | // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load | |
679 | // and makes the subsequent load's offset automatically correct | |
680 | if (structure->isUsingInlineStorage()) | |
681 | repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad)); | |
9dae56ea A |
682 | |
683 | // Patch the offset into the propoerty map to load from, then patch the Structure to look for. | |
ba379fdc A |
684 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); |
685 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset); | |
686 | } | |
687 | ||
f9bf01c6 | 688 | void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) |
ba379fdc A |
689 | { |
690 | RepatchBuffer repatchBuffer(codeBlock); | |
691 | ||
692 | ASSERT(!methodCallLinkInfo.cachedStructure); | |
693 | methodCallLinkInfo.cachedStructure = structure; | |
694 | structure->ref(); | |
695 | ||
696 | Structure* prototypeStructure = proto->structure(); | |
ba379fdc A |
697 | methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure; |
698 | prototypeStructure->ref(); | |
699 | ||
700 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure); | |
701 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto); | |
702 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure); | |
703 | repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee); | |
f9bf01c6 A |
704 | |
705 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id)); | |
9dae56ea A |
706 | } |
707 | ||
4e4e5a6f | 708 | void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct) |
9dae56ea | 709 | { |
ba379fdc A |
710 | RepatchBuffer repatchBuffer(codeBlock); |
711 | ||
9dae56ea | 712 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
ba379fdc | 713 | // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. |
4e4e5a6f | 714 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic)); |
ba379fdc A |
715 | |
716 | int offset = sizeof(JSValue) * cachedOffset; | |
717 | ||
718 | // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load | |
719 | // and makes the subsequent load's offset automatically correct | |
720 | if (structure->isUsingInlineStorage()) | |
721 | repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad)); | |
9dae56ea A |
722 | |
723 | // Patch the offset into the propoerty map to load from, then patch the Structure to look for. | |
ba379fdc A |
724 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); |
725 | repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset); | |
9dae56ea A |
726 | } |
727 | ||
ba379fdc | 728 | void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) |
9dae56ea A |
729 | { |
730 | StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); | |
731 | ||
9dae56ea | 732 | // Check eax is an array |
ba379fdc | 733 | Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); |
9dae56ea A |
734 | |
735 | // Checks out okay! - get the length from the storage | |
ba379fdc A |
736 | loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); |
737 | load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2); | |
9dae56ea | 738 | |
ba379fdc | 739 | Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt)); |
9dae56ea | 740 | |
ba379fdc | 741 | emitFastArithIntToImmNoCheck(regT2, regT0); |
9dae56ea A |
742 | Jump success = jump(); |
743 | ||
ba379fdc | 744 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea A |
745 | |
746 | // Use the patch information to link the failure cases back to the original slow case routine. | |
ba379fdc | 747 | CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
9dae56ea A |
748 | patchBuffer.link(failureCases1, slowCaseBegin); |
749 | patchBuffer.link(failureCases2, slowCaseBegin); | |
750 | ||
751 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc | 752 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
9dae56ea A |
753 | |
754 | // Track the stub we have created so that it will be deleted later. | |
ba379fdc A |
755 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
756 | stubInfo->stubRoutine = entryLabel; | |
9dae56ea | 757 | |
ba379fdc A |
758 | // Finally patch the jump to slow case back in the hot path to jump here instead. |
759 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); | |
760 | RepatchBuffer repatchBuffer(m_codeBlock); | |
761 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea | 762 | |
ba379fdc A |
763 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
764 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail)); | |
9dae56ea A |
765 | } |
766 | ||
4e4e5a6f | 767 | void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
9dae56ea | 768 | { |
9dae56ea A |
769 | // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is |
770 | // referencing the prototype object - let's speculatively load it's table nice and early!) | |
771 | JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); | |
9dae56ea A |
772 | |
773 | // Check eax is an object of the right Structure. | |
ba379fdc | 774 | Jump failureCases1 = checkStructure(regT0, structure); |
9dae56ea A |
775 | |
776 | // Check the prototype object's Structure had not changed. | |
777 | Structure** prototypeStructureAddress = &(protoObject->m_structure); | |
f9bf01c6 | 778 | #if CPU(X86_64) |
ba379fdc A |
779 | move(ImmPtr(prototypeStructure), regT3); |
780 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); | |
9dae56ea | 781 | #else |
ba379fdc | 782 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
9dae56ea A |
783 | #endif |
784 | ||
4e4e5a6f A |
785 | bool needsStubLink = false; |
786 | ||
787 | // Checks out okay! | |
788 | if (slot.cachedPropertyType() == PropertySlot::Getter) { | |
789 | needsStubLink = true; | |
790 | compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset); | |
791 | JITStubCall stubCall(this, cti_op_get_by_id_getter_stub); | |
792 | stubCall.addArgument(regT1); | |
793 | stubCall.addArgument(regT0); | |
794 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
795 | stubCall.call(); | |
796 | } else if (slot.cachedPropertyType() == PropertySlot::Custom) { | |
797 | needsStubLink = true; | |
798 | JITStubCall stubCall(this, cti_op_get_by_id_custom_stub); | |
799 | stubCall.addArgument(ImmPtr(protoObject)); | |
800 | stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress())); | |
801 | stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident))); | |
802 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
803 | stubCall.call(); | |
804 | } else | |
805 | compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); | |
9dae56ea | 806 | Jump success = jump(); |
ba379fdc | 807 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea A |
808 | |
809 | // Use the patch information to link the failure cases back to the original slow case routine. | |
ba379fdc | 810 | CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
9dae56ea A |
811 | patchBuffer.link(failureCases1, slowCaseBegin); |
812 | patchBuffer.link(failureCases2, slowCaseBegin); | |
813 | ||
814 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc | 815 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
9dae56ea | 816 | |
4e4e5a6f A |
817 | if (needsStubLink) { |
818 | for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { | |
819 | if (iter->to) | |
820 | patchBuffer.link(iter->from, FunctionPtr(iter->to)); | |
821 | } | |
822 | } | |
9dae56ea | 823 | // Track the stub we have created so that it will be deleted later. |
ba379fdc A |
824 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
825 | stubInfo->stubRoutine = entryLabel; | |
9dae56ea A |
826 | |
827 | // Finally patch the jump to slow case back in the hot path to jump here instead. | |
ba379fdc A |
828 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
829 | RepatchBuffer repatchBuffer(m_codeBlock); | |
830 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea | 831 | |
ba379fdc A |
832 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
833 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); | |
9dae56ea A |
834 | } |
835 | ||
4e4e5a6f | 836 | void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) |
9dae56ea | 837 | { |
ba379fdc | 838 | Jump failureCase = checkStructure(regT0, structure); |
4e4e5a6f A |
839 | bool needsStubLink = false; |
840 | if (slot.cachedPropertyType() == PropertySlot::Getter) { | |
841 | needsStubLink = true; | |
842 | if (!structure->isUsingInlineStorage()) { | |
843 | move(regT0, regT1); | |
844 | compileGetDirectOffset(regT1, regT1, structure, cachedOffset); | |
845 | } else | |
846 | compileGetDirectOffset(regT0, regT1, structure, cachedOffset); | |
847 | JITStubCall stubCall(this, cti_op_get_by_id_getter_stub); | |
848 | stubCall.addArgument(regT1); | |
849 | stubCall.addArgument(regT0); | |
850 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
851 | stubCall.call(); | |
852 | } else if (slot.cachedPropertyType() == PropertySlot::Custom) { | |
853 | needsStubLink = true; | |
854 | JITStubCall stubCall(this, cti_op_get_by_id_custom_stub); | |
855 | stubCall.addArgument(regT0); | |
856 | stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress())); | |
857 | stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident))); | |
858 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
859 | stubCall.call(); | |
860 | } else | |
861 | compileGetDirectOffset(regT0, regT0, structure, cachedOffset); | |
9dae56ea A |
862 | Jump success = jump(); |
863 | ||
ba379fdc | 864 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea | 865 | |
4e4e5a6f A |
866 | if (needsStubLink) { |
867 | for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { | |
868 | if (iter->to) | |
869 | patchBuffer.link(iter->from, FunctionPtr(iter->to)); | |
870 | } | |
871 | } | |
872 | ||
9dae56ea | 873 | // Use the patch information to link the failure cases back to the original slow case routine. |
ba379fdc | 874 | CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; |
9dae56ea | 875 | if (!lastProtoBegin) |
ba379fdc | 876 | lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); |
9dae56ea A |
877 | |
878 | patchBuffer.link(failureCase, lastProtoBegin); | |
879 | ||
880 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc A |
881 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
882 | ||
883 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); | |
9dae56ea A |
884 | |
885 | structure->ref(); | |
ba379fdc | 886 | polymorphicStructures->list[currentIndex].set(entryLabel, structure); |
9dae56ea A |
887 | |
888 | // Finally patch the jump to slow case back in the hot path to jump here instead. | |
ba379fdc A |
889 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
890 | RepatchBuffer repatchBuffer(m_codeBlock); | |
891 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea A |
892 | } |
893 | ||
4e4e5a6f | 894 | void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) |
9dae56ea A |
895 | { |
896 | // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is | |
897 | // referencing the prototype object - let's speculatively load it's table nice and early!) | |
898 | JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); | |
9dae56ea A |
899 | |
900 | // Check eax is an object of the right Structure. | |
ba379fdc | 901 | Jump failureCases1 = checkStructure(regT0, structure); |
9dae56ea A |
902 | |
903 | // Check the prototype object's Structure had not changed. | |
904 | Structure** prototypeStructureAddress = &(protoObject->m_structure); | |
f9bf01c6 | 905 | #if CPU(X86_64) |
ba379fdc A |
906 | move(ImmPtr(prototypeStructure), regT3); |
907 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); | |
9dae56ea | 908 | #else |
ba379fdc | 909 | Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); |
9dae56ea A |
910 | #endif |
911 | ||
4e4e5a6f A |
912 | // Checks out okay! |
913 | bool needsStubLink = false; | |
914 | if (slot.cachedPropertyType() == PropertySlot::Getter) { | |
915 | needsStubLink = true; | |
916 | compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset); | |
917 | JITStubCall stubCall(this, cti_op_get_by_id_getter_stub); | |
918 | stubCall.addArgument(regT1); | |
919 | stubCall.addArgument(regT0); | |
920 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
921 | stubCall.call(); | |
922 | } else if (slot.cachedPropertyType() == PropertySlot::Custom) { | |
923 | needsStubLink = true; | |
924 | JITStubCall stubCall(this, cti_op_get_by_id_custom_stub); | |
925 | stubCall.addArgument(ImmPtr(protoObject)); | |
926 | stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress())); | |
927 | stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident))); | |
928 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
929 | stubCall.call(); | |
930 | } else | |
931 | compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); | |
9dae56ea A |
932 | |
933 | Jump success = jump(); | |
934 | ||
ba379fdc | 935 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea | 936 | |
4e4e5a6f A |
937 | if (needsStubLink) { |
938 | for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { | |
939 | if (iter->to) | |
940 | patchBuffer.link(iter->from, FunctionPtr(iter->to)); | |
941 | } | |
942 | } | |
943 | ||
9dae56ea | 944 | // Use the patch information to link the failure cases back to the original slow case routine. |
ba379fdc | 945 | CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; |
9dae56ea A |
946 | patchBuffer.link(failureCases1, lastProtoBegin); |
947 | patchBuffer.link(failureCases2, lastProtoBegin); | |
948 | ||
949 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc A |
950 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
951 | ||
952 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); | |
9dae56ea A |
953 | |
954 | structure->ref(); | |
955 | prototypeStructure->ref(); | |
ba379fdc | 956 | prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure); |
9dae56ea A |
957 | |
958 | // Finally patch the jump to slow case back in the hot path to jump here instead. | |
ba379fdc A |
959 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
960 | RepatchBuffer repatchBuffer(m_codeBlock); | |
961 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea A |
962 | } |
963 | ||
4e4e5a6f | 964 | void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) |
9dae56ea A |
965 | { |
966 | ASSERT(count); | |
9dae56ea A |
967 | JumpList bucketsOfFail; |
968 | ||
969 | // Check eax is an object of the right Structure. | |
ba379fdc | 970 | Jump baseObjectCheck = checkStructure(regT0, structure); |
9dae56ea A |
971 | bucketsOfFail.append(baseObjectCheck); |
972 | ||
973 | Structure* currStructure = structure; | |
974 | RefPtr<Structure>* chainEntries = chain->head(); | |
975 | JSObject* protoObject = 0; | |
976 | for (unsigned i = 0; i < count; ++i) { | |
977 | protoObject = asObject(currStructure->prototypeForLookup(callFrame)); | |
978 | currStructure = chainEntries[i].get(); | |
979 | ||
980 | // Check the prototype object's Structure had not changed. | |
981 | Structure** prototypeStructureAddress = &(protoObject->m_structure); | |
f9bf01c6 | 982 | #if CPU(X86_64) |
ba379fdc A |
983 | move(ImmPtr(currStructure), regT3); |
984 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); | |
9dae56ea | 985 | #else |
ba379fdc | 986 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
9dae56ea A |
987 | #endif |
988 | } | |
989 | ASSERT(protoObject); | |
4e4e5a6f A |
990 | |
991 | bool needsStubLink = false; | |
992 | if (slot.cachedPropertyType() == PropertySlot::Getter) { | |
993 | needsStubLink = true; | |
994 | compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset); | |
995 | JITStubCall stubCall(this, cti_op_get_by_id_getter_stub); | |
996 | stubCall.addArgument(regT1); | |
997 | stubCall.addArgument(regT0); | |
998 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
999 | stubCall.call(); | |
1000 | } else if (slot.cachedPropertyType() == PropertySlot::Custom) { | |
1001 | needsStubLink = true; | |
1002 | JITStubCall stubCall(this, cti_op_get_by_id_custom_stub); | |
1003 | stubCall.addArgument(ImmPtr(protoObject)); | |
1004 | stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress())); | |
1005 | stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident))); | |
1006 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
1007 | stubCall.call(); | |
1008 | } else | |
1009 | compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); | |
9dae56ea A |
1010 | Jump success = jump(); |
1011 | ||
ba379fdc | 1012 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
4e4e5a6f A |
1013 | |
1014 | if (needsStubLink) { | |
1015 | for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { | |
1016 | if (iter->to) | |
1017 | patchBuffer.link(iter->from, FunctionPtr(iter->to)); | |
1018 | } | |
1019 | } | |
9dae56ea A |
1020 | |
1021 | // Use the patch information to link the failure cases back to the original slow case routine. | |
ba379fdc | 1022 | CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; |
9dae56ea A |
1023 | |
1024 | patchBuffer.link(bucketsOfFail, lastProtoBegin); | |
1025 | ||
1026 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc A |
1027 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
1028 | ||
1029 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); | |
9dae56ea A |
1030 | |
1031 | // Track the stub we have created so that it will be deleted later. | |
1032 | structure->ref(); | |
1033 | chain->ref(); | |
ba379fdc | 1034 | prototypeStructures->list[currentIndex].set(entryLabel, structure, chain); |
9dae56ea A |
1035 | |
1036 | // Finally patch the jump to slow case back in the hot path to jump here instead. | |
ba379fdc A |
1037 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
1038 | RepatchBuffer repatchBuffer(m_codeBlock); | |
1039 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea | 1040 | } |
9dae56ea | 1041 | |
4e4e5a6f | 1042 | void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) |
9dae56ea | 1043 | { |
9dae56ea | 1044 | ASSERT(count); |
4e4e5a6f | 1045 | |
9dae56ea A |
1046 | JumpList bucketsOfFail; |
1047 | ||
1048 | // Check eax is an object of the right Structure. | |
ba379fdc | 1049 | bucketsOfFail.append(checkStructure(regT0, structure)); |
9dae56ea A |
1050 | |
1051 | Structure* currStructure = structure; | |
1052 | RefPtr<Structure>* chainEntries = chain->head(); | |
1053 | JSObject* protoObject = 0; | |
1054 | for (unsigned i = 0; i < count; ++i) { | |
1055 | protoObject = asObject(currStructure->prototypeForLookup(callFrame)); | |
1056 | currStructure = chainEntries[i].get(); | |
1057 | ||
1058 | // Check the prototype object's Structure had not changed. | |
1059 | Structure** prototypeStructureAddress = &(protoObject->m_structure); | |
f9bf01c6 | 1060 | #if CPU(X86_64) |
ba379fdc A |
1061 | move(ImmPtr(currStructure), regT3); |
1062 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); | |
9dae56ea | 1063 | #else |
ba379fdc | 1064 | bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); |
9dae56ea A |
1065 | #endif |
1066 | } | |
1067 | ASSERT(protoObject); | |
1068 | ||
4e4e5a6f A |
1069 | bool needsStubLink = false; |
1070 | if (slot.cachedPropertyType() == PropertySlot::Getter) { | |
1071 | needsStubLink = true; | |
1072 | compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset); | |
1073 | JITStubCall stubCall(this, cti_op_get_by_id_getter_stub); | |
1074 | stubCall.addArgument(regT1); | |
1075 | stubCall.addArgument(regT0); | |
1076 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
1077 | stubCall.call(); | |
1078 | } else if (slot.cachedPropertyType() == PropertySlot::Custom) { | |
1079 | needsStubLink = true; | |
1080 | JITStubCall stubCall(this, cti_op_get_by_id_custom_stub); | |
1081 | stubCall.addArgument(ImmPtr(protoObject)); | |
1082 | stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress())); | |
1083 | stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident))); | |
1084 | stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress())); | |
1085 | stubCall.call(); | |
1086 | } else | |
1087 | compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); | |
9dae56ea A |
1088 | Jump success = jump(); |
1089 | ||
ba379fdc | 1090 | LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); |
9dae56ea | 1091 | |
4e4e5a6f A |
1092 | if (needsStubLink) { |
1093 | for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { | |
1094 | if (iter->to) | |
1095 | patchBuffer.link(iter->from, FunctionPtr(iter->to)); | |
1096 | } | |
1097 | } | |
1098 | ||
9dae56ea | 1099 | // Use the patch information to link the failure cases back to the original slow case routine. |
ba379fdc | 1100 | patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); |
9dae56ea A |
1101 | |
1102 | // On success return back to the hot patch code, at a point it will perform the store to dest for us. | |
ba379fdc | 1103 | patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); |
9dae56ea A |
1104 | |
1105 | // Track the stub we have created so that it will be deleted later. | |
ba379fdc A |
1106 | CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); |
1107 | stubInfo->stubRoutine = entryLabel; | |
9dae56ea A |
1108 | |
1109 | // Finally patch the jump to slow case back in the hot path to jump here instead. | |
ba379fdc A |
1110 | CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); |
1111 | RepatchBuffer repatchBuffer(m_codeBlock); | |
1112 | repatchBuffer.relink(jumpLocation, entryLabel); | |
9dae56ea | 1113 | |
ba379fdc A |
1114 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. |
1115 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); | |
9dae56ea A |
1116 | } |
1117 | ||
ba379fdc | 1118 | /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ |
9dae56ea | 1119 | |
ba379fdc | 1120 | #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
9dae56ea | 1121 | |
9dae56ea A |
1122 | } // namespace JSC |
1123 | ||
1124 | #endif // ENABLE(JIT) | |
4e4e5a6f A |
1125 | |
1126 | #endif // !USE(JSVALUE32_64) |