/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
#include "LinkBuffer.h"

namespace JSC {

#if USE(JSVALUE32_64)

void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (1) This function provides fast property access for string length
    Label stringLengthBegin = align();

    // regT0 holds payload, regT1 holds tag

    Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
    // Checks out okay! - get the length from the UString.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2);

    Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);

    ret();
#endif
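
    // A rough sketch (illustration only, with hypothetical field names) of the
    // JSVALUE32_64 layout the two compares above rely on: each JSValue is a
    // 32-bit payload plus a 32-bit tag, payload at the lower address on
    // little-endian targets (see the tag load at offset 4 in
    // emit_op_resolve_global below):
    //
    //     struct EncodedJSValue32_64 {
    //         int32_t payload; // pointer bits, int32, or bool (here: regT0)
    //         int32_t tag;     // JSValue::CellTag, Int32Tag, ... (here: regT1)
    //     };
    //
    // So the string fast path costs one tag compare (CellTag) plus one vptr
    // compare against jsStringVPtr before the length load.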

    // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.

#if ENABLE(JIT_OPTIMIZE_CALL)
    /* VirtualCallPreLink Trampoline */
    Label virtualCallPreLinkBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock1 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction1 = call();
    move(regT0, regT2);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock1.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay1 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck1 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay1.link(this);
    isNativeFunc1.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3);
    restoreArgumentReference();
    Call callDontLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);
    jump(regT0);
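
    // Stub-argument slot map used throughout these trampolines, as labelled by
    // the inline comments above (the underlying JITStackFrame layout lives in
    // JITStubs.h): slot 1 = callee, slot 3 = preserved return address,
    // slot 5 = argCount, slot 7 = codeBlock. Callee and argCount are re-fetched
    // after each stub call because the call into C++ clobbers the temporary
    // registers.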

    /* VirtualCallLink Trampoline */
    Label virtualCallLinkBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock2 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction2 = call();
    move(regT0, regT2);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock2.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee arity.
    Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck2 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay2.link(this);
    isNativeFunc2.link(this);

    compileOpCallInitializeCallFrame();

    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3);
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    restoreReturnAddressBeforeReturn(regT3);
    jump(regT0);
#endif // ENABLE(JIT_OPTIMIZE_CALL)

    /* VirtualCall Trampoline */
    Label virtualCallBegin = align();

    // regT0 holds callee, regT1 holds argCount.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT2);
    Jump hasCodeBlock3 = branchTestPtr(NonZero, regT2);

    // Lazily generate a CodeBlock.
    preserveReturnAddressAfterCall(regT3); // return address
    restoreArgumentReference();
    Call callJSFunction3 = call();
    move(regT0, regT2);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address
    hasCodeBlock3.link(this);

    // regT2 holds codeBlock.
    Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));

    // Check argCount matches callee.
    Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
    preserveReturnAddressAfterCall(regT3);
    emitPutJITStubArg(regT3, 3); // return address
    emitPutJITStubArg(regT2, 7); // codeBlock
    restoreArgumentReference();
    Call callArityCheck3 = call();
    move(regT1, callFrameRegister);
    emitGetJITStubArg(1, regT0); // callee
    emitGetJITStubArg(5, regT1); // argCount
    restoreReturnAddressBeforeReturn(regT3); // return address

    arityCheckOkay3.link(this);
    isNativeFunc3.link(this);
    compileOpCallInitializeCallFrame();
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_body)), regT0);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
    jump(regT0);

#if PLATFORM(X86)
    Label nativeCallThunk = align();
    preserveReturnAddressAfterCall(regT0);
    emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);

    /* We have two structs that we use to describe the stackframe we set up for our
     * call to native code. NativeCallFrameStructure describes how we set up the stack
     * in advance of the call. NativeFunctionCalleeSignature describes the callframe
     * as the native code expects it. We do this because we are using the fastcall calling
     * convention, which results in the callee popping its arguments off the stack but
     * not the rest of the callframe, so we need a nice way to ensure we increment the
     * stack pointer by the right amount after the call.
     */

#if COMPILER(MSVC) || PLATFORM(LINUX)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
#endif // COMPILER(MSVC)
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in EDX
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
        JSValue result;
    };
    struct NativeFunctionCalleeSignature {
        JSObject* callee;
        JSValue thisValue;
        ArgList* argPointer;
    };
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
#else
    struct NativeCallFrameStructure {
        // CallFrame* callFrame; // passed in ECX
        // JSObject* callee; // passed in EDX
        JSValue thisValue;
        ArgList* argPointer;
        ArgList args;
    };
    struct NativeFunctionCalleeSignature {
        JSValue thisValue;
        ArgList* argPointer;
    };
#endif

    const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
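    // This rounds the frame size up to the next multiple of 16 to keep the
    // system stack 16-byte aligned. Worked example: were the struct 36 bytes,
    // 36 + 15 = 51, and 51 & ~15 clears the low four bits, giving 48 -- the
    // smallest multiple of 16 that is >= 36.
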
    // Allocate system stack frame
    subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);

    // Set up arguments
    subPtr(Imm32(1), regT0); // Don't include 'this' in argcount

    // push argcount
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));

    // Calculate the start of the callframe header, and store in regT1
    addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);

    // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
    mul32(Imm32(sizeof(Register)), regT0, regT0);
    subPtr(regT0, regT1);
    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));

    // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));

    // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
    storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));

#if COMPILER(MSVC) || PLATFORM(LINUX)
    // JSValue is a non-POD type and is returned through a hidden pointer, so plant the address of the result slot in ECX
    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);

    // Plant callee
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
    storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));

    // Plant callframe
    move(callFrameRegister, X86::edx);

    call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));

    // JSValue is a non-POD type, so eax points to it
    emitLoad(0, regT1, regT0, X86::eax);
#else
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx); // callee
    move(callFrameRegister, X86::ecx); // callFrame
    call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
#endif

    // We've put a few temporaries on the stack in addition to the actual arguments
    // so pull them off now
    addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
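    // As the comment block above explains, under fastcall the callee itself
    // pops its stack-passed arguments (sizeof(NativeFunctionCalleeSignature)
    // bytes) on return, so only the remainder of the 16-byte-aligned frame
    // allocated above needs to be reclaimed here.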

    // Check for an exception
    // FIXME: Maybe we can optimize this comparison to JSValue().
    move(ImmPtr(&globalData->exception), regT2);
    Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag));
    Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0));

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT3);
    ret();

    // Handle an exception
    sawException1.link(this);
    sawException2.link(this);
    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    restoreReturnAddressBeforeReturn(regT2);
    ret();

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
    patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall));
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
#endif
    patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
    patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction));

    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
    UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
    *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
#else
    UNUSED_PARAM(ctiVirtualCallPreLink);
    UNUSED_PARAM(ctiVirtualCallLink);
#endif
}

void JIT::emit_op_mov(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src))
        emitStore(dst, getConstantOperand(src));
    else {
        emitLoad(src, regT1, regT0);
        emitStore(dst, regT1, regT0);
        map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
    }
}
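
// A note on the indexing above: each opcode occupies a run of Instruction
// words, with currentInstruction[0] holding the opcode and the operands
// following it, so op_mov reads dst and src from slots 1 and 2. The map() call
// records that regT1/regT0 still hold dst's tag/payload at the bytecode index
// just past this op, allowing a later load of the same virtual register to be
// elided.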

void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
}

void JIT::emit_op_loop(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    emitTimeoutCheck();
    addJump(jump(), target + 1);
}

void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThan, regT0, regT2), target + 3);
}

void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_less);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}
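
// Note the pairing discipline between fast and slow paths: every addSlowCase()
// in emit_op_loop_if_less must be consumed, in emission order, by exactly one
// linkSlowCase(iter) here. That is why this slow path repeats the
// constant-operand tests -- two int32 tag checks were emitted only when both
// operands were non-constant, so only then are two slow-case links consumed.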

void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3);
}

void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_lesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
}

void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands (proto, baseVal, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitLoadPayload(proto, regT1);
    emitLoadPayload(baseVal, regT0);
    emitLoadPayload(value, regT2);

    // Check that baseVal & proto are cells.
    emitJumpSlowCaseIfNotJSCell(proto);
    emitJumpSlowCaseIfNotJSCell(baseVal);

    // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.

    // If value is not an Object, return false.
    emitLoadTag(value, regT0);
    Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.

    // Check proto is object.
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(Imm32(JSValue::TrueTag), regT0);
    Label loop(this);

    // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    branch32(NotEqual, regT2, Imm32(0), loop);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    valueIsImmediate.link(this);
    valueIsNotObject.link(this);
    move(Imm32(JSValue::FalseTag), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitStoreBool(dst, regT0);
}
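
// A hedged C++ sketch (not from this file; the helper name is hypothetical) of
// the semantics the emitted loop implements -- walk value's prototype chain
// looking for proto, where a null prototype payload ends the walk:
//
//     bool isInstance(JSCell* value, JSCell* proto)
//     {
//         for (JSCell* cell = value;;) {
//             cell = prototypeOf(cell); // hypothetical: Structure::m_prototype payload
//             if (cell == proto)
//                 return true;  // the isInstance jump above
//             if (!cell)
//                 return false; // dropped out of the loop
//         }
//     }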

void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value);
    stubCall.addArgument(baseVal);
    stubCall.addArgument(proto);
    stubCall.call(dst);
}

void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[3].u.operand;

    loadPtr(&globalObject->d()->registers, regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    loadPtr(&globalObject->d()->registers, regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}

void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction*)
{
    JITStubCall(this, cti_op_tear_off_arguments).call();
}

void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitTimeoutCheck();

    emitLoad(cond, regT1, regT0);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
    Jump isNotZero = jump();

    isNotInteger.link(this);

    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));

    isNotZero.link(this);
}

void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
}

void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // FIXME: Optimize to use patching instead of so many memory accesses.

    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Verify structure.
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));

    // Load property.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
    load32(offsetAddr, regT3);
    load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
    load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
}

void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}

void JIT::emit_op_not(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoadTag(src, regT0);

    xor32(Imm32(JSValue::FalseTag), regT0);
    addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
    xor32(Imm32(JSValue::TrueTag), regT0);

    emitStoreBool(dst, regT0, (dst == src));
}
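
// The xor trick above relies on TrueTag and FalseTag differing only in their
// low bit (the branchTest32 against ~1 is precisely the check that every other
// bit matched FalseTag). Concretely: tag ^ FalseTag yields 0 for false and 1
// for true, while any other tag leaves a bit outside the low bit set and takes
// the slow case; then (0 or 1) ^ TrueTag maps 0 -> TrueTag and 1 -> FalseTag,
// i.e. the logical negation, stored straight back as a boolean tag.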

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(src);
    stubCall.call(dst);
}

void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
    addJump(jump(), target + 2);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
    } else
        addSlowCase(isNotInteger);

    isTrue.link(this);
    isTrue2.link(this);
}

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted.
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
    addJump(jump(), target + 2);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
    } else
        addSlowCase(isNotInteger);

    isFalse.link(this);
    isFalse2.link(this);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(NonZero, regT1), target + 2);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(Zero, regT1), target + 2);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitLoad(src, regT1, regT0);
    addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3);
}

void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target + 2);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
}

void JIT::emit_op_eq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(Equal, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call();
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(op1);
    stubCallEq.addArgument(op2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::emit_op_neq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(NotEqual, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call(regT0);
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(regT1, regT0);
    stubCallEq.addArgument(regT3, regT2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    xor32(Imm32(0x1), regT0);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoadTag(src1, regT0);
    emitLoadTag(src2, regT1);

    // Jump to a slow case if either operand is double, or if both operands are
    // cells and/or Int32s.
    move(regT0, regT2);
    and32(regT1, regT2);
    addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
    addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));

    if (type == OpStrictEq)
        set8(Equal, regT0, regT1, regT0);
    else
        set8(NotEqual, regT0, regT1, regT0);

    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}
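
// The fast path above decides strict (in)equality from the tags alone, so it
// only keeps cases where the tag fully determines the value (undefined, null,
// true, false). Assuming the tag constants sit in a dense block at the top of
// the 32-bit range with double tags below JSValue::LowestTag (as the
// Below/AboveOrEqual range checks here imply), AND-ing the two tags falls
// below LowestTag if either operand is a double, and lands at or above CellTag
// only when both operands are payload-carrying (cells and/or int32s); both
// cases bail to the slow path, where the payloads are actually compared.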

void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
    set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
    and32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_throw(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(exception);
    stubCall.call();

#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // so this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int iter = currentInstruction[2].u.operand;
    int target = currentInstruction[3].u.operand;

    load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0);

    JITStubCall stubCall(this, cti_op_next_pname);
    stubCall.addArgument(regT0);
    stubCall.call();

    Jump endOfIter = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0);
    addJump(jump(), target + 3);
    endOfIter.link(this);
}

void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag)));
    isInt32.link(this);

    if (src != dst)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_catch(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;

    // This opcode only executes after a return from cti_op_throw.

    // cti_op_throw may have taken us to a call frame further up the stack; reload
    // the call frame pointer to adjust.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // Now store the exception returned by cti_op_throw.
    emitStore(exception, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand + 2);
}

void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned type = currentInstruction[2].u.operand;
    unsigned message = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(type));
    stubCall.addArgument(m_codeBlock->getConstant(message));
    stubCall.addArgument(Imm32(m_bytecodeIndex));
    stubCall.call(dst);
}

void JIT::emit_op_debug(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
}

void JIT::emit_op_enter(Instruction*)
{
    // Even though JIT code doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    for (int i = 0; i < m_codeBlock->m_numVars; ++i)
        emitStore(i, jsUndefined());
}

void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    emit_op_enter(currentInstruction);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_create_arguments(Instruction*)
{
    Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag));
    Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister));

    // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();

    argsNotCell.link(this);
    argsNotNull.link(this);
}
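
// Lazy-creation note: emit_op_init_arguments (below) seeds the arguments slot
// with JSValue(), which in this encoding appears to be stored as CellTag with
// a zero payload -- the "null cell" the comment above refers to. The two
// branches therefore skip the stub calls whenever the slot holds anything but
// that empty marker. The no-params stub is selected purely on
// m_numParameters == 1, i.e. when the implicit 'this' is the only parameter.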

void JIT::emit_op_init_arguments(Instruction*)
{
    emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
}

void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    emitLoad(thisRegister, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));

    map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}

void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(thisRegister);
}

void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}

void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}

#else // USE(JSVALUE32_64)

#define RECORD_JUMP_TARGET(targetOffset) \
   do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
1517
1518 void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
1519 {
1520 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1521 // (2) The second function provides fast property access for string length
1522 Label stringLengthBegin = align();
1523
1524 // Check eax is a string
1525 Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
1526 Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
1527
1528 // Checks out okay! - get the length from the UString.
1529 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
1530 load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
1531
1532 Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
1533
1534 // regT0 contains a 64-bit value that is positive and zero-extended, so we don't need to sign extend here.
1535 emitFastArithIntToImmNoCheck(regT0, regT0);
1536
1537 ret();
1538 #endif
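
// Sketch of the guards above (illustrative; helper names are hypothetical): the
// trampoline handles exactly one shape and bails to the generic get_by_id
// string-fail stub otherwise:
//
//     if (!isCell(base) || base->vptr != jsStringVPtr) goto stringFail; // not a JSString
//     int32_t len = stringRep(base)->len;
//     if (len > JSImmediate::maxImmediateInt) goto stringFail;          // won't encode
//     return encodeImmediateInt(len);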
1539
1540 // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
1541 COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
1542
1543 Label virtualCallPreLinkBegin = align();
1544
1545 // Load the callee CodeBlock* into eax
1546 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
1547 loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
1548 Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
1549 preserveReturnAddressAfterCall(regT3);
1550 restoreArgumentReference();
1551 Call callJSFunction1 = call();
1552 emitGetJITStubArg(1, regT2);
1553 emitGetJITStubArg(3, regT1);
1554 restoreReturnAddressBeforeReturn(regT3);
1555 hasCodeBlock1.link(this);
1556
1557 Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
1558
1559 // Check argCount matches callee arity.
1560 Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
1561 preserveReturnAddressAfterCall(regT3);
1562 emitPutJITStubArg(regT3, 2);
1563 emitPutJITStubArg(regT0, 4);
1564 restoreArgumentReference();
1565 Call callArityCheck1 = call();
1566 move(regT1, callFrameRegister);
1567 emitGetJITStubArg(1, regT2);
1568 emitGetJITStubArg(3, regT1);
1569 restoreReturnAddressBeforeReturn(regT3);
1570 arityCheckOkay1.link(this);
1571 isNativeFunc1.link(this);
1572
1573 compileOpCallInitializeCallFrame();
1574
1575 preserveReturnAddressAfterCall(regT3);
1576 emitPutJITStubArg(regT3, 2);
1577 restoreArgumentReference();
1578 Call callDontLazyLinkCall = call();
1579 emitGetJITStubArg(1, regT2);
1580 restoreReturnAddressBeforeReturn(regT3);
1581
1582 jump(regT0);
1583
1584 Label virtualCallLinkBegin = align();
1585
1586 // Load the callee CodeBlock* into eax
1587 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
1588 loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
1589 Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
1590 preserveReturnAddressAfterCall(regT3);
1591 restoreArgumentReference();
1592 Call callJSFunction2 = call();
1593 emitGetJITStubArg(1, regT2);
1594 emitGetJITStubArg(3, regT1);
1595 restoreReturnAddressBeforeReturn(regT3);
1596 hasCodeBlock2.link(this);
1597
1598 Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
1599
1600 // Check argCount matches callee arity.
1601 Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
1602 preserveReturnAddressAfterCall(regT3);
1603 emitPutJITStubArg(regT3, 2);
1604 emitPutJITStubArg(regT0, 4);
1605 restoreArgumentReference();
1606 Call callArityCheck2 = call();
1607 move(regT1, callFrameRegister);
1608 emitGetJITStubArg(1, regT2);
1609 emitGetJITStubArg(3, regT1);
1610 restoreReturnAddressBeforeReturn(regT3);
1611 arityCheckOkay2.link(this);
1612 isNativeFunc2.link(this);
1613
1614 compileOpCallInitializeCallFrame();
1615
1616 preserveReturnAddressAfterCall(regT3);
1617 emitPutJITStubArg(regT3, 2);
1618 restoreArgumentReference();
1619 Call callLazyLinkCall = call();
1620 restoreReturnAddressBeforeReturn(regT3);
1621
1622 jump(regT0);
1623
1624 Label virtualCallBegin = align();
1625
1626 // Load the callee CodeBlock* into eax
1627 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
1628 loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
1629 Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
1630 preserveReturnAddressAfterCall(regT3);
1631 restoreArgumentReference();
1632 Call callJSFunction3 = call();
1633 emitGetJITStubArg(1, regT2);
1634 emitGetJITStubArg(3, regT1);
1635 restoreReturnAddressBeforeReturn(regT3);
1636 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
1637 hasCodeBlock3.link(this);
1638
1639 Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
1640
1641 // Check argCount matches callee arity.
1642 Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
1643 preserveReturnAddressAfterCall(regT3);
1644 emitPutJITStubArg(regT3, 2);
1645 emitPutJITStubArg(regT0, 4);
1646 restoreArgumentReference();
1647 Call callArityCheck3 = call();
1648 move(regT1, callFrameRegister);
1649 emitGetJITStubArg(1, regT2);
1650 emitGetJITStubArg(3, regT1);
1651 restoreReturnAddressBeforeReturn(regT3);
1652 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
1653 arityCheckOkay3.link(this);
1654 isNativeFunc3.link(this);
1655
1656 // load ctiCode from the new codeBlock.
1657 loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
1658
1659 compileOpCallInitializeCallFrame();
1660 jump(regT0);
1661
1662
1663 Label nativeCallThunk = align();
1664 preserveReturnAddressAfterCall(regT0);
1665 emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
1666
1667 // Load caller frame's scope chain into this callframe so that whatever we call can
1668 // get to its global data.
1669 emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
1670 emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
1671 emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
1672
1673
1674 #if PLATFORM(X86_64)
1675 emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
1676
1677 // Allocate stack space for our arglist
1678 subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
1679 COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_be_16byte_aligned);
1680
1681 // Set up arguments
1682 subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
1683
1684 // Push argcount
1685 storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
1686
1687 // Calculate the start of the callframe header, and store in edx
1688 addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
1689
1690 // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
1691 mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
1692 subPtr(X86::ecx, X86::edx);
1693
1694 // push pointer to arguments
1695 storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
1696
1697 // The ArgList is passed by reference; it sits at the top of the stack, so pass stackPointerRegister
1698 move(stackPointerRegister, X86::ecx);
1699
1700 // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
1701 loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
1702
1703 emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
1704
1705 move(callFrameRegister, X86::edi);
1706
1707 call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
1708
1709 addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
1710 #elif PLATFORM(X86)
1711 emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
1712
1713 /* We have two structs that we use to describe the stackframe we set up for our
1714 * call to native code. NativeCallFrameStructure describes how we set up the stack
1715 * in advance of the call. NativeFunctionCalleeSignature describes the callframe
1716 * as the native code expects it. We need both because we use the fastcall calling
1717 * convention, which results in the callee popping its arguments off the stack but
1718 * not the rest of the callframe, so we need a reliable way to ensure we increment
1719 * the stack pointer by the right amount after the call.
1720 */
1721 #if COMPILER(MSVC) || PLATFORM(LINUX)
1722 struct NativeCallFrameStructure {
1723 // CallFrame* callFrame; // passed in EDX
1724 JSObject* callee;
1725 JSValue thisValue;
1726 ArgList* argPointer;
1727 ArgList args;
1728 JSValue result;
1729 };
1730 struct NativeFunctionCalleeSignature {
1731 JSObject* callee;
1732 JSValue thisValue;
1733 ArgList* argPointer;
1734 };
1735 #else
1736 struct NativeCallFrameStructure {
1737 // CallFrame* callFrame; // passed in ECX
1738 // JSObject* callee; // passed in EDX
1739 JSValue thisValue;
1740 ArgList* argPointer;
1741 ArgList args;
1742 };
1743 struct NativeFunctionCalleeSignature {
1744 JSValue thisValue;
1745 ArgList* argPointer;
1746 };
1747 #endif
1748 const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
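// Worked example of the rounding above: if sizeof(NativeCallFrameStructure)
// were, say, 52 bytes, then (52 + 15) & ~15 == 64, so the subPtr below keeps
// the system stack 16-byte aligned across the native call.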
1749 // Allocate system stack frame
1750 subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
1751
1752 // Set up arguments
1753 subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
1754
1755 // push argcount
1756 storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
1757
1758 // Calculate the start of the callframe header, and store in regT1
1759 addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
1760
1761 // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
1762 mul32(Imm32(sizeof(Register)), regT0, regT0);
1763 subPtr(regT0, regT1);
1764 storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
1765
1766 // The ArgList is passed by reference; its address within our frame is stackPointerRegister + OBJECT_OFFSETOF(NativeCallFrameStructure, args)
1767 addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
1768 storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
1769
1770 // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
1771 loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
1772 storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
1773
1774 #if COMPILER(MSVC) || PLATFORM(LINUX)
1775 // The result is returned through a hidden pointer argument; compute the address of the result slot into ecx
1776 addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
1777
1778 // Plant callee
1779 emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
1780 storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
1781
1782 // Plant callframe
1783 move(callFrameRegister, X86::edx);
1784
1785 call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
1786
1787 // JSValue is a non-POD type
1788 loadPtr(Address(X86::eax), X86::eax);
1789 #else
1790 // Plant callee
1791 emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
1792
1793 // Plant callframe
1794 move(callFrameRegister, X86::ecx);
1795 call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
1796 #endif
1797
1798 // We've put a few temporaries on the stack in addition to the actual arguments
1799 // so pull them off now
1800 addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
1801
1802 #elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
1803 #error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
1804 #else
1805 breakpoint();
1806 #endif
1807
1808 // Check for an exception
1809 loadPtr(&(globalData->exception), regT2);
1810 Jump exceptionHandler = branchTestPtr(NonZero, regT2);
1811
1812 // Grab the return address.
1813 emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
1814
1815 // Restore our caller's "r".
1816 emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
1817
1818 // Return.
1819 restoreReturnAddressBeforeReturn(regT1);
1820 ret();
1821
1822 // Handle an exception
1823 exceptionHandler.link(this);
1824 // Grab the return address.
1825 emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
1826 move(ImmPtr(&globalData->exceptionLocation), regT2);
1827 storePtr(regT1, regT2);
1828 move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
1829 emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
1830 poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
1831 restoreReturnAddressBeforeReturn(regT2);
1832 ret();
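
// Sketch of the exception path just emitted (illustrative): on a pending
// exception we overwrite our own return address, so the ret() "returns" into
// the VM's throw machinery rather than the JS caller:
//
//     globalData->exceptionLocation = returnPC;  // remember the faulting site
//     returnPC = ctiVMThrowTrampoline;           // redirect the ret() below
//     callFrame = callerFrame;                   // unwind one frame first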
1833
1834
1835 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1836 Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
1837 Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
1838 Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
1839 #endif
1840
1841 // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
1842 LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
1843
1844 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1845 patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
1846 patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
1847 patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
1848 #endif
1849 patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
1850 patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
1851 patchBuffer.link(callArityCheck3, FunctionPtr(cti_op_call_arityCheck));
1852 patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
1853 patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
1854 patchBuffer.link(callJSFunction3, FunctionPtr(cti_op_call_JSFunction));
1855 patchBuffer.link(callDontLazyLinkCall, FunctionPtr(cti_vm_dontLazyLinkCall));
1856 patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
1857
1858 CodeRef finalCode = patchBuffer.finalizeCode();
1859 *executablePool = finalCode.m_executablePool;
1860
1861 *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
1862 *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
1863 *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
1864 *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
1865 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1866 *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
1867 #else
1868 UNUSED_PARAM(ctiStringLengthTrampoline);
1869 #endif
1870 }
1871
1872 void JIT::emit_op_mov(Instruction* currentInstruction)
1873 {
1874 int dst = currentInstruction[1].u.operand;
1875 int src = currentInstruction[2].u.operand;
1876
1877 if (m_codeBlock->isConstantRegisterIndex(src)) {
1878 storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
1879 if (dst == m_lastResultBytecodeRegister)
1880 killLastResultRegister();
1881 } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
1882 // If either the src or dst is the cached register go through
1883 // get/put registers to make sure we track this correctly.
1884 emitGetVirtualRegister(src, regT0);
1885 emitPutVirtualRegister(dst);
1886 } else {
1887 // Perform the copy via regT1; do not disturb any mapping in regT0.
1888 loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
1889 storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
1890 }
1891 }
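
// The three paths above keep the last-result register cache coherent; the
// policy, roughly (illustrative only):
//
//     if (src is a constant)      store the encoded constant directly,
//                                 killing the cache if it aliased dst;
//     else if (src or dst is the cached register)
//                                 go through emitGet/emitPutVirtualRegister so
//                                 the cache ends up pointing at dst;
//     else                        plain memory-to-memory copy via regT1,
//                                 leaving any regT0 mapping untouched.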
1892
1893 void JIT::emit_op_end(Instruction* currentInstruction)
1894 {
1895 if (m_codeBlock->needsFullScopeChain())
1896 JITStubCall(this, cti_op_end).call();
1897 ASSERT(returnValueRegister != callFrameRegister);
1898 emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
1899 restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
1900 ret();
1901 }
1902
1903 void JIT::emit_op_jmp(Instruction* currentInstruction)
1904 {
1905 unsigned target = currentInstruction[1].u.operand;
1906 addJump(jump(), target + 1);
1907 RECORD_JUMP_TARGET(target + 1);
1908 }
1909
1910 void JIT::emit_op_loop(Instruction* currentInstruction)
1911 {
1912 emitTimeoutCheck();
1913
1914 unsigned target = currentInstruction[1].u.operand;
1915 addJump(jump(), target + 1);
1916 }
1917
1918 void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
1919 {
1920 emitTimeoutCheck();
1921
1922 unsigned op1 = currentInstruction[1].u.operand;
1923 unsigned op2 = currentInstruction[2].u.operand;
1924 unsigned target = currentInstruction[3].u.operand;
1925 if (isOperandConstantImmediateInt(op2)) {
1926 emitGetVirtualRegister(op1, regT0);
1927 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1928 #if USE(JSVALUE64)
1929 int32_t op2imm = getConstantOperandImmediateInt(op2);
1930 #else
1931 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
1932 #endif
1933 addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
1934 } else if (isOperandConstantImmediateInt(op1)) {
1935 emitGetVirtualRegister(op2, regT0);
1936 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1937 #if USE(JSVALUE64)
1938 int32_t op1imm = getConstantOperandImmediateInt(op1);
1939 #else
1940 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
1941 #endif
1942 addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
1943 } else {
1944 emitGetVirtualRegisters(op1, regT0, op2, regT1);
1945 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1946 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1947 addJump(branch32(LessThan, regT0, regT1), target + 3);
1948 }
1949 }
1950
1951 void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
1952 {
1953 emitTimeoutCheck();
1954
1955 unsigned op1 = currentInstruction[1].u.operand;
1956 unsigned op2 = currentInstruction[2].u.operand;
1957 unsigned target = currentInstruction[3].u.operand;
1958 if (isOperandConstantImmediateInt(op2)) {
1959 emitGetVirtualRegister(op1, regT0);
1960 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1961 #if USE(JSVALUE64)
1962 int32_t op2imm = getConstantOperandImmediateInt(op2);
1963 #else
1964 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
1965 #endif
1966 addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
1967 } else {
1968 emitGetVirtualRegisters(op1, regT0, op2, regT1);
1969 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1970 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1971 addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
1972 }
1973 }
1974
1975 void JIT::emit_op_new_object(Instruction* currentInstruction)
1976 {
1977 JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
1978 }
1979
1980 void JIT::emit_op_instanceof(Instruction* currentInstruction)
1981 {
1982 // Load the operands (baseVal, proto, and value respectively) into registers.
1983 // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
1984 emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
1985 emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
1986 emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
1987
1988 // Check that baseVal & proto are cells.
1989 emitJumpSlowCaseIfNotJSCell(regT0);
1990 emitJumpSlowCaseIfNotJSCell(regT1);
1991
1992 // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
1993 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
1994 addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
1995 addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
1996
1997 // If value is not an Object, return false.
1998 Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
1999 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
2000 Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
2001
2002 // Check proto is object.
2003 loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
2004 addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
2005
2006 // Optimistically load the result true, and start looping.
2007 // Initially, regT1 still contains proto and regT2 still contains value.
2008 // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
2009 move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
2010 Label loop(this);
2011
2012 // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
2013 // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
2014 loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2015 loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
2016 Jump isInstance = branchPtr(Equal, regT2, regT1);
2017 branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
2018
2019 // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
2020 valueIsImmediate.link(this);
2021 valueIsNotObject.link(this);
2022 move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
2023
2024 // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
2025 isInstance.link(this);
2026 emitPutVirtualRegister(currentInstruction[1].u.operand);
2027 }
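
// The emitted loop is equivalent to roughly this C++ (a sketch; accessor names
// are illustrative, not the real inline helpers):
//
//     JSValue result = jsBoolean(true);
//     for (JSCell* cell = valueCell; ;) {
//         JSValue proto = cell->structure()->prototype();
//         if (proto == protoOperand)
//             break;                                  // hit: instanceof is true
//         if (proto == jsNull()) {
//             result = jsBoolean(false);              // chain exhausted
//             break;
//         }
//         cell = asCell(proto);                       // walk up the chain
//     }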
2028
2029 void JIT::emit_op_new_func(Instruction* currentInstruction)
2030 {
2031 JITStubCall stubCall(this, cti_op_new_func);
2032 stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
2033 stubCall.call(currentInstruction[1].u.operand);
2034 }
2035
2036 void JIT::emit_op_call(Instruction* currentInstruction)
2037 {
2038 compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
2039 }
2040
2041 void JIT::emit_op_call_eval(Instruction* currentInstruction)
2042 {
2043 compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
2044 }
2045
2046 void JIT::emit_op_load_varargs(Instruction* currentInstruction)
2047 {
2048 int argCountDst = currentInstruction[1].u.operand;
2049 int argsOffset = currentInstruction[2].u.operand;
2050
2051 JITStubCall stubCall(this, cti_op_load_varargs);
2052 stubCall.addArgument(Imm32(argsOffset));
2053 stubCall.call();
2054 // Stores a naked int32 in the register file.
2055 store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
2056 }
2057
2058 void JIT::emit_op_call_varargs(Instruction* currentInstruction)
2059 {
2060 compileOpCallVarargs(currentInstruction);
2061 }
2062
2063 void JIT::emit_op_construct(Instruction* currentInstruction)
2064 {
2065 compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
2066 }
2067
2068 void JIT::emit_op_get_global_var(Instruction* currentInstruction)
2069 {
2070 JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
2071 move(ImmPtr(globalObject), regT0);
2072 emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
2073 emitPutVirtualRegister(currentInstruction[1].u.operand);
2074 }
2075
2076 void JIT::emit_op_put_global_var(Instruction* currentInstruction)
2077 {
2078 emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
2079 JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
2080 move(ImmPtr(globalObject), regT0);
2081 emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
2082 }
2083
2084 void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
2085 {
2086 int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
2087
2088 emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
2089 while (skip--)
2090 loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
2091
2092 loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
2093 emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
2094 emitPutVirtualRegister(currentInstruction[1].u.operand);
2095 }
2096
2097 void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
2098 {
2099 int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
2100
2101 emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
2102 emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
2103 while (skip--)
2104 loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
2105
2106 loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
2107 emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
2108 }
2109
2110 void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
2111 {
2112 JITStubCall stubCall(this, cti_op_tear_off_activation);
2113 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
2114 stubCall.call();
2115 }
2116
2117 void JIT::emit_op_tear_off_arguments(Instruction*)
2118 {
2119 JITStubCall(this, cti_op_tear_off_arguments).call();
2120 }
2121
2122 void JIT::emit_op_ret(Instruction* currentInstruction)
2123 {
2124 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
2125 if (m_codeBlock->needsFullScopeChain())
2126 JITStubCall(this, cti_op_ret_scopeChain).call();
2127
2128 ASSERT(callFrameRegister != regT1);
2129 ASSERT(regT1 != returnValueRegister);
2130 ASSERT(returnValueRegister != callFrameRegister);
2131
2132 // Return the result in %eax.
2133 emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
2134
2135 // Grab the return address.
2136 emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
2137
2138 // Restore our caller's "r".
2139 emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
2140
2141 // Return.
2142 restoreReturnAddressBeforeReturn(regT1);
2143 ret();
2144 }
2145
2146 void JIT::emit_op_new_array(Instruction* currentInstruction)
2147 {
2148 JITStubCall stubCall(this, cti_op_new_array);
2149 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
2150 stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
2151 stubCall.call(currentInstruction[1].u.operand);
2152 }
2153
2154 void JIT::emit_op_resolve(Instruction* currentInstruction)
2155 {
2156 JITStubCall stubCall(this, cti_op_resolve);
2157 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
2158 stubCall.call(currentInstruction[1].u.operand);
2159 }
2160
2161 void JIT::emit_op_construct_verify(Instruction* currentInstruction)
2162 {
2163 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
2164
2165 emitJumpSlowCaseIfNotJSCell(regT0);
2166 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2167 addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
2168
2169 }
2170
2171 void JIT::emit_op_to_primitive(Instruction* currentInstruction)
2172 {
2173 int dst = currentInstruction[1].u.operand;
2174 int src = currentInstruction[2].u.operand;
2175
2176 emitGetVirtualRegister(src, regT0);
2177
2178 Jump isImm = emitJumpIfNotJSCell(regT0);
2179 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
2180 isImm.link(this);
2181
2182 if (dst != src)
2183 emitPutVirtualRegister(dst);
2184
2185 }
2186
2187 void JIT::emit_op_strcat(Instruction* currentInstruction)
2188 {
2189 JITStubCall stubCall(this, cti_op_strcat);
2190 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
2191 stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
2192 stubCall.call(currentInstruction[1].u.operand);
2193 }
2194
2195 void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
2196 {
2197 emitTimeoutCheck();
2198
2199 unsigned target = currentInstruction[2].u.operand;
2200 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
2201
2202 Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
2203 addJump(emitJumpIfImmediateInteger(regT0), target + 2);
2204
2205 addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
2206 addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
2207
2208 isZero.link(this);
2209 }
2210 void JIT::emit_op_resolve_base(Instruction* currentInstruction)
2211 {
2212 JITStubCall stubCall(this, cti_op_resolve_base);
2213 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
2214 stubCall.call(currentInstruction[1].u.operand);
2215 }
2216
2217 void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
2218 {
2219 JITStubCall stubCall(this, cti_op_resolve_skip);
2220 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
2221 stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
2222 stubCall.call(currentInstruction[1].u.operand);
2223 }
2224
2225 void JIT::emit_op_resolve_global(Instruction* currentInstruction)
2226 {
2227 // Fast case
2228 void* globalObject = currentInstruction[2].u.jsCell;
2229 Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
2230
2231 unsigned currentIndex = m_globalResolveInfoIndex++;
2232 void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
2233 void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
2234
2235 // Check Structure of global object
2236 move(ImmPtr(globalObject), regT0);
2237 loadPtr(structureAddress, regT1);
2238 Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
2239
2240 // Load cached property
2241 // Assume that the global object always uses external storage.
2242 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
2243 load32(offsetAddr, regT1);
2244 loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
2245 emitPutVirtualRegister(currentInstruction[1].u.operand);
2246 Jump end = jump();
2247
2248 // Slow case
2249 noMatch.link(this);
2250 JITStubCall stubCall(this, cti_op_resolve_global);
2251 stubCall.addArgument(ImmPtr(globalObject));
2252 stubCall.addArgument(ImmPtr(ident));
2253 stubCall.addArgument(Imm32(currentIndex));
2254 stubCall.call(currentInstruction[1].u.operand);
2255 end.link(this);
2256 }
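
// In effect each resolve site is a one-entry inline cache (sketch; accessor
// names illustrative):
//
//     GlobalResolveInfo& info = codeBlock->globalResolveInfo(currentIndex);
//     if (globalObject->structure() == info.structure)       // fast path
//         result = globalObject->externalStorage()[info.offset];
//     else
//         result = cti_op_resolve_global(...);               // fills structure/offset
//
// The slow stub is expected to repopulate info so later executions stay on the
// fast path until the global object's Structure changes again.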
2257
2258 void JIT::emit_op_not(Instruction* currentInstruction)
2259 {
2260 emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
2261 xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
2262 addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
2263 xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
2264 emitPutVirtualRegister(currentInstruction[1].u.operand);
2265 }
2266
2267 void JIT::emit_op_jfalse(Instruction* currentInstruction)
2268 {
2269 unsigned target = currentInstruction[2].u.operand;
2270 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
2271
2272 addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
2273 Jump isNonZero = emitJumpIfImmediateInteger(regT0);
2274
2275 addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
2276 addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
2277
2278 isNonZero.link(this);
2279 RECORD_JUMP_TARGET(target + 2);
2280 }
2281 void JIT::emit_op_jeq_null(Instruction* currentInstruction)
2282 {
2283 unsigned src = currentInstruction[1].u.operand;
2284 unsigned target = currentInstruction[2].u.operand;
2285
2286 emitGetVirtualRegister(src, regT0);
2287 Jump isImmediate = emitJumpIfNotJSCell(regT0);
2288
2289 // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
2290 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2291 addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
2292 Jump wasNotImmediate = jump();
2293
2294 // Now handle the immediate cases - undefined & null
2295 isImmediate.link(this);
2296 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
2297 addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
2298
2299 wasNotImmediate.link(this);
2300 RECORD_JUMP_TARGET(target + 2);
2301 }
2302 void JIT::emit_op_jneq_null(Instruction* currentInstruction)
2303 {
2304 unsigned src = currentInstruction[1].u.operand;
2305 unsigned target = currentInstruction[2].u.operand;
2306
2307 emitGetVirtualRegister(src, regT0);
2308 Jump isImmediate = emitJumpIfNotJSCell(regT0);
2309
2310 // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
2311 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2312 addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
2313 Jump wasNotImmediate = jump();
2314
2315 // Now handle the immediate cases - undefined & null
2316 isImmediate.link(this);
2317 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
2318 addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
2319
2320 wasNotImmediate.link(this);
2321 RECORD_JUMP_TARGET(target + 2);
2322 }
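
// Both null-comparison opcodes above share one immediate-encoding trick: the
// encodings of undefined and null differ only in ExtendedTagBitUndefined, so
// masking that bit off maps undefined onto null, and a single pointer compare
// against the encoding of null then matches either value (sketch):
//
//     bits &= ~JSImmediate::ExtendedTagBitUndefined;     // undefined -> null
//     bool eqNull = (bits == JSValue::encode(jsNull())); // null or undefined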
2323
2324 void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
2325 {
2326 unsigned src = currentInstruction[1].u.operand;
2327 JSCell* ptr = currentInstruction[2].u.jsCell;
2328 unsigned target = currentInstruction[3].u.operand;
2329
2330 emitGetVirtualRegister(src, regT0);
2331 addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);
2332
2333 RECORD_JUMP_TARGET(target + 3);
2334 }
2335
2336 void JIT::emit_op_jsr(Instruction* currentInstruction)
2337 {
2338 int retAddrDst = currentInstruction[1].u.operand;
2339 int target = currentInstruction[2].u.operand;
2340 DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
2341 addJump(jump(), target + 2);
2342 m_jsrSites.append(JSRInfo(storeLocation, label()));
2343 killLastResultRegister();
2344 RECORD_JUMP_TARGET(target + 2);
2345 }
2346
2347 void JIT::emit_op_sret(Instruction* currentInstruction)
2348 {
2349 jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
2350 killLastResultRegister();
2351 }
2352
2353 void JIT::emit_op_eq(Instruction* currentInstruction)
2354 {
2355 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
2356 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2357 set32(Equal, regT1, regT0, regT0);
2358 emitTagAsBoolImmediate(regT0);
2359 emitPutVirtualRegister(currentInstruction[1].u.operand);
2360 }
2361
2362 void JIT::emit_op_bitnot(Instruction* currentInstruction)
2363 {
2364 emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
2365 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2366 #if USE(JSVALUE64)
2367 not32(regT0);
2368 emitFastArithIntToImmNoCheck(regT0, regT0);
2369 #else
2370 xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
2371 #endif
2372 emitPutVirtualRegister(currentInstruction[1].u.operand);
2373 }
2374
2375 void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
2376 {
2377 JITStubCall stubCall(this, cti_op_resolve_with_base);
2378 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
2379 stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
2380 stubCall.call(currentInstruction[2].u.operand);
2381 }
2382
2383 void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
2384 {
2385 JITStubCall stubCall(this, cti_op_new_func_exp);
2386 stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
2387 stubCall.call(currentInstruction[1].u.operand);
2388 }
2389
2390 void JIT::emit_op_jtrue(Instruction* currentInstruction)
2391 {
2392 unsigned target = currentInstruction[2].u.operand;
2393 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
2394
2395 Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
2396 addJump(emitJumpIfImmediateInteger(regT0), target + 2);
2397
2398 addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
2399 addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
2400
2401 isZero.link(this);
2402 RECORD_JUMP_TARGET(target + 2);
2403 }
2404
2405 void JIT::emit_op_neq(Instruction* currentInstruction)
2406 {
2407 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
2408 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2409 set32(NotEqual, regT1, regT0, regT0);
2410 emitTagAsBoolImmediate(regT0);
2411
2412 emitPutVirtualRegister(currentInstruction[1].u.operand);
2413
2414 }
2415
2416 void JIT::emit_op_bitxor(Instruction* currentInstruction)
2417 {
2418 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
2419 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2420 xorPtr(regT1, regT0);
2421 emitFastArithReTagImmediate(regT0, regT0);
2422 emitPutVirtualRegister(currentInstruction[1].u.operand);
2423 }
2424
2425 void JIT::emit_op_new_regexp(Instruction* currentInstruction)
2426 {
2427 JITStubCall stubCall(this, cti_op_new_regexp);
2428 stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
2429 stubCall.call(currentInstruction[1].u.operand);
2430 }
2431
2432 void JIT::emit_op_bitor(Instruction* currentInstruction)
2433 {
2434 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
2435 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2436 orPtr(regT1, regT0);
2437 emitPutVirtualRegister(currentInstruction[1].u.operand);
2438 }
2439
2440 void JIT::emit_op_throw(Instruction* currentInstruction)
2441 {
2442 JITStubCall stubCall(this, cti_op_throw);
2443 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
2444 stubCall.call();
2445 ASSERT(regT0 == returnValueRegister);
2446 #ifndef NDEBUG
2447 // cti_op_throw always changes its return address,
2448 // this point in the code should never be reached.
2449 breakpoint();
2450 #endif
2451 }
2452
2453 void JIT::emit_op_next_pname(Instruction* currentInstruction)
2454 {
2455 JITStubCall stubCall(this, cti_op_next_pname);
2456 stubCall.addArgument(currentInstruction[2].u.operand, regT2);
2457 stubCall.call();
2458 Jump endOfIter = branchTestPtr(Zero, regT0);
2459 emitPutVirtualRegister(currentInstruction[1].u.operand);
2460 addJump(jump(), currentInstruction[3].u.operand + 3);
2461 endOfIter.link(this);
2462 }
2463
2464 void JIT::emit_op_push_scope(Instruction* currentInstruction)
2465 {
2466 JITStubCall stubCall(this, cti_op_push_scope);
2467 stubCall.addArgument(currentInstruction[1].u.operand, regT2);
2468 stubCall.call(currentInstruction[1].u.operand);
2469 }
2470
2471 void JIT::emit_op_pop_scope(Instruction*)
2472 {
2473 JITStubCall(this, cti_op_pop_scope).call();
2474 }
2475
2476 void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
2477 {
2478 unsigned dst = currentInstruction[1].u.operand;
2479 unsigned src1 = currentInstruction[2].u.operand;
2480 unsigned src2 = currentInstruction[3].u.operand;
2481
2482 emitGetVirtualRegisters(src1, regT0, src2, regT1);
2483
2484 // Jump to a slow case if either operand is a number, or if both are JSCell*s.
2485 move(regT0, regT2);
2486 orPtr(regT1, regT2);
2487 addSlowCase(emitJumpIfJSCell(regT2));
2488 addSlowCase(emitJumpIfImmediateNumber(regT2));
2489
2490 if (type == OpStrictEq)
2491 set32(Equal, regT1, regT0, regT0);
2492 else
2493 set32(NotEqual, regT1, regT0, regT0);
2494 emitTagAsBoolImmediate(regT0);
2495
2496 emitPutVirtualRegister(dst);
2497 }
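
// Why the or-then-test above works (sketch): cells have all tag bits clear, so
// the OR of the two operands still looks like a cell only when *both* operands
// are cells, while the number tag bit is set in the OR when *either* operand is
// a number. One check on the merged word therefore proves a raw bit compare is
// sound:
//
//     uintptr_t merged = op1Bits | op2Bits;
//     if (looksLikeCell(merged) || looksLikeNumber(merged))
//         goto slowCase;                 // strings/doubles need real semantics
//     bool equal = (op1Bits == op2Bits); // pure immediates compare bitwise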
2498
2499 void JIT::emit_op_stricteq(Instruction* currentInstruction)
2500 {
2501 compileOpStrictEq(currentInstruction, OpStrictEq);
2502 }
2503
2504 void JIT::emit_op_nstricteq(Instruction* currentInstruction)
2505 {
2506 compileOpStrictEq(currentInstruction, OpNStrictEq);
2507 }
2508
2509 void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
2510 {
2511 int srcVReg = currentInstruction[2].u.operand;
2512 emitGetVirtualRegister(srcVReg, regT0);
2513
2514 Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
2515
2516 emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
2517 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2518 addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
2519
2520 wasImmediate.link(this);
2521
2522 emitPutVirtualRegister(currentInstruction[1].u.operand);
2523 }
2524
2525 void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
2526 {
2527 JITStubCall stubCall(this, cti_op_push_new_scope);
2528 stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
2529 stubCall.addArgument(currentInstruction[3].u.operand, regT2);
2530 stubCall.call(currentInstruction[1].u.operand);
2531 }
2532
2533 void JIT::emit_op_catch(Instruction* currentInstruction)
2534 {
2535 killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
2536 peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
2537 emitPutVirtualRegister(currentInstruction[1].u.operand);
2538 }
2539
2540 void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
2541 {
2542 JITStubCall stubCall(this, cti_op_jmp_scopes);
2543 stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
2544 stubCall.call();
2545 addJump(jump(), currentInstruction[2].u.operand + 2);
2546 RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
2547 }
2548
2549 void JIT::emit_op_switch_imm(Instruction* currentInstruction)
2550 {
2551 unsigned tableIndex = currentInstruction[1].u.operand;
2552 unsigned defaultOffset = currentInstruction[2].u.operand;
2553 unsigned scrutinee = currentInstruction[3].u.operand;
2554
2555 // create jump table for switch destinations, track this switch statement.
2556 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
2557 m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
2558 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
2559
2560 JITStubCall stubCall(this, cti_op_switch_imm);
2561 stubCall.addArgument(scrutinee, regT2);
2562 stubCall.addArgument(Imm32(tableIndex));
2563 stubCall.call();
2564 jump(regT0);
2565 }
2566
2567 void JIT::emit_op_switch_char(Instruction* currentInstruction)
2568 {
2569 unsigned tableIndex = currentInstruction[1].u.operand;
2570 unsigned defaultOffset = currentInstruction[2].u.operand;
2571 unsigned scrutinee = currentInstruction[3].u.operand;
2572
2573 // create jump table for switch destinations, track this switch statement.
2574 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
2575 m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
2576 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
2577
2578 JITStubCall stubCall(this, cti_op_switch_char);
2579 stubCall.addArgument(scrutinee, regT2);
2580 stubCall.addArgument(Imm32(tableIndex));
2581 stubCall.call();
2582 jump(regT0);
2583 }
2584
2585 void JIT::emit_op_switch_string(Instruction* currentInstruction)
2586 {
2587 unsigned tableIndex = currentInstruction[1].u.operand;
2588 unsigned defaultOffset = currentInstruction[2].u.operand;
2589 unsigned scrutinee = currentInstruction[3].u.operand;
2590
2591 // create jump table for switch destinations, track this switch statement.
2592 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
2593 m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
2594
2595 JITStubCall stubCall(this, cti_op_switch_string);
2596 stubCall.addArgument(scrutinee, regT2);
2597 stubCall.addArgument(Imm32(tableIndex));
2598 stubCall.call();
2599 jump(regT0);
2600 }
2601
2602 void JIT::emit_op_new_error(Instruction* currentInstruction)
2603 {
2604 JITStubCall stubCall(this, cti_op_new_error);
2605 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
2606 stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
2607 stubCall.addArgument(Imm32(m_bytecodeIndex));
2608 stubCall.call(currentInstruction[1].u.operand);
2609 }
2610
2611 void JIT::emit_op_debug(Instruction* currentInstruction)
2612 {
2613 JITStubCall stubCall(this, cti_op_debug);
2614 stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
2615 stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
2616 stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
2617 stubCall.call();
2618 }
2619
2620 void JIT::emit_op_eq_null(Instruction* currentInstruction)
2621 {
2622 unsigned dst = currentInstruction[1].u.operand;
2623 unsigned src1 = currentInstruction[2].u.operand;
2624
2625 emitGetVirtualRegister(src1, regT0);
2626 Jump isImmediate = emitJumpIfNotJSCell(regT0);
2627
2628 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2629 setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
2630
2631 Jump wasNotImmediate = jump();
2632
2633 isImmediate.link(this);
2634
2635 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
2636 setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
2637
2638 wasNotImmediate.link(this);
2639
2640 emitTagAsBoolImmediate(regT0);
2641 emitPutVirtualRegister(dst);
2642
2643 }
2644
2645 void JIT::emit_op_neq_null(Instruction* currentInstruction)
2646 {
2647 unsigned dst = currentInstruction[1].u.operand;
2648 unsigned src1 = currentInstruction[2].u.operand;
2649
2650 emitGetVirtualRegister(src1, regT0);
2651 Jump isImmediate = emitJumpIfNotJSCell(regT0);
2652
2653 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
2654 setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
2655
2656 Jump wasNotImmediate = jump();
2657
2658 isImmediate.link(this);
2659
2660 andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
2661 setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
2662
2663 wasNotImmediate.link(this);
2664
2665 emitTagAsBoolImmediate(regT0);
2666 emitPutVirtualRegister(dst);
2667
2668 }
2669
2670 void JIT::emit_op_enter(Instruction*)
2671 {
2672 // Even though CTI doesn't use them, we initialize our constant
2673 // registers to zap stale pointers, to avoid unnecessarily prolonging
2674 // object lifetime and increasing GC pressure.
2675 size_t count = m_codeBlock->m_numVars;
2676 for (size_t j = 0; j < count; ++j)
2677 emitInitRegister(j);
2678
2679 }
2680
2681 void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
2682 {
2683 // Even though CTI doesn't use them, we initialize our constant
2684 // registers to zap stale pointers, to avoid unnecessarily prolonging
2685 // object lifetime and increasing GC pressure.
2686 size_t count = m_codeBlock->m_numVars;
2687 for (size_t j = 0; j < count; ++j)
2688 emitInitRegister(j);
2689
2690 JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
2691 }
2692
2693 void JIT::emit_op_create_arguments(Instruction*)
2694 {
2695 Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
2696 if (m_codeBlock->m_numParameters == 1)
2697 JITStubCall(this, cti_op_create_arguments_no_params).call();
2698 else
2699 JITStubCall(this, cti_op_create_arguments).call();
2700 argsCreated.link(this);
2701 }
2702
2703 void JIT::emit_op_init_arguments(Instruction*)
2704 {
2705 storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
2706 }
2707
2708 void JIT::emit_op_convert_this(Instruction* currentInstruction)
2709 {
2710 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
2711
2712 emitJumpSlowCaseIfNotJSCell(regT0);
2713 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
2714 addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
2715
2716 }
2717
2718 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
2719 {
2720 peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
2721 Jump noProfiler = branchTestPtr(Zero, Address(regT1));
2722
2723 JITStubCall stubCall(this, cti_op_profile_will_call);
2724 stubCall.addArgument(currentInstruction[1].u.operand, regT1);
2725 stubCall.call();
2726 noProfiler.link(this);
2727
2728 }
2729
2730 void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
2731 {
2732 peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
2733 Jump noProfiler = branchTestPtr(Zero, Address(regT1));
2734
2735 JITStubCall stubCall(this, cti_op_profile_did_call);
2736 stubCall.addArgument(currentInstruction[1].u.operand, regT1);
2737 stubCall.call();
2738 noProfiler.link(this);
2739 }
2740
2741
2742 // Slow cases
2743
2744 void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2745 {
2746 linkSlowCase(iter);
2747 linkSlowCase(iter);
2748 JITStubCall stubCall(this, cti_op_convert_this);
2749 stubCall.addArgument(regT0);
2750 stubCall.call(currentInstruction[1].u.operand);
2751 }
2752
2753 void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2754 {
2755 linkSlowCase(iter);
2756 linkSlowCase(iter);
2757 emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
2758 emitPutVirtualRegister(currentInstruction[1].u.operand);
2759 }
2760
2761 void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2762 {
2763 linkSlowCase(iter);
2764
2765 JITStubCall stubCall(this, cti_op_to_primitive);
2766 stubCall.addArgument(regT0);
2767 stubCall.call(currentInstruction[1].u.operand);
2768 }
2769
2770 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2771 {
2772 // The slow case that handles accesses to arrays (below) may jump back up to here.
2773 Label beginGetByValSlow(this);
2774
2775 Jump notImm = getSlowCase(iter);
2776 linkSlowCase(iter);
2777 linkSlowCase(iter);
2778 emitFastArithIntToImmNoCheck(regT1, regT1);
2779
2780 notImm.link(this);
2781 JITStubCall stubCall(this, cti_op_get_by_val);
2782 stubCall.addArgument(regT0);
2783 stubCall.addArgument(regT1);
2784 stubCall.call(currentInstruction[1].u.operand);
2785 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
2786
2787 // This is the slow case that handles accesses to arrays above the fast cut-off.
2788 // First, check if this is an access to the vector
2789 linkSlowCase(iter);
2790 branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
2791
2792 // okay, missed the fast region, but it is still in the vector. Get the value.
2793 loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
2794 // Check whether the value loaded is zero; if so we need to return undefined.
2795 branchTestPtr(Zero, regT2, beginGetByValSlow);
2796 move(regT2, regT0);
2797 emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
2798 }
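
// Sketch of the tail of this slow path (illustrative): an index past the fast
// cut-off may still be inside the allocated vector, and holes are stored as 0:
//
//     if (index >= storage->m_vectorLength)
//         goto beginGetByValSlow;            // outside the vector entirely
//     JSValue v = storage->m_vector[index];
//     if (!v)
//         goto beginGetByValSlow;            // hole: needs prototype lookup etc.
//     result = v;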
2799
2800 void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2801 {
2802 unsigned op1 = currentInstruction[1].u.operand;
2803 unsigned op2 = currentInstruction[2].u.operand;
2804 unsigned target = currentInstruction[3].u.operand;
2805 if (isOperandConstantImmediateInt(op2)) {
2806 linkSlowCase(iter);
2807 JITStubCall stubCall(this, cti_op_loop_if_less);
2808 stubCall.addArgument(regT0);
2809 stubCall.addArgument(op2, regT2);
2810 stubCall.call();
2811 emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
2812 } else if (isOperandConstantImmediateInt(op1)) {
2813 linkSlowCase(iter);
2814 JITStubCall stubCall(this, cti_op_loop_if_less);
2815 stubCall.addArgument(op1, regT2);
2816 stubCall.addArgument(regT0);
2817 stubCall.call();
2818 emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
2819 } else {
2820 linkSlowCase(iter);
2821 linkSlowCase(iter);
2822 JITStubCall stubCall(this, cti_op_loop_if_less);
2823 stubCall.addArgument(regT0);
2824 stubCall.addArgument(regT1);
2825 stubCall.call();
2826 emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
2827 }
2828 }

void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    } else {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
    }
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Normal slow cases - either the subscript is not an immediate int, or the base is not an array.
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithIntToImmNoCheck(regT1, regT1);

    notImm.link(this); {
        JITStubCall stubCall(this, cti_op_put_by_val);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
        stubCall.call();
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
    }

    // Slow cases for immediate int accesses to arrays above the fast cut-off.
    linkSlowCase(iter);
    linkSlowCase(iter); {
        JITStubCall stubCall(this, cti_op_put_by_val_array);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
        stubCall.call();
    }
}
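
// Unlike get_by_val, the array-specific tail here does not retry an inline
// access: a write above the fast cut-off may need to grow the vector or update
// the array's length, so it goes straight to the dedicated
// cti_op_put_by_val_array stub instead of looping back through the generic
// cti_op_put_by_val path.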

void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
}

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}
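
// The xorPtr above undoes the tag manipulation the fast path performed before
// bailing out: emit_op_not first xors out the boolean tag to test that the
// operand really is a boolean, so re-applying the same FullTagTypeBool xor
// here recovers the original operand value before it is passed to cti_op_not.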

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted - jfalse reuses cti_op_jtrue and takes the jump when the stub returns false
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
}
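
// loop_if_true, jfalse, and jtrue all share the single cti_op_jtrue stub; the
// generators differ only in the branch condition applied to the stub's result
// (NonZero to take the jump, Zero for the inverted jfalse) and in the
// jump-offset operand with which they re-enter the hot path.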

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    xor32(Imm32(0x1), regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
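
// op_neq has no stub of its own: it reuses cti_op_eq and inverts the low bit
// of the raw boolean result before boxing it. In effect:
//
//     int eq = cti_op_eq(a, b);        // stub computes equality as 0 or 1
//     result = jsBoolean(eq ^ 0x1);    // what xor32 + emitTagAsBoolImmediate emit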

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}
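
// stricteq and nstricteq each consume two slow-case entries, one per bail-out
// jump recorded by their fast paths, and defer the full strict-equality
// semantics to their respective stubs; nstricteq's stub returns the negated
// result, so no inversion is needed here.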

void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}
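
// The five linkSlowCase() calls consume the five bail-out jumps recorded when
// op_instanceof was compiled. All three operands are reloaded from their
// virtual registers; regT2 is just a scratch register for each addArgument,
// which copies the value into the stub's argument slots before reusing it.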

void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
}
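
// The call family funnels into shared helpers: op_call, op_call_eval, and
// op_construct go through compileOpCallSlowCase, each consuming the next
// per-call link record via m_callLinkInfoIndex++, while the varargs form uses
// compileOpCallVarargsSlowCase and, as its signature shows, needs no link
// record of its own.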

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}
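
// linkSlowCaseIfNotJSCell is the conditional counterpart of linkSlowCase: the
// fast path only records a not-a-cell bail-out when the operand is not already
// known to be a cell, so the slow path must consume that slow-case entry under
// the same condition to keep the iterator in step with the recorded jumps.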

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)