/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

#define __ m_assembler.

using namespace std;

namespace JSC {
48 | ||
49 | void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2) | |
50 | { | |
51 | emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); | |
52 | // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent. | |
53 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
54 | emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); | |
55 | emitFastArithImmToInt(X86::eax); | |
56 | emitFastArithImmToInt(X86::ecx); | |
57 | #if !PLATFORM(X86) | |
58 | // Mask with 0x1f as per ecma-262 11.7.2 step 7. | |
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
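    // For example, in JavaScript (1 << 32) evaluates to 1, because only the low five bits of the shift count are used.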
    and32(Imm32(0x1f), X86::ecx);
#endif
    lshift32(X86::ecx, X86::eax);
#if !USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(joAdd32(X86::eax, X86::eax));
    signExtend32ToPtr(X86::eax, X86::eax);
#endif
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(Interpreter::cti_op_lshift);
    emitPutVirtualRegister(result);
}
91 | ||
92 | void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2) | |
93 | { | |
94 | if (isOperandConstantImmediateInt(op2)) { | |
95 | emitGetVirtualRegister(op1, X86::eax); | |
96 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
97 | // Mask with 0x1f as per ecma-262 11.7.2 step 7. | |
98 | #if USE(ALTERNATE_JSIMMEDIATE) | |
99 | rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax); | |
100 | #else | |
101 | rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax); | |
102 | #endif | |
103 | } else { | |
104 | emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); | |
105 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
106 | emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); | |
107 | emitFastArithImmToInt(X86::ecx); | |
108 | #if !PLATFORM(X86) | |
109 | // Mask with 0x1f as per ecma-262 11.7.2 step 7. | |
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), X86::ecx);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(X86::ecx, X86::eax);
#else
        rshiftPtr(X86::ecx, X86::eax);
#endif
    }
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), X86::eax);
#endif
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op2))
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
    else {
        linkSlowCase(iter);
        emitPutJITStubArg(X86::ecx, 2);
    }

    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_rshift);
    emitPutVirtualRegister(result);
}
140 | ||
141 | void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2) | |
142 | { | |
143 | if (isOperandConstantImmediateInt(op1)) { | |
144 | emitGetVirtualRegister(op2, X86::eax); | |
145 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
146 | #if USE(ALTERNATE_JSIMMEDIATE) | |
147 | int32_t imm = getConstantOperandImmediateInt(op1); | |
148 | andPtr(Imm32(imm), X86::eax); | |
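        // Imm32 values sign-extend to pointer width here, so a negative constant keeps the high tag bits set and
        // the result stays boxed; a non-negative constant clears them, hence the conditional re-tag below.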
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), X86::eax);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), X86::eax);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), X86::eax);
#endif
    } else {
        emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
        andPtr(X86::edx, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    }
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
    } else {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::edx, 2);
    }
    emitCTICall(Interpreter::cti_op_bitand);
    emitPutVirtualRegister(result);
}
188 | ||
189 | void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2) | |
190 | { | |
191 | emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); | |
192 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
193 | emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); | |
194 | #if USE(ALTERNATE_JSIMMEDIATE) | |
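    // mod32 compiles down to idiv on x86, which raises a divide-error fault on a zero divisor, so a zero rhs must
    // take the slow path.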
    addSlowCase(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
    mod32(X86::ecx, X86::eax, X86::edx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    mod32(X86::ecx, X86::eax, X86::edx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
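    // The fast path detagged both operands; restore the tags before handing them to the stub.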
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(Interpreter::cti_op_mod);
    emitPutVirtualRegister(result);
}
226 | ||
227 | void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst) | |
228 | { | |
229 | emitGetVirtualRegister(srcDst, X86::eax); | |
230 | move(X86::eax, X86::edx); | |
231 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
232 | #if USE(ALTERNATE_JSIMMEDIATE) | |
233 | addSlowCase(joAdd32(Imm32(1), X86::edx)); | |
234 | emitFastArithIntToImmNoCheck(X86::edx, X86::edx); | |
235 | #else | |
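    // In this encoding the integer payload sits above the tag bit, so adding (1 << IntegerPayloadShift) increments
    // the boxed value in place; overflow is caught by the jo check.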
    addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitPutVirtualRegister(srcDst, X86::edx);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_post_inc);
    emitPutVirtualRegister(srcDst, X86::edx);
    emitPutVirtualRegister(result);
}
251 | ||
252 | void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst) | |
253 | { | |
254 | emitGetVirtualRegister(srcDst, X86::eax); | |
255 | move(X86::eax, X86::edx); | |
256 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
257 | #if USE(ALTERNATE_JSIMMEDIATE) | |
258 | addSlowCase(joSub32(Imm32(1), X86::edx)); | |
259 | emitFastArithIntToImmNoCheck(X86::edx, X86::edx); | |
260 | #else | |
261 | addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx)); | |
262 | signExtend32ToPtr(X86::edx, X86::edx); | |
263 | #endif | |
264 | emitPutVirtualRegister(srcDst, X86::edx); | |
265 | emitPutVirtualRegister(result); | |
266 | } | |
267 | void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter) | |
268 | { | |
269 | linkSlowCase(iter); | |
270 | linkSlowCase(iter); | |
271 | emitPutJITStubArg(X86::eax, 1); | |
272 | emitCTICall(Interpreter::cti_op_post_dec); | |
273 | emitPutVirtualRegister(srcDst, X86::edx); | |
274 | emitPutVirtualRegister(result); | |
275 | } | |
276 | ||
277 | void JIT::compileFastArith_op_pre_inc(unsigned srcDst) | |
278 | { | |
279 | emitGetVirtualRegister(srcDst, X86::eax); | |
280 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
281 | #if USE(ALTERNATE_JSIMMEDIATE) | |
282 | addSlowCase(joAdd32(Imm32(1), X86::eax)); | |
283 | emitFastArithIntToImmNoCheck(X86::eax, X86::eax); | |
284 | #else | |
285 | addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax)); | |
286 | signExtend32ToPtr(X86::eax, X86::eax); | |
287 | #endif | |
288 | emitPutVirtualRegister(srcDst); | |
289 | } | |
290 | void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter) | |
291 | { | |
292 | Jump notImm = getSlowCase(iter); | |
293 | linkSlowCase(iter); | |
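    // The overflow case left a clobbered value in eax, so reload the operand; the not-immediate case jumps past
    // the reload because eax still holds the original value.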
    emitGetVirtualRegister(srcDst, X86::eax);
    notImm.link(this);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_pre_inc);
    emitPutVirtualRegister(srcDst);
}

void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(joSub32(Imm32(1), X86::eax));
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
#else
    addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
    signExtend32ToPtr(X86::eax, X86::eax);
#endif
    emitPutVirtualRegister(srcDst);
}
void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, X86::eax);
    notImm.link(this);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_pre_dec);
    emitPutVirtualRegister(srcDst);
}
324 | ||
325 | ||
326 | #if !ENABLE(JIT_OPTIMIZE_ARITHMETIC) | |
327 | ||
328 | void JIT::compileFastArith_op_add(Instruction* currentInstruction) | |
329 | { | |
330 | unsigned result = currentInstruction[1].u.operand; | |
331 | unsigned op1 = currentInstruction[2].u.operand; | |
332 | unsigned op2 = currentInstruction[3].u.operand; | |
333 | ||
334 | emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); | |
335 | emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); | |
336 | emitCTICall(Interpreter::cti_op_add); | |
337 | emitPutVirtualRegister(result); | |
338 | } | |
339 | void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&) | |
340 | { | |
341 | ASSERT_NOT_REACHED(); | |
342 | } | |
343 | ||
344 | void JIT::compileFastArith_op_mul(Instruction* currentInstruction) | |
345 | { | |
346 | unsigned result = currentInstruction[1].u.operand; | |
347 | unsigned op1 = currentInstruction[2].u.operand; | |
348 | unsigned op2 = currentInstruction[3].u.operand; | |
349 | ||
350 | emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); | |
351 | emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); | |
352 | emitCTICall(Interpreter::cti_op_mul); | |
353 | emitPutVirtualRegister(result); | |
354 | } | |
355 | void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&) | |
356 | { | |
357 | ASSERT_NOT_REACHED(); | |
358 | } | |
359 | ||
360 | void JIT::compileFastArith_op_sub(Instruction* currentInstruction) | |
361 | { | |
362 | unsigned result = currentInstruction[1].u.operand; | |
363 | unsigned op1 = currentInstruction[2].u.operand; | |
364 | unsigned op2 = currentInstruction[3].u.operand; | |
365 | ||
366 | emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); | |
367 | emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); | |
368 | emitCTICall(Interpreter::cti_op_sub); | |
369 | emitPutVirtualRegister(result); | |
370 | } | |
371 | void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&) | |
372 | { | |
373 | ASSERT_NOT_REACHED(); | |
374 | } | |
375 | ||
376 | #elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC) | |
377 | ||
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    if (opcodeID == op_add)
        addSlowCase(joAdd32(X86::edx, X86::eax));
    else if (opcodeID == op_sub)
        addSlowCase(joSub32(X86::edx, X86::eax));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(joMul32(X86::edx, X86::eax));
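        // A zero result may really need to be -0 (e.g. 0 * -1 in JS), which is not representable as an immediate
        // int, so bail to the slow path whenever the product is zero.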
        addSlowCase(jz32(X86::eax));
    }
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
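    // (In the 64-bit value encoding this holds because TagTypeNumber is 0xFFFF000000000000 and DoubleEncodeOffset
    // is 1 << 48; their 64-bit sum wraps to zero.)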
399 | ||
400 | Jump notImm1 = getSlowCase(iter); | |
401 | Jump notImm2 = getSlowCase(iter); | |
402 | ||
403 | linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare. | |
404 | if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number. | |
405 | linkSlowCase(iter); | |
406 | emitGetVirtualRegister(op1, X86::eax); | |
407 | ||
408 | Label stubFunctionCall(this); | |
409 | emitPutJITStubArg(X86::eax, 1); | |
410 | emitPutJITStubArg(X86::edx, 2); | |
411 | if (opcodeID == op_add) | |
412 | emitCTICall(Interpreter::cti_op_add); | |
413 | else if (opcodeID == op_sub) | |
414 | emitCTICall(Interpreter::cti_op_sub); | |
415 | else { | |
416 | ASSERT(opcodeID == op_mul); | |
417 | emitCTICall(Interpreter::cti_op_mul); | |
418 | } | |
419 | Jump end = jump(); | |
420 | ||
421 | // if we get here, eax is not an int32, edx not yet checked. | |
422 | notImm1.link(this); | |
423 | if (!types.first().definitelyIsNumber()) | |
424 | emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this); | |
425 | if (!types.second().definitelyIsNumber()) | |
426 | emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this); | |
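    // Unbox the double in eax: adding TagTypeNumber undoes the DoubleEncodeOffset (see the COMPILE_ASSERT above),
    // leaving the raw IEEE 754 bit pattern to move into the XMM register.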
    addPtr(tagTypeNumberRegister, X86::eax);
    m_assembler.movq_rr(X86::eax, X86::xmm1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
    m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, X86::edx);
    m_assembler.movq_rr(X86::edx, X86::xmm2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
    else if (opcodeID == op_sub)
        m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
    else {
        ASSERT(opcodeID == op_mul);
        m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
    }
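    // Re-box the double result: move the bits back to eax and re-apply the DoubleEncodeOffset by subtracting
    // TagTypeNumber.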
    m_assembler.movq_rr(X86::xmm1, X86::eax);
    subPtr(tagTypeNumberRegister, X86::eax);

    end.link(this);
}

void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
    } else if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
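    // A positive constant can never produce -0: in JS only (0 * negative) or (negative * 0) yields -0, so no zero
    // check is needed on this fast path.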
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_mul);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
579 | ||
580 | #else | |
581 | ||
582 | typedef X86Assembler::JmpSrc JmpSrc; | |
583 | typedef X86Assembler::JmpDst JmpDst; | |
584 | typedef X86Assembler::XMMRegisterID XMMRegisterID; | |
585 | ||
586 | #if PLATFORM(MAC) | |
587 | ||
588 | static inline bool isSSE2Present() | |
589 | { | |
590 | return true; // All X86 Macs are guaranteed to support at least SSE2 | |
591 | } | |
592 | ||
593 | #else | |
594 | ||
595 | static bool isSSE2Present() | |
596 | { | |
597 | static const int SSE2FeatureBit = 1 << 26; | |
598 | struct SSE2Check { | |
599 | SSE2Check() | |
600 | { | |
601 | int flags; | |
602 | #if COMPILER(MSVC) | |
603 | _asm { | |
604 | mov eax, 1 // cpuid function 1 gives us the standard feature set | |
605 | cpuid; | |
606 | mov flags, edx; | |
607 | } | |
608 | #else | |
609 | flags = 0; | |
610 | // FIXME: Add GCC code to do above asm | |
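            // A GCC inline-asm equivalent might look like the sketch below (untested here; %ebx is saved and
            // restored because PIC builds reserve it):
            //
            //     int function = 1;
            //     asm volatile(
            //         "pushl %%ebx\n\t"
            //         "cpuid\n\t"
            //         "popl %%ebx\n\t"
            //         : "=d" (flags), "+a" (function)
            //         :
            //         : "ecx");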
#endif
            present = (flags & SSE2FeatureBit) != 0;
        }
        bool present;
    };
    static SSE2Check check;
    return check.present;
}

#endif

/*
    This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.

    In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
    is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).

    However, if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
    control will fall through from the code planted.
*/
void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
{
    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
    __ cvttsd2si_rr(xmmSource, tempReg1);
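    // The shift-left/shift-right pair truncates the integer to the 31 bits a JSImmediate payload can hold, so
    // out-of-range values will fail the equality check below.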
    __ addl_rr(tempReg1, tempReg1);
    __ sarl_i8r(1, tempReg1);
    __ cvtsi2sd_rr(tempReg1, tempXmm);
    // Compare & branch if immediate.
    __ ucomisd_rr(tempXmm, xmmSource);
    JmpSrc resultIsImm = __ je();
    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();

    // Store the result to the JSNumberCell and jump.
    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
    if (jsNumberCell != X86::eax)
        __ movl_rr(jsNumberCell, X86::eax);
    emitPutVirtualRegister(dst);
    *wroteJSNumberCell = __ jmp();

    __ link(resultIsImm, __ label());
    // value == (double)(JSImmediate)value... or at least, it looks that way...
    // ucomi will report that (0 == -0), and will report equal if either input is NaN (result is unordered).
    __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
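    // -0.0 has the bit pattern 0x8000000000000000, so its top 16 bits (word 3) are exactly 0x8000, while +0.0 gives 0x0000.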
    __ pextrw_irr(3, xmmSource, tempReg2);
    __ cmpl_ir(0x8000, tempReg2);
    __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
    // Yes it really really really is representable as a JSImmediate.
    emitFastArithIntToImmNoCheck(tempReg1, X86::eax);
    emitPutVirtualRegister(dst);
}
661 | ||
662 | void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types) | |
663 | { | |
664 | Structure* numberStructure = m_globalData->numberStructure.get(); | |
665 | JmpSrc wasJSNumberCell1; | |
666 | JmpSrc wasJSNumberCell1b; | |
667 | JmpSrc wasJSNumberCell2; | |
668 | JmpSrc wasJSNumberCell2b; | |
669 | ||
670 | emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx); | |
671 | ||
672 | if (types.second().isReusable() && isSSE2Present()) { | |
673 | ASSERT(types.second().mightBeNumber()); | |
674 | ||
675 | // Check op2 is a number | |
676 | __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx); | |
677 | JmpSrc op2imm = __ jne(); | |
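        // (A nonzero test result means the integer tag bit is set, i.e. op2 is an immediate int rather than a cell.)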
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1) In this case src2 is a reusable number cell.
        // Slow case if src1 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src1 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src1 is an immediate
        __ link(op1imm, __ label());
        emitFastArithImmToInt(X86::eax);
        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
        // (1c)
        __ link(loadedDouble, __ label());
        if (opcodeID == op_add)
            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        }

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
        wasJSNumberCell2b = __ jmp();

        // (2) This handles cases where src2 is an immediate number.
        // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
        __ link(op2imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    } else if (types.first().isReusable() && isSSE2Present()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1) In this case src1 is a reusable number cell.
        // Slow case if src2 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src2 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src2 is an immediate
        __ link(op2imm, __ label());
        emitFastArithImmToInt(X86::edx);
        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
        // (1c)
        __ link(loadedDouble, __ label());
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        if (opcodeID == op_add)
            __ addsd_rr(X86::xmm1, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_rr(X86::xmm1, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_rr(X86::xmm1, X86::xmm0);
        }
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
        emitPutVirtualRegister(dst);

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
        wasJSNumberCell1b = __ jmp();

        // (2) This handles cases where src1 is an immediate number.
        // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
        __ link(op1imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    } else
        emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);

    if (opcodeID == op_add) {
        emitFastArithDeTagImmediate(X86::eax);
        __ addl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
    } else if (opcodeID == op_sub) {
        __ subl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either are zero
        emitFastArithImmToInt(X86::edx);
        JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
        __ testl_rr(X86::edx, X86::edx);
        JmpSrc op2NonZero = __ jne();
        __ link(op1Zero, __ label());
        // if either input is zero, add the two together, and check if the result is < 0.
        // If it is, we have a problem: with N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
        __ movl_rr(X86::eax, X86::ecx);
        __ addl_rr(X86::edx, X86::ecx);
        addSlowCase(__ js());
        // Skip the above check if neither input is zero
        __ link(op2NonZero, __ label());
        __ imull_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    }
    emitPutVirtualRegister(dst);

    if (types.second().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell2, __ label());
        __ link(wasJSNumberCell2b, __ label());
    } else if (types.first().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell1, __ label());
        __ link(wasJSNumberCell1b, __ label());
    }
}
812 | ||
813 | void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types) | |
814 | { | |
815 | linkSlowCase(iter); | |
816 | if (types.second().isReusable() && isSSE2Present()) { | |
817 | if (!types.first().definitelyIsNumber()) { | |
818 | linkSlowCaseIfNotJSCell(iter, src1); | |
819 | linkSlowCase(iter); | |
820 | } | |
821 | if (!types.second().definitelyIsNumber()) { | |
822 | linkSlowCaseIfNotJSCell(iter, src2); | |
823 | linkSlowCase(iter); | |
824 | } | |
825 | } else if (types.first().isReusable() && isSSE2Present()) { | |
826 | if (!types.first().definitelyIsNumber()) { | |
827 | linkSlowCaseIfNotJSCell(iter, src1); | |
828 | linkSlowCase(iter); | |
829 | } | |
830 | if (!types.second().definitelyIsNumber()) { | |
831 | linkSlowCaseIfNotJSCell(iter, src2); | |
832 | linkSlowCase(iter); | |
833 | } | |
834 | } | |
835 | linkSlowCase(iter); | |
836 | ||
837 | // additional entry point to handle -0 cases. | |
838 | if (opcodeID == op_mul) | |
839 | linkSlowCase(iter); | |
840 | ||
841 | emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx); | |
842 | emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx); | |
843 | if (opcodeID == op_add) | |
844 | emitCTICall(Interpreter::cti_op_add); | |
845 | else if (opcodeID == op_sub) | |
846 | emitCTICall(Interpreter::cti_op_sub); | |
847 | else { | |
848 | ASSERT(opcodeID == op_mul); | |
849 | emitCTICall(Interpreter::cti_op_mul); | |
850 | } | |
851 | emitPutVirtualRegister(dst); | |
852 | } | |
853 | ||
854 | void JIT::compileFastArith_op_add(Instruction* currentInstruction) | |
855 | { | |
856 | unsigned result = currentInstruction[1].u.operand; | |
857 | unsigned op1 = currentInstruction[2].u.operand; | |
858 | unsigned op2 = currentInstruction[3].u.operand; | |
859 | ||
860 | if (isOperandConstantImmediateInt(op1)) { | |
861 | emitGetVirtualRegister(op2, X86::eax); | |
862 | emitJumpSlowCaseIfNotImmediateInteger(X86::eax); | |
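        // Since the payload sits above the tag bit, adding (constant << IntegerPayloadShift) adds the constant to
        // the boxed int while leaving the tag intact; jo catches payload overflow.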
        addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        if (types.first().mightBeNumber() && types.second().mightBeNumber())
            compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
        else {
            emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
            emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
            emitCTICall(Interpreter::cti_op_add);
            emitPutVirtualRegister(result);
        }
    }
}
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
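        // The fast path's add has already executed; subtract the shifted constant to recover the original boxed
        // operand before calling the stub.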
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}

void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_mul);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

#endif

} // namespace JSC

#endif // ENABLE(JIT)