/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

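// Byte length of the "call r11" / "jmp r11" instruction (REX.B + FF /2 or /4).
// The patchable 64-bit immediate loaded into r11 ends exactly this many bytes
// before the end of the call or jump, which is where the link/repatch code
// below expects to find it (see the ASSERTs in call() and tailRecursiveCall()).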
#define REPATCH_OFFSET_CALL_R11 3

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
protected:
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;

public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

    void add32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void or32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

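    // movl_mEAX uses the moffs form of MOV, which can only target eax. When
    // loading into any other register we park eax's old value in dest, load
    // into eax, then swap the two so eax ends up restored and dest holds the
    // loaded value.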
    void load32(void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(ImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
    }

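    // Note: this reads the int32 out of src at code-generation time and bakes
    // it into the instruction stream as an immediate; the generated code does
    // not re-read the address when it runs.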
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

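    // movl_EAXm likewise only stores from eax, so eax is preserved in the
    // scratch register around the store.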
    void store32(Imm32 imm, void* address)
    {
        move(X86Registers::eax, scratchRegister);
        move(imm, X86Registers::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86Registers::eax);
    }

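    // Emits a patchable call: a movq of a 64-bit immediate into r11 followed
    // by an indirect call through r11. linkCall()/repatchCall() below rewrite
    // that immediate, found REPATCH_OFFSET_CALL_R11 bytes before the end of
    // the call, to retarget the callee.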
    Call call()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
        ASSERT(differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void addPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void addPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
    }

    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void addPtr(Imm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void addPtr(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        addPtr(imm, Address(scratchRegister));
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void andPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void orPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
    }

    void orPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.subq_ir(imm.m_value, dest);
    }

    void subPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

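    // Same eax-only absolute-address encoding trick as load32 above, using
    // the 64-bit movq form.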
    void loadPtr(void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movq_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storePtr(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            swap(X86Registers::eax, src);
            m_assembler.movq_EAXm(address);
            swap(X86Registers::eax, src);
        }
    }

    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

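    // Bit-for-bit transfers between a GPR and an XMM register (MOVQ); no
    // numeric conversion is performed.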
    void movePtrToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

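    // setCC only writes the low byte of dest, so the result is zero-extended
    // with movzbl to leave a clean 0 or 1 in the full register.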
    void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        move(ImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        // A mask confined to the low seven bits can be tested with a one-byte
        // testb; keeping the sign bit of the byte clear means every condition
        // code matches what a full 64-bit test would produce.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        addPtr(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        subPtr(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        storePtr(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
    {
        ImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

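    // The returned label marks the load instruction so that, as the name
    // suggests, its opcode can later be repatched into an LEA; MOV r64, m and
    // LEA r64, m share the same instruction length, so the address computation
    // can presumably be swapped in without moving any code.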
    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        loadPtr(address, dest);
        return label;
    }

    bool supportsFloatingPoint() const { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    bool supportsFloatingPointTruncate() const { return true; }
    bool supportsFloatingPointSqrt() const { return true; }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPATCH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)

#endif // MacroAssemblerX86_64_h