]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/X86Assembler.h
JavaScriptCore-554.1.tar.gz
[apple/javascriptcore.git] / assembler / X86Assembler.h
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #include <wtf/Platform.h>
30
31 #if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33 #include "AssemblerBuffer.h"
34 #include <stdint.h>
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
37
38 namespace JSC {
39
// True if the value lies in [-128, 127], i.e. it survives truncation to a
// signed byte followed by sign-extension back to 32 bits, and so can be
// encoded as a one-byte immediate.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value >= -128 && value <= 127; }
#if PLATFORM(X86_64)
// True if the value survives truncation to int32_t and sign-extension back,
// i.e. it can be encoded as a sign-extended 32-bit immediate in a 64-bit op.
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
// True if the value survives truncation to uint32_t and zero-extension back.
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
#endif
45
namespace X86 {
    // General-purpose registers. Enumerator order matches the hardware
    // register numbers used in ModR/M / SIB / REX encodings:
    // eax == 0 ... edi == 7, then r8 ... r15 on x86-64.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if PLATFORM(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers; enumerator values match hardware register numbers 0-7.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
80
// Low-level x86 / x86-64 instruction emitter: each method appends the machine
// encoding of one instruction to an internal AssemblerBuffer.
class X86Assembler {
public:
    typedef X86::RegisterID RegisterID;
    typedef X86::XMMRegisterID XMMRegisterID;
    typedef XMMRegisterID FPRegisterID;

    // x86 condition codes, declared in hardware encoding order (0x0-0xF).
    // A Condition value is added to the Jcc / SETcc opcode bases to form the
    // concrete opcode (see jccRel32() and setccOpcode()).
    typedef enum {
        ConditionO,
        ConditionNO,
        ConditionB,
        ConditionAE,
        ConditionE,
        ConditionNE,
        ConditionBE,
        ConditionA,
        ConditionS,
        ConditionNS,
        ConditionP,
        ConditionNP,
        ConditionL,
        ConditionGE,
        ConditionLE,
        ConditionG,

        // Aliases: carry set/clear test the same flag as unsigned below /
        // above-or-equal.
        ConditionC = ConditionB,
        ConditionNC = ConditionAE,
    } Condition;
108
private:
    // One-byte opcodes (and instruction prefixes), named
    // <mnemonic>_<operand forms> using the Ev/Gv/Ib/Iz operand notation of
    // the Intel instruction set reference. Values are the literal opcode bytes.
    typedef enum {
        OP_ADD_EvGv = 0x01,
        OP_ADD_GvEv = 0x03,
        OP_OR_EvGv = 0x09,
        OP_OR_GvEv = 0x0B,
        OP_2BYTE_ESCAPE = 0x0F,
        OP_AND_EvGv = 0x21,
        OP_AND_GvEv = 0x23,
        OP_SUB_EvGv = 0x29,
        OP_SUB_GvEv = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_XOR_EvGv = 0x31,
        OP_XOR_GvEv = 0x33,
        OP_CMP_EvGv = 0x39,
        OP_CMP_GvEv = 0x3B,
#if PLATFORM(X86_64)
        PRE_REX = 0x40,
#endif
        // 0x50/0x58 are the bases of the one-byte push/pop forms; the register
        // number is added to the opcode.
        OP_PUSH_EAX = 0x50,
        OP_POP_EAX = 0x58,
#if PLATFORM(X86_64)
        OP_MOVSXD_GvEv = 0x63,
#endif
        PRE_OPERAND_SIZE = 0x66,
        PRE_SSE_66 = 0x66,
        OP_PUSH_Iz = 0x68,
        OP_IMUL_GvEvIz = 0x69,
        OP_GROUP1_EvIz = 0x81,
        OP_GROUP1_EvIb = 0x83,
        OP_TEST_EvGv = 0x85,
        OP_XCHG_EvGv = 0x87,
        OP_MOV_EvGv = 0x89,
        OP_MOV_GvEv = 0x8B,
        OP_LEA = 0x8D,
        OP_GROUP1A_Ev = 0x8F,
        OP_CDQ = 0x99,
        OP_MOV_EAXOv = 0xA1,
        OP_MOV_OvEAX = 0xA3,
        OP_MOV_EAXIv = 0xB8,
        OP_GROUP2_EvIb = 0xC1,
        OP_RET = 0xC3,
        OP_GROUP11_EvIz = 0xC7,
        OP_INT3 = 0xCC,
        OP_GROUP2_Ev1 = 0xD1,
        OP_GROUP2_EvCL = 0xD3,
        OP_CALL_rel32 = 0xE8,
        OP_JMP_rel32 = 0xE9,
        PRE_SSE_F2 = 0xF2,
        OP_HLT = 0xF4,
        OP_GROUP3_EbIb = 0xF6,
        OP_GROUP3_Ev = 0xF7,
        OP_GROUP3_EvIz = 0xF7, // Same opcode as OP_GROUP3_Ev; the test form carries an immediate.
        OP_GROUP5_Ev = 0xFF,
    } OneByteOpcodeID;

    // Opcodes reached via the 0x0F two-byte escape.
    typedef enum {
        OP2_MOVSD_VsdWsd = 0x10,
        OP2_MOVSD_WsdVsd = 0x11,
        OP2_CVTSI2SD_VsdEd = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd = 0x2E,
        OP2_ADDSD_VsdWsd = 0x58,
        OP2_MULSD_VsdWsd = 0x59,
        OP2_SUBSD_VsdWsd = 0x5C,
        OP2_DIVSD_VsdWsd = 0x5E,
        OP2_XORPD_VpdWpd = 0x57,
        OP2_MOVD_VdEd = 0x6E,
        OP2_MOVD_EdVd = 0x7E,
        OP2_JCC_rel32 = 0x80,   // Base; the condition code is added to it.
        OP_SETCC = 0x90,        // Base; NOTE(review): two-byte opcode despite the OP_ (not OP2_) prefix.
        OP2_IMUL_GvEv = 0xAF,
        OP2_MOVZX_GvEb = 0xB6,
        OP2_MOVZX_GvEw = 0xB7,
        OP2_PEXTRW_GdUdIb = 0xC5,
    } TwoByteOpcodeID;

    // Jcc rel32 opcode for a given condition: 0x0F 0x80+cc.
    TwoByteOpcodeID jccRel32(Condition cond)
    {
        return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    }

    // SETcc opcode for a given condition: 0x0F 0x90+cc.
    TwoByteOpcodeID setccOpcode(Condition cond)
    {
        return (TwoByteOpcodeID)(OP_SETCC + cond);
    }

    // /r "reg" field values selecting the operation within shared group
    // opcodes (the immediate-arithmetic, shift, unary, inc/dec-call-jmp-push,
    // and mov-immediate groups).
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT = 2,
        GROUP3_OP_NEG = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN = 4,
        GROUP5_OP_PUSH = 6,

        GROUP11_MOV = 0,
    } GroupOpcodeID;
221
    class X86InstructionFormatter;
public:

    // Label identifying the source of a jump or call: records a buffer offset
    // for later linking. Default-constructed instances are invalid (-1).
    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc()
            : m_offset(-1)
        {
        }

    private:
        JmpSrc(int offset)
            : m_offset(offset)
        {
        }

        int m_offset;
    };

    // Label identifying a jump target: a buffer offset, plus a flag recording
    // whether any jump has been linked to it.
    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            // m_offset is a 31-bit field (the remaining bit holds m_used);
            // assert the offset fits without truncation.
            ASSERT(m_offset == offset);
        }

        int m_offset : 31;
        bool m_used : 1;
    };

    X86Assembler()
    {
    }

    // Number of bytes of code emitted so far.
    size_t size() const { return m_formatter.size(); }
272
    // Stack operations:

    void push_r(RegisterID reg)
    {
        // push reg — one-byte form, register number folded into the opcode.
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        // pop reg — one-byte form, register number folded into the opcode.
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    void push_i32(int imm)
    {
        // push imm32.
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    void push_m(int offset, RegisterID base)
    {
        // push dword ptr [base + offset].
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        // pop dword ptr [base + offset].
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
300
    // Arithmetic operations:
    //
    // All immediate forms pick the shortest encoding: an immediate that fits
    // in a sign-extended byte uses the group-1 Ib form, otherwise the 32-bit
    // Iz form is emitted.

#if !PLATFORM(X86_64)
    // adc [addr], imm — add-with-carry to an absolute address (32-bit only).
    void adcl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // add [addr], imm — absolute-address form (32-bit only).
    void addl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
392
    // Bitwise AND, plus the unary negate/not group-3 operations.

    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        // Shortest-encoding immediate selection (Ib if sign-extendable, else Iz).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // and [addr], imm — absolute-address form (32-bit only).
    void andl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    // neg — two's-complement negate.
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    // not — one's-complement (bitwise) invert.
    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
478
    // Bitwise OR.

    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        // Shortest-encoding immediate selection (Ib if sign-extendable, else Iz).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // or [addr], imm — absolute-address form (32-bit only).
    void orl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
544
    // Subtraction.

    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        // Shortest-encoding immediate selection (Ib if sign-extendable, else Iz).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // sub [addr], imm — absolute-address form (32-bit only).
    void subl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
610
    // Bitwise XOR.

    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        // Shortest-encoding immediate selection (Ib if sign-extendable, else Iz).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
665
    // Shifts. A shift count of 1 uses the dedicated shorter shift-by-one
    // opcode; otherwise the count is emitted as a byte immediate.

    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    // Arithmetic shift right by the count in CL.
    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    // Shift left by the count in CL.
    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
712
    // Signed multiply / divide.

    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    // dst = src * value (three-operand imul with a 32-bit immediate).
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    // Signed divide of edx:eax by the operand; quotient in eax, remainder in edx.
    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
733
    // Comparisons:
    //
    // Immediate forms use the short Ib encoding when the value fits in a
    // sign-extended byte; the "_force32" variants always emit the full 32-bit
    // immediate, producing a fixed-length instruction (NOTE(review):
    // presumably so the immediate can be repatched later — confirm with callers).

    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // cmp [base + index*scale + offset], imm.
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // Absolute-address forms (32-bit only).
    void cmpl_rm(RegisterID reg, void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    // 16-bit compares: the 0x66 operand-size prefix selects word operands.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            // With the operand-size prefix the Iz immediate is 16 bits wide.
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
880
    // test (bitwise-AND that only sets flags) and SETcc.

    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // test has no byte-immediate form, so a full 32-bit immediate is always emitted.
    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants; the immediate is still 32 bits,
    // sign-extended by the hardware.
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif

    // 16-bit test via the operand-size prefix.
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // 8-bit test (byte register form).
    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }

    // SETcc ignores the ModR/M reg field, so 0 is passed for it.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // setz is a synonym for sete.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // setnz is a synonym for setne.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
965
    // Various move ops:

    // Sign-extend eax into edx:eax.
    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }

    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if PLATFORM(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif

    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    // Forces a 32-bit displacement even when the offset would fit in a byte
    // (fixed-length encoding).
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // mov eax, [addr] — special moffs form; the address is an inline
    // immediate (64-bit wide on x86-64).
    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // mov reg, imm32 — one-byte form, register number folded into the opcode.
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // mov [addr], eax — special moffs form (see movl_mEAX).
    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

#if PLATFORM(X86_64)
    // 64-bit (REX.W-prefixed) variants.
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movq_mEAX(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // 64-bit store of a sign-extended 32-bit immediate.
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // mov reg, imm64 — full 64-bit immediate load.
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // movsxd — sign-extend a 32-bit source into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }


#else
    // Absolute-address forms (32-bit only). The eax forms use the shorter
    // moffs encoding when possible.
    void movl_rm(RegisterID src, void* addr)
    {
        if (src == X86::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(void* addr, RegisterID dst)
    {
        if (dst == X86::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
#endif
1141
    // Zero-extending loads and lea.

    // movzx — load a 16-bit value, zero-extended.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    // movzx — zero-extend the low byte of a register.
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }

    // lea — compute the effective address [base + offset] into dst.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
#if PLATFORM(X86_64)
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
#endif
1170
    // Flow control:
    //
    // rel32 branches return a JmpSrc recording where the displacement field
    // was emitted, so it can be linked to a target later.

    void call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the returned label is the offset just
    // past the instruction.
    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return a JmpSrc so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }

    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }

    // Conditional rel32 jumps, one helper per condition.

    JmpSrc jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    // jnz is a synonym for jne.
    JmpSrc jnz()
    {
        return jne();
    }

    JmpSrc je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    // jz is a synonym for je.
    JmpSrc jz()
    {
        return je();
    }

    JmpSrc jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    JmpSrc jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    JmpSrc jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    JmpSrc ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    JmpSrc jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    JmpSrc jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    JmpSrc js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }
1297
    // Emit a conditional near jump for an arbitrary condition code, with a
    // rel32 displacement to be linked later.
    JmpSrc jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
1303
    // SSE operations:
    //
    // Scalar double-precision instructions. PRE_SSE_F2 selects the scalar-double
    // ('sd') forms; PRE_SSE_66 selects the 66-prefixed forms used here for the
    // integer moves, pextrw, ucomisd and xorpd. XMM operands are cast to
    // RegisterID purely for ModRM encoding.

    // addsd: dst += src (scalar double).
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // addsd dst, [base + offset].
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // cvtsi2sd: convert a 32-bit integer in src to a double in dst.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // cvtsi2sd with a memory source operand [base + offset].
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

#if !PLATFORM(X86_64)
    // cvtsi2sd from an absolute address (32-bit targets only).
    void cvtsi2sd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
#endif

    // cvttsd2si: convert a double to a 32-bit integer, truncating.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // movd: move the low 32 bits of an XMM register to a GP register.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

#if PLATFORM(X86_64)
    // movq: 64-bit XMM -> GP move (the REX.W form of movd).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // movq: 64-bit GP -> XMM move.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif

    // movsd: store a double from src to [base + offset].
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // movsd: load a double from [base + offset] into dst.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

#if !PLATFORM(X86_64)
    // movsd load from an absolute address (32-bit targets only).
    void movsd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
#endif

    // mulsd: dst *= src (scalar double).
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // mulsd dst, [base + offset].
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // pextrw: extract the whichWord'th 16-bit word of src into dst.
    // The word index is planted as an 8-bit immediate after the ModRM byte.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // subsd: dst -= src (scalar double).
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // subsd dst, [base + offset].
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // ucomisd: unordered compare of two doubles, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // ucomisd dst, [base + offset].
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // divsd: dst /= src (scalar double).
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // divsd dst, [base + offset].
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // xorpd: bitwise XOR of the packed-double operands.
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }
1444
    // Misc instructions:

    // int3: software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // ret: near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Plant a branch-not-taken hint prefix ahead of a following conditional branch.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1461
    // Assembler admin methods:

    // Return a label marking the current end of the emitted code.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    // Construct a label at (or at a fixed byte offset from) a jump's recorded offset.
    static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
    {
        return JmpDst(jump.m_offset + offset);
    }

    // Pad the buffer with hlt bytes until it reaches the requested alignment,
    // then return a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1481
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.

    // Link a jump within this assembler's own buffer to a label in the same buffer.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(from.m_offset != -1);
        ASSERT(to.m_offset != -1);

        char* code = reinterpret_cast<char*>(m_formatter.data());
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    // Link a jump in copied (still writable) code to an absolute target address.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in copied (still writable) code to an absolute target address.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Write an absolute pointer value into the code at the given label.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        ASSERT(where.m_offset != -1);

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Retarget an existing jump; 'from' points just past its rel32 field.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Retarget an existing call; 'from' points just past its rel32 field.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Overwrite the 32-bit immediate whose last byte precedes 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Overwrite the pointer-sized immediate whose last byte precedes 'where'.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }

    // Rewrite the opcode byte of a pointer-load instruction to turn it into an lea.
    // 'where' points at the start of the instruction.
    static void repatchLoadPtrToLEA(void* where)
    {
#if PLATFORM(X86_64)
        // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
        // Skip over the prefix byte.
        where = reinterpret_cast<char*>(where) + 1;
#endif
        *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
    }
1549
    // Return the buffer offset recorded for a call, which marks the byte
    // immediately following the call instruction (the return address offset).
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }

    // Translate a jump record's offset into an absolute address within copied code.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    // Translate a label's offset into an absolute address within copied code.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    // Byte distance between two recorded offsets (dst - src), for each
    // combination of label and jump records.
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    // Copy the assembled code into executable memory drawn from the given pool.
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
1591
private:

    // Write a pointer-sized value into the bytes immediately preceding 'where'.
    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    // Write a 32-bit value into the four bytes immediately preceding 'where'.
    static void setInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    // Write the rel32 displacement (to - from) into the four bytes preceding 'from'.
    // 'from' is the address just past the displacement field, matching x86 rel32
    // semantics (displacements are relative to the end of the instruction).
    // The ASSERT checks the distance actually fits in 32 bits (relevant on x86-64).
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
1611
    // X86InstructionFormatter
    //
    // Low-level byte emitter: lays out legacy prefixes, opcodes, REX prefixes
    // (on x86-64), and ModRM / SIB bytes plus displacements into the
    // AssemblerBuffer.
    class X86InstructionFormatter {

        // Upper bound on bytes a single op formatter may emit; space is reserved
        // once up front so the subsequent byte writes can be unchecked.
        static const int maxInstructionSize = 16;

    public:

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }

        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).

        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register encoded in the low three bits of the opcode byte itself.
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register/register (or group-opcode/register) ModRM form.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Register and [base + offset] memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always uses a 32-bit displacement (useful when the
        // displacement field must have a fixed size, e.g. for later patching).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // Register and [base + index * scale + offset] memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !PLATFORM(X86_64)
        // Register and absolute-address memory operand (32-bit targets only).
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif

        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !PLATFORM(X86_64)
        // Two-byte opcode with an absolute-address memory operand (32-bit only).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif

#if PLATFORM(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // 64-bit op with a forced 32-bit displacement field.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif

        // Byte-operands:
        //
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions where a mix of operand sizes, in which
        // case an unnecessary REX will be emitted, for example:
        //     movzbl %al, %edi
        // In this case a REX will be planted since edi is 7 (and were this a byte operand
        // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
        // be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Immediates:
        //
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Plant a zeroed rel32 field and return a JmpSrc recording the offset
        // just past it, for later linking.
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }

        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

    private:

        // Internals; ModRm and REX formatters.

        // Register numbers with special meaning in ModRM/SIB encoding:
        // ebp in the base field selects "no base" (disp32) in mod 00;
        // esp in the r/m field selects a SIB byte; esp in the index field means "no index".
        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        // r13 and r12 alias ebp/esp in the low three ModRM bits, so they share
        // the same encoding quirks.
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        // ModRM 'mod' field values (top two bits of the ModRM byte).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Emit a ModRM byte: mod in bits 7-6, reg in 5-3, rm in 2-0.
        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        // Emit a ModRM byte selecting a SIB, followed by the SIB byte itself.
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        // Emit the ModRM (+ optional SIB) and the smallest displacement that holds 'offset'.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
                // A base of ebp or r13 in mod 00 would mean disp32-with-no-base,
                // so those bases always get an explicit displacement.
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

        // As memoryModRM, but always emits a 32-bit displacement.
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        // Scaled-index form: always emits a SIB, plus the smallest displacement needed.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

#if !PLATFORM(X86_64)
        // Absolute-address operand (32-bit only).
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

        AssemblerBuffer m_buffer;
    } m_formatter;
2051 };
2052
2053 } // namespace JSC
2054
#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
2056
2057 #endif // X86Assembler_h