]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/X86Assembler.h
JavaScriptCore-1097.3.tar.gz
[apple/javascriptcore.git] / assembler / X86Assembler.h
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
30
31 #include "AssemblerBuffer.h"
32 #include "JITCompilationEffort.h"
33 #include <stdint.h>
34 #include <wtf/Assertions.h>
35 #include <wtf/Vector.h>
36
37 namespace JSC {
38
// Returns true when `value` survives a round-trip through a signed 8-bit
// truncation, i.e. it can be emitted as a sign-extended imm8 operand.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value)
{
    const int32_t roundTripped = static_cast<int32_t>(static_cast<signed char>(value));
    return roundTripped == value;
}
40
namespace X86Registers {
    // General-purpose registers. Enumerator order is the hardware register
    // encoding (eax = 0 ... edi = 7, and r8 = 8 ... r15 = 15 on x86-64), so
    // the values are emitted directly into instruction bytes — do not reorder.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers; likewise, enumerator order is the hardware encoding.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
75
76 class X86Assembler {
77 public:
78 typedef X86Registers::RegisterID RegisterID;
79 typedef X86Registers::XMMRegisterID XMMRegisterID;
80 typedef XMMRegisterID FPRegisterID;
81
    // x86 condition codes. Enumerator order matches the hardware condition
    // encoding: jccRel32()/setccOpcode() below form Jcc/SETcc opcodes by
    // adding a Condition to a base opcode, so do not reorder these.
    typedef enum {
        ConditionO,   // overflow
        ConditionNO,  // no overflow
        ConditionB,   // below (unsigned <)
        ConditionAE,  // above or equal (unsigned >=)
        ConditionE,   // equal / zero
        ConditionNE,  // not equal / not zero
        ConditionBE,  // below or equal (unsigned <=)
        ConditionA,   // above (unsigned >)
        ConditionS,   // sign
        ConditionNS,  // no sign
        ConditionP,   // parity
        ConditionNP,  // no parity
        ConditionL,   // less (signed <)
        ConditionGE,  // greater or equal (signed >=)
        ConditionLE,  // less or equal (signed <=)
        ConditionG,   // greater (signed >)

        // Aliases: carry is the same flag test as unsigned below.
        ConditionC = ConditionB,
        ConditionNC = ConditionAE,
    } Condition;
103
104 private:
    // Single-byte x86 opcodes and instruction prefixes used by this assembler.
    // Names follow the Ev/Gv/Eb/Gb/Ib/Iz operand notation of the Intel
    // instruction-set reference; GROUPn opcodes take a GroupOpcodeID (below)
    // in the ModRM reg field to select the actual operation.
    typedef enum {
        OP_ADD_EvGv = 0x01,
        OP_ADD_GvEv = 0x03,
        OP_OR_EvGv = 0x09,
        OP_OR_GvEv = 0x0B,
        OP_2BYTE_ESCAPE = 0x0F,
        OP_AND_EvGv = 0x21,
        OP_AND_GvEv = 0x23,
        OP_SUB_EvGv = 0x29,
        OP_SUB_GvEv = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_XOR_EvGv = 0x31,
        OP_XOR_GvEv = 0x33,
        OP_CMP_EvGv = 0x39,
        OP_CMP_GvEv = 0x3B,
#if CPU(X86_64)
        PRE_REX = 0x40,
#endif
        OP_PUSH_EAX = 0x50,
        OP_POP_EAX = 0x58,
#if CPU(X86_64)
        OP_MOVSXD_GvEv = 0x63,
#endif
        PRE_OPERAND_SIZE = 0x66,
        PRE_SSE_66 = 0x66,
        OP_PUSH_Iz = 0x68,
        OP_IMUL_GvEvIz = 0x69,
        OP_GROUP1_EbIb = 0x80,
        OP_GROUP1_EvIz = 0x81,
        OP_GROUP1_EvIb = 0x83,
        OP_TEST_EbGb = 0x84,
        OP_TEST_EvGv = 0x85,
        OP_XCHG_EvGv = 0x87,
        OP_MOV_EbGb = 0x88,
        OP_MOV_EvGv = 0x89,
        OP_MOV_GvEv = 0x8B,
        OP_LEA = 0x8D,
        OP_GROUP1A_Ev = 0x8F,
        OP_NOP = 0x90,
        OP_CDQ = 0x99,
        OP_MOV_EAXOv = 0xA1,
        OP_MOV_OvEAX = 0xA3,
        OP_MOV_EAXIv = 0xB8,
        OP_GROUP2_EvIb = 0xC1,
        OP_RET = 0xC3,
        OP_GROUP11_EvIb = 0xC6,
        OP_GROUP11_EvIz = 0xC7,
        OP_INT3 = 0xCC,
        OP_GROUP2_Ev1 = 0xD1,   // shift/rotate by constant 1
        OP_GROUP2_EvCL = 0xD3,  // shift/rotate by CL
        OP_ESCAPE_DD = 0xDD,    // x87 escape byte
        OP_CALL_rel32 = 0xE8,
        OP_JMP_rel32 = 0xE9,
        PRE_SSE_F2 = 0xF2,
        PRE_SSE_F3 = 0xF3,
        OP_HLT = 0xF4,
        OP_GROUP3_EbIb = 0xF6,
        OP_GROUP3_Ev = 0xF7,
        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev = 0xFF,
    } OneByteOpcodeID;
166
167 typedef enum {
168 OP2_MOVSD_VsdWsd = 0x10,
169 OP2_MOVSD_WsdVsd = 0x11,
170 OP2_MOVSS_VsdWsd = 0x10,
171 OP2_MOVSS_WsdVsd = 0x11,
172 OP2_CVTSI2SD_VsdEd = 0x2A,
173 OP2_CVTTSD2SI_GdWsd = 0x2C,
174 OP2_UCOMISD_VsdWsd = 0x2E,
175 OP2_ADDSD_VsdWsd = 0x58,
176 OP2_MULSD_VsdWsd = 0x59,
177 OP2_CVTSD2SS_VsdWsd = 0x5A,
178 OP2_CVTSS2SD_VsdWsd = 0x5A,
179 OP2_SUBSD_VsdWsd = 0x5C,
180 OP2_DIVSD_VsdWsd = 0x5E,
181 OP2_SQRTSD_VsdWsd = 0x51,
182 OP2_ANDNPD_VpdWpd = 0x55,
183 OP2_XORPD_VpdWpd = 0x57,
184 OP2_MOVD_VdEd = 0x6E,
185 OP2_MOVD_EdVd = 0x7E,
186 OP2_JCC_rel32 = 0x80,
187 OP_SETCC = 0x90,
188 OP2_IMUL_GvEv = 0xAF,
189 OP2_MOVZX_GvEb = 0xB6,
190 OP2_MOVSX_GvEb = 0xBE,
191 OP2_MOVZX_GvEw = 0xB7,
192 OP2_MOVSX_GvEw = 0xBF,
193 OP2_PEXTRW_GdUdIb = 0xC5,
194 OP2_PSLLQ_UdqIb = 0x73,
195 OP2_PSRLQ_UdqIb = 0x73,
196 OP2_POR_VdqWdq = 0XEB,
197 } TwoByteOpcodeID;
198
199 TwoByteOpcodeID jccRel32(Condition cond)
200 {
201 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
202 }
203
204 TwoByteOpcodeID setccOpcode(Condition cond)
205 {
206 return (TwoByteOpcodeID)(OP_SETCC + cond);
207 }
208
    // Sub-opcode selectors for the GROUPn opcodes above: the value is placed
    // in the ModRM reg field to choose which operation the shared opcode byte
    // performs. Values are fixed by the x86 encoding — do not change them.
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_ROL = 0,
        GROUP2_OP_ROR = 1,
        GROUP2_OP_RCL = 2,
        GROUP2_OP_RCR = 3,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT = 2,
        GROUP3_OP_NEG = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN = 4,
        GROUP5_OP_PUSH = 6,

        GROUP11_MOV = 0,

        GROUP14_OP_PSLLQ = 6,
        GROUP14_OP_PSRLQ = 2,

        ESCAPE_DD_FSTP_doubleReal = 3,
    } GroupOpcodeID;
245
246 class X86InstructionFormatter;
247 public:
248
    // Default constructor; members (the instruction formatter) are
    // default-constructed, so there is nothing to do here.
    X86Assembler()
    {
    }
252
253 // Stack operations:
254
    // push reg — one-byte form built from the OP_PUSH_EAX (0x50) base opcode
    // plus the register encoding.
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }
259
    // pop reg — one-byte form built from the OP_POP_EAX (0x58) base opcode
    // plus the register encoding.
    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }
264
    // push imm32 — always uses the 32-bit immediate form (no imm8 shortening).
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }
270
    // push [base + offset] — group-5 opcode with the PUSH selector in the
    // ModRM reg field.
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }
275
    // pop [base + offset] — group-1A opcode with the POP selector in the
    // ModRM reg field.
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
280
281 // Arithmetic operations:
282
283 #if !CPU(X86_64)
284 void adcl_im(int imm, const void* addr)
285 {
286 if (CAN_SIGN_EXTEND_8_32(imm)) {
287 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
288 m_formatter.immediate8(imm);
289 } else {
290 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
291 m_formatter.immediate32(imm);
292 }
293 }
294 #endif
295
296 void addl_rr(RegisterID src, RegisterID dst)
297 {
298 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
299 }
300
301 void addl_mr(int offset, RegisterID base, RegisterID dst)
302 {
303 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
304 }
305
306 void addl_rm(RegisterID src, int offset, RegisterID base)
307 {
308 m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
309 }
310
311 void addl_ir(int imm, RegisterID dst)
312 {
313 if (CAN_SIGN_EXTEND_8_32(imm)) {
314 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
315 m_formatter.immediate8(imm);
316 } else {
317 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
318 m_formatter.immediate32(imm);
319 }
320 }
321
322 void addl_im(int imm, int offset, RegisterID base)
323 {
324 if (CAN_SIGN_EXTEND_8_32(imm)) {
325 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
326 m_formatter.immediate8(imm);
327 } else {
328 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
329 m_formatter.immediate32(imm);
330 }
331 }
332
333 #if CPU(X86_64)
334 void addq_rr(RegisterID src, RegisterID dst)
335 {
336 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
337 }
338
339 void addq_ir(int imm, RegisterID dst)
340 {
341 if (CAN_SIGN_EXTEND_8_32(imm)) {
342 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
343 m_formatter.immediate8(imm);
344 } else {
345 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
346 m_formatter.immediate32(imm);
347 }
348 }
349
350 void addq_im(int imm, int offset, RegisterID base)
351 {
352 if (CAN_SIGN_EXTEND_8_32(imm)) {
353 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
354 m_formatter.immediate8(imm);
355 } else {
356 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
357 m_formatter.immediate32(imm);
358 }
359 }
360 #else
361 void addl_im(int imm, const void* addr)
362 {
363 if (CAN_SIGN_EXTEND_8_32(imm)) {
364 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
365 m_formatter.immediate8(imm);
366 } else {
367 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
368 m_formatter.immediate32(imm);
369 }
370 }
371 #endif
372
373 void andl_rr(RegisterID src, RegisterID dst)
374 {
375 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
376 }
377
378 void andl_mr(int offset, RegisterID base, RegisterID dst)
379 {
380 m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
381 }
382
383 void andl_rm(RegisterID src, int offset, RegisterID base)
384 {
385 m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
386 }
387
388 void andl_ir(int imm, RegisterID dst)
389 {
390 if (CAN_SIGN_EXTEND_8_32(imm)) {
391 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
392 m_formatter.immediate8(imm);
393 } else {
394 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
395 m_formatter.immediate32(imm);
396 }
397 }
398
399 void andl_im(int imm, int offset, RegisterID base)
400 {
401 if (CAN_SIGN_EXTEND_8_32(imm)) {
402 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
403 m_formatter.immediate8(imm);
404 } else {
405 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
406 m_formatter.immediate32(imm);
407 }
408 }
409
410 #if CPU(X86_64)
411 void andq_rr(RegisterID src, RegisterID dst)
412 {
413 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
414 }
415
416 void andq_ir(int imm, RegisterID dst)
417 {
418 if (CAN_SIGN_EXTEND_8_32(imm)) {
419 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
420 m_formatter.immediate8(imm);
421 } else {
422 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
423 m_formatter.immediate32(imm);
424 }
425 }
426 #else
427 void andl_im(int imm, const void* addr)
428 {
429 if (CAN_SIGN_EXTEND_8_32(imm)) {
430 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
431 m_formatter.immediate8(imm);
432 } else {
433 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
434 m_formatter.immediate32(imm);
435 }
436 }
437 #endif
438
439 void negl_r(RegisterID dst)
440 {
441 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
442 }
443
444 void negl_m(int offset, RegisterID base)
445 {
446 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
447 }
448
449 void notl_r(RegisterID dst)
450 {
451 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
452 }
453
454 void notl_m(int offset, RegisterID base)
455 {
456 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
457 }
458
459 void orl_rr(RegisterID src, RegisterID dst)
460 {
461 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
462 }
463
464 void orl_mr(int offset, RegisterID base, RegisterID dst)
465 {
466 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
467 }
468
469 void orl_rm(RegisterID src, int offset, RegisterID base)
470 {
471 m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
472 }
473
474 void orl_ir(int imm, RegisterID dst)
475 {
476 if (CAN_SIGN_EXTEND_8_32(imm)) {
477 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
478 m_formatter.immediate8(imm);
479 } else {
480 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
481 m_formatter.immediate32(imm);
482 }
483 }
484
485 void orl_im(int imm, int offset, RegisterID base)
486 {
487 if (CAN_SIGN_EXTEND_8_32(imm)) {
488 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
489 m_formatter.immediate8(imm);
490 } else {
491 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
492 m_formatter.immediate32(imm);
493 }
494 }
495
496 #if CPU(X86_64)
497 void orq_rr(RegisterID src, RegisterID dst)
498 {
499 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
500 }
501
502 void orq_ir(int imm, RegisterID dst)
503 {
504 if (CAN_SIGN_EXTEND_8_32(imm)) {
505 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
506 m_formatter.immediate8(imm);
507 } else {
508 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
509 m_formatter.immediate32(imm);
510 }
511 }
512 #else
513 void orl_im(int imm, const void* addr)
514 {
515 if (CAN_SIGN_EXTEND_8_32(imm)) {
516 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
517 m_formatter.immediate8(imm);
518 } else {
519 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
520 m_formatter.immediate32(imm);
521 }
522 }
523 #endif
524
525 void subl_rr(RegisterID src, RegisterID dst)
526 {
527 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
528 }
529
530 void subl_mr(int offset, RegisterID base, RegisterID dst)
531 {
532 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
533 }
534
535 void subl_rm(RegisterID src, int offset, RegisterID base)
536 {
537 m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
538 }
539
540 void subl_ir(int imm, RegisterID dst)
541 {
542 if (CAN_SIGN_EXTEND_8_32(imm)) {
543 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
544 m_formatter.immediate8(imm);
545 } else {
546 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
547 m_formatter.immediate32(imm);
548 }
549 }
550
551 void subl_im(int imm, int offset, RegisterID base)
552 {
553 if (CAN_SIGN_EXTEND_8_32(imm)) {
554 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
555 m_formatter.immediate8(imm);
556 } else {
557 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
558 m_formatter.immediate32(imm);
559 }
560 }
561
562 #if CPU(X86_64)
563 void subq_rr(RegisterID src, RegisterID dst)
564 {
565 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
566 }
567
568 void subq_ir(int imm, RegisterID dst)
569 {
570 if (CAN_SIGN_EXTEND_8_32(imm)) {
571 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
572 m_formatter.immediate8(imm);
573 } else {
574 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
575 m_formatter.immediate32(imm);
576 }
577 }
578 #else
579 void subl_im(int imm, const void* addr)
580 {
581 if (CAN_SIGN_EXTEND_8_32(imm)) {
582 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
583 m_formatter.immediate8(imm);
584 } else {
585 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
586 m_formatter.immediate32(imm);
587 }
588 }
589 #endif
590
591 void xorl_rr(RegisterID src, RegisterID dst)
592 {
593 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
594 }
595
596 void xorl_mr(int offset, RegisterID base, RegisterID dst)
597 {
598 m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
599 }
600
601 void xorl_rm(RegisterID src, int offset, RegisterID base)
602 {
603 m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
604 }
605
606 void xorl_im(int imm, int offset, RegisterID base)
607 {
608 if (CAN_SIGN_EXTEND_8_32(imm)) {
609 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
610 m_formatter.immediate8(imm);
611 } else {
612 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
613 m_formatter.immediate32(imm);
614 }
615 }
616
617 void xorl_ir(int imm, RegisterID dst)
618 {
619 if (CAN_SIGN_EXTEND_8_32(imm)) {
620 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
621 m_formatter.immediate8(imm);
622 } else {
623 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
624 m_formatter.immediate32(imm);
625 }
626 }
627
628 #if CPU(X86_64)
629 void xorq_rr(RegisterID src, RegisterID dst)
630 {
631 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
632 }
633
634 void xorq_ir(int imm, RegisterID dst)
635 {
636 if (CAN_SIGN_EXTEND_8_32(imm)) {
637 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
638 m_formatter.immediate8(imm);
639 } else {
640 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
641 m_formatter.immediate32(imm);
642 }
643 }
644
645 void xorq_rm(RegisterID src, int offset, RegisterID base)
646 {
647 m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
648 }
649
650 void rorq_i8r(int imm, RegisterID dst)
651 {
652 if (imm == 1)
653 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
654 else {
655 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
656 m_formatter.immediate8(imm);
657 }
658 }
659
660 #endif
661
    // sar dst, imm — arithmetic right shift. Uses the dedicated shift-by-1
    // opcode (0xD1, no immediate byte) when imm == 1, otherwise the
    // shift-by-imm8 form (0xC1).
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
671
672 void sarl_CLr(RegisterID dst)
673 {
674 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
675 }
676
677 void shrl_i8r(int imm, RegisterID dst)
678 {
679 if (imm == 1)
680 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
681 else {
682 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
683 m_formatter.immediate8(imm);
684 }
685 }
686
687 void shrl_CLr(RegisterID dst)
688 {
689 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
690 }
691
692 void shll_i8r(int imm, RegisterID dst)
693 {
694 if (imm == 1)
695 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
696 else {
697 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
698 m_formatter.immediate8(imm);
699 }
700 }
701
702 void shll_CLr(RegisterID dst)
703 {
704 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
705 }
706
707 #if CPU(X86_64)
708 void sarq_CLr(RegisterID dst)
709 {
710 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
711 }
712
713 void sarq_i8r(int imm, RegisterID dst)
714 {
715 if (imm == 1)
716 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
717 else {
718 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
719 m_formatter.immediate8(imm);
720 }
721 }
722 #endif
723
724 void imull_rr(RegisterID src, RegisterID dst)
725 {
726 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
727 }
728
729 void imull_mr(int offset, RegisterID base, RegisterID dst)
730 {
731 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
732 }
733
734 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
735 {
736 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
737 m_formatter.immediate32(value);
738 }
739
740 void idivl_r(RegisterID dst)
741 {
742 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
743 }
744
745 // Comparisons:
746
747 void cmpl_rr(RegisterID src, RegisterID dst)
748 {
749 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
750 }
751
752 void cmpl_rm(RegisterID src, int offset, RegisterID base)
753 {
754 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
755 }
756
757 void cmpl_mr(int offset, RegisterID base, RegisterID src)
758 {
759 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
760 }
761
762 void cmpl_ir(int imm, RegisterID dst)
763 {
764 if (CAN_SIGN_EXTEND_8_32(imm)) {
765 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
766 m_formatter.immediate8(imm);
767 } else {
768 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
769 m_formatter.immediate32(imm);
770 }
771 }
772
773 void cmpl_ir_force32(int imm, RegisterID dst)
774 {
775 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
776 m_formatter.immediate32(imm);
777 }
778
779 void cmpl_im(int imm, int offset, RegisterID base)
780 {
781 if (CAN_SIGN_EXTEND_8_32(imm)) {
782 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
783 m_formatter.immediate8(imm);
784 } else {
785 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
786 m_formatter.immediate32(imm);
787 }
788 }
789
790 void cmpb_im(int imm, int offset, RegisterID base)
791 {
792 m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
793 m_formatter.immediate8(imm);
794 }
795
796 void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
797 {
798 m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
799 m_formatter.immediate8(imm);
800 }
801
802 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
803 {
804 if (CAN_SIGN_EXTEND_8_32(imm)) {
805 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
806 m_formatter.immediate8(imm);
807 } else {
808 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
809 m_formatter.immediate32(imm);
810 }
811 }
812
813 void cmpl_im_force32(int imm, int offset, RegisterID base)
814 {
815 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
816 m_formatter.immediate32(imm);
817 }
818
819 #if CPU(X86_64)
820 void cmpq_rr(RegisterID src, RegisterID dst)
821 {
822 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
823 }
824
825 void cmpq_rm(RegisterID src, int offset, RegisterID base)
826 {
827 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
828 }
829
830 void cmpq_mr(int offset, RegisterID base, RegisterID src)
831 {
832 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
833 }
834
835 void cmpq_ir(int imm, RegisterID dst)
836 {
837 if (CAN_SIGN_EXTEND_8_32(imm)) {
838 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
839 m_formatter.immediate8(imm);
840 } else {
841 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
842 m_formatter.immediate32(imm);
843 }
844 }
845
846 void cmpq_im(int imm, int offset, RegisterID base)
847 {
848 if (CAN_SIGN_EXTEND_8_32(imm)) {
849 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
850 m_formatter.immediate8(imm);
851 } else {
852 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
853 m_formatter.immediate32(imm);
854 }
855 }
856
857 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
858 {
859 if (CAN_SIGN_EXTEND_8_32(imm)) {
860 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
861 m_formatter.immediate8(imm);
862 } else {
863 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
864 m_formatter.immediate32(imm);
865 }
866 }
867 #else
868 void cmpl_rm(RegisterID reg, const void* addr)
869 {
870 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
871 }
872
873 void cmpl_im(int imm, const void* addr)
874 {
875 if (CAN_SIGN_EXTEND_8_32(imm)) {
876 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
877 m_formatter.immediate8(imm);
878 } else {
879 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
880 m_formatter.immediate32(imm);
881 }
882 }
883 #endif
884
885 void cmpw_ir(int imm, RegisterID dst)
886 {
887 if (CAN_SIGN_EXTEND_8_32(imm)) {
888 m_formatter.prefix(PRE_OPERAND_SIZE);
889 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
890 m_formatter.immediate8(imm);
891 } else {
892 m_formatter.prefix(PRE_OPERAND_SIZE);
893 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
894 m_formatter.immediate16(imm);
895 }
896 }
897
898 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
899 {
900 m_formatter.prefix(PRE_OPERAND_SIZE);
901 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
902 }
903
904 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
905 {
906 if (CAN_SIGN_EXTEND_8_32(imm)) {
907 m_formatter.prefix(PRE_OPERAND_SIZE);
908 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
909 m_formatter.immediate8(imm);
910 } else {
911 m_formatter.prefix(PRE_OPERAND_SIZE);
912 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
913 m_formatter.immediate16(imm);
914 }
915 }
916
917 void testl_rr(RegisterID src, RegisterID dst)
918 {
919 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
920 }
921
922 void testl_i32r(int imm, RegisterID dst)
923 {
924 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
925 m_formatter.immediate32(imm);
926 }
927
928 void testl_i32m(int imm, int offset, RegisterID base)
929 {
930 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
931 m_formatter.immediate32(imm);
932 }
933
934 void testb_rr(RegisterID src, RegisterID dst)
935 {
936 m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
937 }
938
939 void testb_im(int imm, int offset, RegisterID base)
940 {
941 m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
942 m_formatter.immediate8(imm);
943 }
944
945 void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
946 {
947 m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
948 m_formatter.immediate8(imm);
949 }
950
951 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
952 {
953 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
954 m_formatter.immediate32(imm);
955 }
956
957 #if CPU(X86_64)
958 void testq_rr(RegisterID src, RegisterID dst)
959 {
960 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
961 }
962
963 void testq_i32r(int imm, RegisterID dst)
964 {
965 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
966 m_formatter.immediate32(imm);
967 }
968
969 void testq_i32m(int imm, int offset, RegisterID base)
970 {
971 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
972 m_formatter.immediate32(imm);
973 }
974
975 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
976 {
977 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
978 m_formatter.immediate32(imm);
979 }
980 #endif
981
982 void testw_rr(RegisterID src, RegisterID dst)
983 {
984 m_formatter.prefix(PRE_OPERAND_SIZE);
985 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
986 }
987
988 void testb_i8r(int imm, RegisterID dst)
989 {
990 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
991 m_formatter.immediate8(imm);
992 }
993
    // setcc dst — sets the low byte of dst to 1 if `cond` holds, else 0.
    // SETcc ignores the ModRM reg field, hence the 0 placeholder.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }
998
999 void sete_r(RegisterID dst)
1000 {
1001 m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
1002 }
1003
1004 void setz_r(RegisterID dst)
1005 {
1006 sete_r(dst);
1007 }
1008
1009 void setne_r(RegisterID dst)
1010 {
1011 m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
1012 }
1013
1014 void setnz_r(RegisterID dst)
1015 {
1016 setne_r(dst);
1017 }
1018
1019 // Various move ops:
1020
1021 void cdq()
1022 {
1023 m_formatter.oneByteOp(OP_CDQ);
1024 }
1025
1026 void fstpl(int offset, RegisterID base)
1027 {
1028 m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
1029 }
1030
1031 void xchgl_rr(RegisterID src, RegisterID dst)
1032 {
1033 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
1034 }
1035
1036 #if CPU(X86_64)
1037 void xchgq_rr(RegisterID src, RegisterID dst)
1038 {
1039 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
1040 }
1041 #endif
1042
1043 void movl_rr(RegisterID src, RegisterID dst)
1044 {
1045 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
1046 }
1047
1048 void movl_rm(RegisterID src, int offset, RegisterID base)
1049 {
1050 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
1051 }
1052
1053 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
1054 {
1055 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
1056 }
1057
1058 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1059 {
1060 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
1061 }
1062
    // mov eax, [addr] — loads EAX from an absolute address using the
    // EAX-specific moffs form; the address is emitted as a 64-bit immediate
    // on x86-64 and as a 32-bit immediate on 32-bit x86.
    void movl_mEAX(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
1072
1073 void movl_mr(int offset, RegisterID base, RegisterID dst)
1074 {
1075 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
1076 }
1077
1078 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
1079 {
1080 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
1081 }
1082
1083 void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
1084 {
1085 m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
1086 }
1087
1088 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1089 {
1090 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
1091 }
1092
1093 void movl_i32r(int imm, RegisterID dst)
1094 {
1095 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
1096 m_formatter.immediate32(imm);
1097 }
1098
1099 void movl_i32m(int imm, int offset, RegisterID base)
1100 {
1101 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1102 m_formatter.immediate32(imm);
1103 }
1104
1105 void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1106 {
1107 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
1108 m_formatter.immediate32(imm);
1109 }
1110
1111 void movb_i8m(int imm, int offset, RegisterID base)
1112 {
1113 ASSERT(-128 <= imm && imm < 128);
1114 m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
1115 m_formatter.immediate8(imm);
1116 }
1117
1118 void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
1119 {
1120 ASSERT(-128 <= imm && imm < 128);
1121 m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
1122 m_formatter.immediate8(imm);
1123 }
1124
1125 void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1126 {
1127 m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
1128 }
1129
1130 void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1131 {
1132 m_formatter.prefix(PRE_OPERAND_SIZE);
1133 m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
1134 }
1135
1136 void movl_EAXm(const void* addr)
1137 {
1138 m_formatter.oneByteOp(OP_MOV_OvEAX);
1139 #if CPU(X86_64)
1140 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1141 #else
1142 m_formatter.immediate32(reinterpret_cast<int>(addr));
1143 #endif
1144 }
1145
1146 #if CPU(X86_64)
1147 void movq_rr(RegisterID src, RegisterID dst)
1148 {
1149 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
1150 }
1151
1152 void movq_rm(RegisterID src, int offset, RegisterID base)
1153 {
1154 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
1155 }
1156
1157 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
1158 {
1159 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
1160 }
1161
1162 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1163 {
1164 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
1165 }
1166
1167 void movq_mEAX(const void* addr)
1168 {
1169 m_formatter.oneByteOp64(OP_MOV_EAXOv);
1170 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1171 }
1172
1173 void movq_EAXm(const void* addr)
1174 {
1175 m_formatter.oneByteOp64(OP_MOV_OvEAX);
1176 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1177 }
1178
1179 void movq_mr(int offset, RegisterID base, RegisterID dst)
1180 {
1181 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
1182 }
1183
1184 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
1185 {
1186 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
1187 }
1188
1189 void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
1190 {
1191 m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
1192 }
1193
1194 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1195 {
1196 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
1197 }
1198
1199 void movq_i32m(int imm, int offset, RegisterID base)
1200 {
1201 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1202 m_formatter.immediate32(imm);
1203 }
1204
1205 void movq_i64r(int64_t imm, RegisterID dst)
1206 {
1207 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
1208 m_formatter.immediate64(imm);
1209 }
1210
1211 void movsxd_rr(RegisterID src, RegisterID dst)
1212 {
1213 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
1214 }
1215
1216
1217 #else
1218 void movl_rm(RegisterID src, const void* addr)
1219 {
1220 if (src == X86Registers::eax)
1221 movl_EAXm(addr);
1222 else
1223 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
1224 }
1225
1226 void movl_mr(const void* addr, RegisterID dst)
1227 {
1228 if (dst == X86Registers::eax)
1229 movl_mEAX(addr);
1230 else
1231 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
1232 }
1233
1234 void movl_i32m(int imm, const void* addr)
1235 {
1236 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
1237 m_formatter.immediate32(imm);
1238 }
1239 #endif
1240
1241 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1242 {
1243 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1244 }
1245
1246 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1247 {
1248 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1249 }
1250
1251 void movswl_mr(int offset, RegisterID base, RegisterID dst)
1252 {
1253 m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
1254 }
1255
1256 void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1257 {
1258 m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
1259 }
1260
1261 void movzbl_mr(int offset, RegisterID base, RegisterID dst)
1262 {
1263 m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
1264 }
1265
1266 void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1267 {
1268 m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
1269 }
1270
1271 void movsbl_mr(int offset, RegisterID base, RegisterID dst)
1272 {
1273 m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
1274 }
1275
1276 void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1277 {
1278 m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
1279 }
1280
1281 void movzbl_rr(RegisterID src, RegisterID dst)
1282 {
1283 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1284 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1285 // REX prefixes are defined to be silently ignored by the processor.
1286 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
1287 }
1288
1289 void leal_mr(int offset, RegisterID base, RegisterID dst)
1290 {
1291 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1292 }
1293 #if CPU(X86_64)
1294 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1295 {
1296 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1297 }
1298 #endif
1299
1300 // Flow control:
1301
1302 AssemblerLabel call()
1303 {
1304 m_formatter.oneByteOp(OP_CALL_rel32);
1305 return m_formatter.immediateRel32();
1306 }
1307
1308 AssemblerLabel call(RegisterID dst)
1309 {
1310 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
1311 return m_formatter.label();
1312 }
1313
1314 void call_m(int offset, RegisterID base)
1315 {
1316 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
1317 }
1318
1319 AssemblerLabel jmp()
1320 {
1321 m_formatter.oneByteOp(OP_JMP_rel32);
1322 return m_formatter.immediateRel32();
1323 }
1324
1325 // Return a AssemblerLabel so we have a label to the jump, so we can use this
1326 // To make a tail recursive call on x86-64. The MacroAssembler
1327 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1328 AssemblerLabel jmp_r(RegisterID dst)
1329 {
1330 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
1331 return m_formatter.label();
1332 }
1333
1334 void jmp_m(int offset, RegisterID base)
1335 {
1336 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
1337 }
1338
1339 #if !CPU(X86_64)
1340 void jmp_m(const void* address)
1341 {
1342 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
1343 }
1344 #endif
1345
1346 AssemblerLabel jne()
1347 {
1348 m_formatter.twoByteOp(jccRel32(ConditionNE));
1349 return m_formatter.immediateRel32();
1350 }
1351
1352 AssemblerLabel jnz()
1353 {
1354 return jne();
1355 }
1356
1357 AssemblerLabel je()
1358 {
1359 m_formatter.twoByteOp(jccRel32(ConditionE));
1360 return m_formatter.immediateRel32();
1361 }
1362
1363 AssemblerLabel jz()
1364 {
1365 return je();
1366 }
1367
1368 AssemblerLabel jl()
1369 {
1370 m_formatter.twoByteOp(jccRel32(ConditionL));
1371 return m_formatter.immediateRel32();
1372 }
1373
1374 AssemblerLabel jb()
1375 {
1376 m_formatter.twoByteOp(jccRel32(ConditionB));
1377 return m_formatter.immediateRel32();
1378 }
1379
1380 AssemblerLabel jle()
1381 {
1382 m_formatter.twoByteOp(jccRel32(ConditionLE));
1383 return m_formatter.immediateRel32();
1384 }
1385
1386 AssemblerLabel jbe()
1387 {
1388 m_formatter.twoByteOp(jccRel32(ConditionBE));
1389 return m_formatter.immediateRel32();
1390 }
1391
1392 AssemblerLabel jge()
1393 {
1394 m_formatter.twoByteOp(jccRel32(ConditionGE));
1395 return m_formatter.immediateRel32();
1396 }
1397
1398 AssemblerLabel jg()
1399 {
1400 m_formatter.twoByteOp(jccRel32(ConditionG));
1401 return m_formatter.immediateRel32();
1402 }
1403
1404 AssemblerLabel ja()
1405 {
1406 m_formatter.twoByteOp(jccRel32(ConditionA));
1407 return m_formatter.immediateRel32();
1408 }
1409
1410 AssemblerLabel jae()
1411 {
1412 m_formatter.twoByteOp(jccRel32(ConditionAE));
1413 return m_formatter.immediateRel32();
1414 }
1415
1416 AssemblerLabel jo()
1417 {
1418 m_formatter.twoByteOp(jccRel32(ConditionO));
1419 return m_formatter.immediateRel32();
1420 }
1421
1422 AssemblerLabel jp()
1423 {
1424 m_formatter.twoByteOp(jccRel32(ConditionP));
1425 return m_formatter.immediateRel32();
1426 }
1427
1428 AssemblerLabel js()
1429 {
1430 m_formatter.twoByteOp(jccRel32(ConditionS));
1431 return m_formatter.immediateRel32();
1432 }
1433
1434 AssemblerLabel jCC(Condition cond)
1435 {
1436 m_formatter.twoByteOp(jccRel32(cond));
1437 return m_formatter.immediateRel32();
1438 }
1439
1440 // SSE operations:
1441
1442 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1443 {
1444 m_formatter.prefix(PRE_SSE_F2);
1445 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1446 }
1447
1448 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1449 {
1450 m_formatter.prefix(PRE_SSE_F2);
1451 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1452 }
1453
1454 #if !CPU(X86_64)
1455 void addsd_mr(const void* address, XMMRegisterID dst)
1456 {
1457 m_formatter.prefix(PRE_SSE_F2);
1458 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
1459 }
1460 #endif
1461
1462 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1463 {
1464 m_formatter.prefix(PRE_SSE_F2);
1465 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1466 }
1467
1468 void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
1469 {
1470 m_formatter.prefix(PRE_SSE_F2);
1471 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
1472 }
1473
1474 #if !CPU(X86_64)
1475 void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
1476 {
1477 m_formatter.prefix(PRE_SSE_F2);
1478 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
1479 }
1480 #endif
1481
1482 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1483 {
1484 m_formatter.prefix(PRE_SSE_F2);
1485 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1486 }
1487
1488 void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
1489 {
1490 m_formatter.prefix(PRE_SSE_F2);
1491 m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
1492 }
1493
1494 void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
1495 {
1496 m_formatter.prefix(PRE_SSE_F3);
1497 m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
1498 }
1499
1500 #if CPU(X86_64)
1501 void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
1502 {
1503 m_formatter.prefix(PRE_SSE_F2);
1504 m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1505 }
1506 #endif
1507
1508 void movd_rr(XMMRegisterID src, RegisterID dst)
1509 {
1510 m_formatter.prefix(PRE_SSE_66);
1511 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1512 }
1513
1514 void movd_rr(RegisterID src, XMMRegisterID dst)
1515 {
1516 m_formatter.prefix(PRE_SSE_66);
1517 m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
1518 }
1519
1520 #if CPU(X86_64)
1521 void movq_rr(XMMRegisterID src, RegisterID dst)
1522 {
1523 m_formatter.prefix(PRE_SSE_66);
1524 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1525 }
1526
1527 void movq_rr(RegisterID src, XMMRegisterID dst)
1528 {
1529 m_formatter.prefix(PRE_SSE_66);
1530 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1531 }
1532 #endif
1533
1534 void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
1535 {
1536 m_formatter.prefix(PRE_SSE_F2);
1537 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1538 }
1539
1540 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1541 {
1542 m_formatter.prefix(PRE_SSE_F2);
1543 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1544 }
1545
1546 void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1547 {
1548 m_formatter.prefix(PRE_SSE_F2);
1549 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
1550 }
1551
1552 void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1553 {
1554 m_formatter.prefix(PRE_SSE_F3);
1555 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
1556 }
1557
1558 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1559 {
1560 m_formatter.prefix(PRE_SSE_F2);
1561 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1562 }
1563
1564 void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1565 {
1566 m_formatter.prefix(PRE_SSE_F2);
1567 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1568 }
1569
1570 void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1571 {
1572 m_formatter.prefix(PRE_SSE_F3);
1573 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1574 }
1575
1576 #if !CPU(X86_64)
1577 void movsd_mr(const void* address, XMMRegisterID dst)
1578 {
1579 m_formatter.prefix(PRE_SSE_F2);
1580 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
1581 }
1582 void movsd_rm(XMMRegisterID src, const void* address)
1583 {
1584 m_formatter.prefix(PRE_SSE_F2);
1585 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
1586 }
1587 #endif
1588
1589 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1590 {
1591 m_formatter.prefix(PRE_SSE_F2);
1592 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1593 }
1594
1595 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1596 {
1597 m_formatter.prefix(PRE_SSE_F2);
1598 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1599 }
1600
1601 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1602 {
1603 m_formatter.prefix(PRE_SSE_66);
1604 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1605 m_formatter.immediate8(whichWord);
1606 }
1607
1608 void psllq_i8r(int imm, XMMRegisterID dst)
1609 {
1610 m_formatter.prefix(PRE_SSE_66);
1611 m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
1612 m_formatter.immediate8(imm);
1613 }
1614
1615 void psrlq_i8r(int imm, XMMRegisterID dst)
1616 {
1617 m_formatter.prefix(PRE_SSE_66);
1618 m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
1619 m_formatter.immediate8(imm);
1620 }
1621
1622 void por_rr(XMMRegisterID src, XMMRegisterID dst)
1623 {
1624 m_formatter.prefix(PRE_SSE_66);
1625 m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
1626 }
1627
1628 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1629 {
1630 m_formatter.prefix(PRE_SSE_F2);
1631 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1632 }
1633
1634 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1635 {
1636 m_formatter.prefix(PRE_SSE_F2);
1637 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1638 }
1639
1640 void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
1641 {
1642 m_formatter.prefix(PRE_SSE_66);
1643 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1644 }
1645
1646 void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
1647 {
1648 m_formatter.prefix(PRE_SSE_66);
1649 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
1650 }
1651
1652 void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
1653 {
1654 m_formatter.prefix(PRE_SSE_F2);
1655 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1656 }
1657
1658 void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1659 {
1660 m_formatter.prefix(PRE_SSE_F2);
1661 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
1662 }
1663
1664 void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
1665 {
1666 m_formatter.prefix(PRE_SSE_66);
1667 m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
1668 }
1669
1670 void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
1671 {
1672 m_formatter.prefix(PRE_SSE_66);
1673 m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
1674 }
1675
1676 void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
1677 {
1678 m_formatter.prefix(PRE_SSE_F2);
1679 m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1680 }
1681
1682 // Misc instructions:
1683
1684 void int3()
1685 {
1686 m_formatter.oneByteOp(OP_INT3);
1687 }
1688
1689 void ret()
1690 {
1691 m_formatter.oneByteOp(OP_RET);
1692 }
1693
1694 void predictNotTaken()
1695 {
1696 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1697 }
1698
1699 // Assembler admin methods:
1700
1701 size_t codeSize() const
1702 {
1703 return m_formatter.codeSize();
1704 }
1705
1706 AssemblerLabel label()
1707 {
1708 return m_formatter.label();
1709 }
1710
1711 AssemblerLabel align(int alignment)
1712 {
1713 while (!m_formatter.isAligned(alignment))
1714 m_formatter.oneByteOp(OP_HLT);
1715
1716 return label();
1717 }
1718
1719 // Linking & patching:
1720 //
1721 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1722 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1723 // code has been finalized it is (platform support permitting) within a non-
1724 // writable region of memory; to modify the code in an execute-only executable
1725 // pool the 'repatch' and 'relink' methods should be used.
1726
1727 void linkJump(AssemblerLabel from, AssemblerLabel to)
1728 {
1729 ASSERT(from.isSet());
1730 ASSERT(to.isSet());
1731
1732 char* code = reinterpret_cast<char*>(m_formatter.data());
1733 ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
1734 setRel32(code + from.m_offset, code + to.m_offset);
1735 }
1736
1737 static void linkJump(void* code, AssemblerLabel from, void* to)
1738 {
1739 ASSERT(from.isSet());
1740
1741 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1742 }
1743
1744 static void linkCall(void* code, AssemblerLabel from, void* to)
1745 {
1746 ASSERT(from.isSet());
1747
1748 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1749 }
1750
1751 static void linkPointer(void* code, AssemblerLabel where, void* value)
1752 {
1753 ASSERT(where.isSet());
1754
1755 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
1756 }
1757
1758 static void relinkJump(void* from, void* to)
1759 {
1760 setRel32(from, to);
1761 }
1762
1763 static void relinkCall(void* from, void* to)
1764 {
1765 setRel32(from, to);
1766 }
1767
1768 static void repatchCompact(void* where, int32_t value)
1769 {
1770 ASSERT(value >= 0);
1771 ASSERT(value <= std::numeric_limits<int8_t>::max());
1772 setInt8(where, value);
1773 }
1774
1775 static void repatchInt32(void* where, int32_t value)
1776 {
1777 setInt32(where, value);
1778 }
1779
1780 static void repatchPointer(void* where, void* value)
1781 {
1782 setPointer(where, value);
1783 }
1784
1785 static void* readPointer(void* where)
1786 {
1787 return reinterpret_cast<void**>(where)[-1];
1788 }
1789
1790 static unsigned getCallReturnOffset(AssemblerLabel call)
1791 {
1792 ASSERT(call.isSet());
1793 return call.m_offset;
1794 }
1795
1796 static void* getRelocatedAddress(void* code, AssemblerLabel label)
1797 {
1798 ASSERT(label.isSet());
1799 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
1800 }
1801
1802 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
1803 {
1804 return b.m_offset - a.m_offset;
1805 }
1806
1807 PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
1808 {
1809 return m_formatter.executableCopy(globalData, ownerUID, effort);
1810 }
1811
1812 unsigned debugOffset() { return m_formatter.debugOffset(); }
1813
1814 void nop()
1815 {
1816 m_formatter.oneByteOp(OP_NOP);
1817 }
1818
1819 // This is a no-op on x86
1820 ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
1821
1822 private:
1823
1824 static void setPointer(void* where, void* value)
1825 {
1826 reinterpret_cast<void**>(where)[-1] = value;
1827 }
1828
1829 static void setInt32(void* where, int32_t value)
1830 {
1831 reinterpret_cast<int32_t*>(where)[-1] = value;
1832 }
1833
1834 static void setInt8(void* where, int8_t value)
1835 {
1836 reinterpret_cast<int8_t*>(where)[-1] = value;
1837 }
1838
1839 static void setRel32(void* from, void* to)
1840 {
1841 intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
1842 ASSERT(offset == static_cast<int32_t>(offset));
1843
1844 setInt32(from, offset);
1845 }
1846
1847 class X86InstructionFormatter {
1848
1849 static const int maxInstructionSize = 16;
1850
1851 public:
1852
1853 // Legacy prefix bytes:
1854 //
1855 // These are emitted prior to the instruction.
1856
1857 void prefix(OneByteOpcodeID pre)
1858 {
1859 m_buffer.putByte(pre);
1860 }
1861
1862 // Word-sized operands / no operand instruction formatters.
1863 //
1864 // In addition to the opcode, the following operand permutations are supported:
1865 // * None - instruction takes no operands.
1866 // * One register - the low three bits of the RegisterID are added into the opcode.
1867 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1868 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1869 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1870 //
1871 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1872 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1873 //
1874 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1875
1876 void oneByteOp(OneByteOpcodeID opcode)
1877 {
1878 m_buffer.ensureSpace(maxInstructionSize);
1879 m_buffer.putByteUnchecked(opcode);
1880 }
1881
1882 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
1883 {
1884 m_buffer.ensureSpace(maxInstructionSize);
1885 emitRexIfNeeded(0, 0, reg);
1886 m_buffer.putByteUnchecked(opcode + (reg & 7));
1887 }
1888
1889 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
1890 {
1891 m_buffer.ensureSpace(maxInstructionSize);
1892 emitRexIfNeeded(reg, 0, rm);
1893 m_buffer.putByteUnchecked(opcode);
1894 registerModRM(reg, rm);
1895 }
1896
1897 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1898 {
1899 m_buffer.ensureSpace(maxInstructionSize);
1900 emitRexIfNeeded(reg, 0, base);
1901 m_buffer.putByteUnchecked(opcode);
1902 memoryModRM(reg, base, offset);
1903 }
1904
1905 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1906 {
1907 m_buffer.ensureSpace(maxInstructionSize);
1908 emitRexIfNeeded(reg, 0, base);
1909 m_buffer.putByteUnchecked(opcode);
1910 memoryModRM_disp32(reg, base, offset);
1911 }
1912
1913 void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1914 {
1915 m_buffer.ensureSpace(maxInstructionSize);
1916 emitRexIfNeeded(reg, 0, base);
1917 m_buffer.putByteUnchecked(opcode);
1918 memoryModRM_disp8(reg, base, offset);
1919 }
1920
1921 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1922 {
1923 m_buffer.ensureSpace(maxInstructionSize);
1924 emitRexIfNeeded(reg, index, base);
1925 m_buffer.putByteUnchecked(opcode);
1926 memoryModRM(reg, base, index, scale, offset);
1927 }
1928
1929 #if !CPU(X86_64)
1930 void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
1931 {
1932 m_buffer.ensureSpace(maxInstructionSize);
1933 m_buffer.putByteUnchecked(opcode);
1934 memoryModRM(reg, address);
1935 }
1936 #endif
1937
1938 void twoByteOp(TwoByteOpcodeID opcode)
1939 {
1940 m_buffer.ensureSpace(maxInstructionSize);
1941 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1942 m_buffer.putByteUnchecked(opcode);
1943 }
1944
1945 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1946 {
1947 m_buffer.ensureSpace(maxInstructionSize);
1948 emitRexIfNeeded(reg, 0, rm);
1949 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1950 m_buffer.putByteUnchecked(opcode);
1951 registerModRM(reg, rm);
1952 }
1953
1954 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
1955 {
1956 m_buffer.ensureSpace(maxInstructionSize);
1957 emitRexIfNeeded(reg, 0, base);
1958 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1959 m_buffer.putByteUnchecked(opcode);
1960 memoryModRM(reg, base, offset);
1961 }
1962
1963 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1964 {
1965 m_buffer.ensureSpace(maxInstructionSize);
1966 emitRexIfNeeded(reg, index, base);
1967 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1968 m_buffer.putByteUnchecked(opcode);
1969 memoryModRM(reg, base, index, scale, offset);
1970 }
1971
1972 #if !CPU(X86_64)
1973 void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
1974 {
1975 m_buffer.ensureSpace(maxInstructionSize);
1976 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1977 m_buffer.putByteUnchecked(opcode);
1978 memoryModRM(reg, address);
1979 }
1980 #endif
1981
1982 #if CPU(X86_64)
1983 // Quad-word-sized operands:
1984 //
1985 // Used to format 64-bit operations, planting a REX.w prefix.
1986 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1987 // the normal (non-'64'-postfixed) formatters should be used.
1988
1989 void oneByteOp64(OneByteOpcodeID opcode)
1990 {
1991 m_buffer.ensureSpace(maxInstructionSize);
1992 emitRexW(0, 0, 0);
1993 m_buffer.putByteUnchecked(opcode);
1994 }
1995
1996 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
1997 {
1998 m_buffer.ensureSpace(maxInstructionSize);
1999 emitRexW(0, 0, reg);
2000 m_buffer.putByteUnchecked(opcode + (reg & 7));
2001 }
2002
2003 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
2004 {
2005 m_buffer.ensureSpace(maxInstructionSize);
2006 emitRexW(reg, 0, rm);
2007 m_buffer.putByteUnchecked(opcode);
2008 registerModRM(reg, rm);
2009 }
2010
2011 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2012 {
2013 m_buffer.ensureSpace(maxInstructionSize);
2014 emitRexW(reg, 0, base);
2015 m_buffer.putByteUnchecked(opcode);
2016 memoryModRM(reg, base, offset);
2017 }
2018
2019 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2020 {
2021 m_buffer.ensureSpace(maxInstructionSize);
2022 emitRexW(reg, 0, base);
2023 m_buffer.putByteUnchecked(opcode);
2024 memoryModRM_disp32(reg, base, offset);
2025 }
2026
2027 void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
2028 {
2029 m_buffer.ensureSpace(maxInstructionSize);
2030 emitRexW(reg, 0, base);
2031 m_buffer.putByteUnchecked(opcode);
2032 memoryModRM_disp8(reg, base, offset);
2033 }
2034
2035 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2036 {
2037 m_buffer.ensureSpace(maxInstructionSize);
2038 emitRexW(reg, index, base);
2039 m_buffer.putByteUnchecked(opcode);
2040 memoryModRM(reg, base, index, scale, offset);
2041 }
2042
2043 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
2044 {
2045 m_buffer.ensureSpace(maxInstructionSize);
2046 emitRexW(reg, 0, rm);
2047 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2048 m_buffer.putByteUnchecked(opcode);
2049 registerModRM(reg, rm);
2050 }
2051 #endif
2052
2053 // Byte-operands:
2054 //
2055 // These methods format byte operations. Byte operations differ from the normal
2056 // formatters in the circumstances under which they will decide to emit REX prefixes.
2057 // These should be used where any register operand signifies a byte register.
2058 //
2059 // The distinction is due to the handling of register numbers in the range 4..7 on
2060 // x86-64. These register numbers may either represent the second byte of the first
2061 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
2062 //
2063 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
2064 // be accessed where a REX prefix is present), these are likely best treated as
2065 // deprecated. In order to ensure the correct registers spl..dil are selected a
2066 // REX prefix will be emitted for any byte register operand in the range 4..15.
2067 //
2068 // These formatters may be used in instructions where a mix of operand sizes, in which
2069 // case an unnecessary REX will be emitted, for example:
2070 // movzbl %al, %edi
2071 // In this case a REX will be planted since edi is 7 (and were this a byte operand
2072 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
2073 // be silently ignored by the processor.
2074 //
2075 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
2076 // is provided to check byte register operands.
2077
2078 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2079 {
2080 m_buffer.ensureSpace(maxInstructionSize);
2081 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
2082 m_buffer.putByteUnchecked(opcode);
2083 registerModRM(groupOp, rm);
2084 }
2085
2086 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
2087 {
2088 m_buffer.ensureSpace(maxInstructionSize);
2089 emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
2090 m_buffer.putByteUnchecked(opcode);
2091 registerModRM(reg, rm);
2092 }
2093
2094 void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
2095 {
2096 m_buffer.ensureSpace(maxInstructionSize);
2097 emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
2098 m_buffer.putByteUnchecked(opcode);
2099 memoryModRM(reg, base, index, scale, offset);
2100 }
2101
2102 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
2103 {
2104 m_buffer.ensureSpace(maxInstructionSize);
2105 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
2106 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2107 m_buffer.putByteUnchecked(opcode);
2108 registerModRM(reg, rm);
2109 }
2110
2111 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
2112 {
2113 m_buffer.ensureSpace(maxInstructionSize);
2114 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
2115 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2116 m_buffer.putByteUnchecked(opcode);
2117 registerModRM(groupOp, rm);
2118 }
2119
2120 // Immediates:
2121 //
2122 // An immediate should be appended where appropriate after an op has been emitted.
2123 // The writes are unchecked since the opcode formatters above will have ensured space.
2124
2125 void immediate8(int imm)
2126 {
2127 m_buffer.putByteUnchecked(imm);
2128 }
2129
2130 void immediate16(int imm)
2131 {
2132 m_buffer.putShortUnchecked(imm);
2133 }
2134
2135 void immediate32(int imm)
2136 {
2137 m_buffer.putIntUnchecked(imm);
2138 }
2139
2140 void immediate64(int64_t imm)
2141 {
2142 m_buffer.putInt64Unchecked(imm);
2143 }
2144
2145 AssemblerLabel immediateRel32()
2146 {
2147 m_buffer.putIntUnchecked(0);
2148 return label();
2149 }
2150
2151 // Administrative methods:
2152
2153 size_t codeSize() const { return m_buffer.codeSize(); }
2154 AssemblerLabel label() const { return m_buffer.label(); }
2155 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2156 void* data() const { return m_buffer.data(); }
2157
2158 PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
2159 {
2160 return m_buffer.executableCopy(globalData, ownerUID, effort);
2161 }
2162
2163 unsigned debugOffset() { return m_buffer.debugOffset(); }
2164
2165 private:
2166
2167 // Internals; ModRm and REX formatters.
2168
2169 static const RegisterID noBase = X86Registers::ebp;
2170 static const RegisterID hasSib = X86Registers::esp;
2171 static const RegisterID noIndex = X86Registers::esp;
2172 #if CPU(X86_64)
2173 static const RegisterID noBase2 = X86Registers::r13;
2174 static const RegisterID hasSib2 = X86Registers::r12;
2175
2176 // Registers r8 & above require a REX prefix.
2177 inline bool regRequiresRex(int reg)
2178 {
2179 return (reg >= X86Registers::r8);
2180 }
2181
2182 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
2183 inline bool byteRegRequiresRex(int reg)
2184 {
2185 return (reg >= X86Registers::esp);
2186 }
2187
2188 // Format a REX prefix byte.
2189 inline void emitRex(bool w, int r, int x, int b)
2190 {
2191 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
2192 }
2193
2194 // Used to plant a REX byte with REX.w set (for 64-bit operations).
2195 inline void emitRexW(int r, int x, int b)
2196 {
2197 emitRex(true, r, x, b);
2198 }
2199
2200 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
2201 // regRequiresRex() to check other registers (i.e. address base & index).
2202 inline void emitRexIf(bool condition, int r, int x, int b)
2203 {
2204 if (condition) emitRex(false, r, x, b);
2205 }
2206
2207 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
2208 inline void emitRexIfNeeded(int r, int x, int b)
2209 {
2210 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
2211 }
2212 #else
    // No REX prefix bytes on 32-bit x86; these no-op stand-ins let the shared
    // emission code below compile unchanged.
    inline bool regRequiresRex(int) { return false; }
    inline bool byteRegRequiresRex(int) { return false; }
    inline void emitRexIf(bool, int, int, int) {}
    inline void emitRexIfNeeded(int, int, int) {}
2218 #endif
2219
    // Values for the two high bits ('mod' field) of the ModRM byte.
    enum ModRmMode {
        ModRmMemoryNoDisp,    // 00: memory operand, no displacement
        ModRmMemoryDisp8,     // 01: memory operand, signed 8-bit displacement follows
        ModRmMemoryDisp32,    // 10: memory operand, 32-bit displacement follows
        ModRmRegister,        // 11: register operand
    };
2226
2227 void putModRm(ModRmMode mode, int reg, RegisterID rm)
2228 {
2229 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
2230 }
2231
2232 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
2233 {
2234 ASSERT(mode != ModRmRegister);
2235
2236 putModRm(mode, reg, hasSib);
2237 m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
2238 }
2239
    // Emits a ModRM byte for a register-direct (mod=11) operand.
    void registerModRM(int reg, RegisterID rm)
    {
        putModRm(ModRmRegister, reg, rm);
    }
2244
    // Emits the ModRM (plus SIB and displacement bytes where required) for a
    // base-plus-offset memory operand, choosing the shortest encoding the
    // offset value permits: no displacement, disp8, or disp32.
    void memoryModRM(int reg, RegisterID base, int offset)
    {
        // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
        if ((base == hasSib) || (base == hasSib2)) {
#else
        if (base == hasSib) {
#endif
            if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            }
        } else {
            // ebp (and r13) with mod=00 would mean "no base + disp32" (see noBase),
            // so those bases always get at least a zero disp8.
#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRm(ModRmMemoryNoDisp, reg, base);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRm(ModRmMemoryDisp8, reg, base);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
    }
2278
    // As memoryModRM(), but unconditionally emits an 8-bit displacement;
    // the offset must fit in a signed byte. Used when the encoding length
    // must be known in advance (e.g. for later patching).
    void memoryModRM_disp8(int reg, RegisterID base, int offset)
    {
        // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
        ASSERT(CAN_SIGN_EXTEND_8_32(offset));
#if CPU(X86_64)
        if ((base == hasSib) || (base == hasSib2)) {
#else
        if (base == hasSib) {
#endif
            putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
            m_buffer.putByteUnchecked(offset);
        } else {
            putModRm(ModRmMemoryDisp8, reg, base);
            m_buffer.putByteUnchecked(offset);
        }
    }
2295
    // As memoryModRM(), but unconditionally emits a 32-bit displacement,
    // giving a fixed-length encoding regardless of the offset value.
    void memoryModRM_disp32(int reg, RegisterID base, int offset)
    {
        // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
        if ((base == hasSib) || (base == hasSib2)) {
#else
        if (base == hasSib) {
#endif
            putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
            m_buffer.putIntUnchecked(offset);
        } else {
            putModRm(ModRmMemoryDisp32, reg, base);
            m_buffer.putIntUnchecked(offset);
        }
    }
2311
    // Emits ModRM + SIB (+ displacement) bytes for a base + index*scale + offset
    // memory operand. An index register is mandatory; use the other overloads
    // for un-indexed addressing.
    void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
    {
        ASSERT(index != noIndex);

        // ebp/r13 as base with mod=00 means "no base" (see noBase), so those
        // bases always get at least a zero disp8.
#if CPU(X86_64)
        if (!offset && (base != noBase) && (base != noBase2))
#else
        if (!offset && (base != noBase))
#endif
            putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
        else if (CAN_SIGN_EXTEND_8_32(offset)) {
            putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
            m_buffer.putByteUnchecked(offset);
        } else {
            putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
            m_buffer.putIntUnchecked(offset);
        }
    }
2330
2331 #if !CPU(X86_64)
    // Emits a ModRM byte addressing an absolute 32-bit memory location
    // (only available on 32-bit x86).
    void memoryModRM(int reg, const void* address)
    {
        // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
        putModRm(ModRmMemoryNoDisp, reg, noBase);
        m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
    }
2338 #endif
2339
    AssemblerBuffer m_buffer; // Backing store accumulating the emitted machine code.
2341 } m_formatter;
2342 };
2343
2344 } // namespace JSC
2345
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2347
2348 #endif // X86Assembler_h