]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/X86Assembler.h
JavaScriptCore-1218.34.tar.gz
[apple/javascriptcore.git] / assembler / X86Assembler.h
1 /*
2 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
30
31 #include "AssemblerBuffer.h"
32 #include "JITCompilationEffort.h"
33 #include <stdint.h>
34 #include <wtf/Assertions.h>
35 #include <wtf/Vector.h>
36
37 namespace JSC {
38
// Returns true when |value| can be encoded as a sign-extended 8-bit
// immediate, i.e. it lies in the int8_t range [-128, 127].
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value >= -128 && value <= 127; }
40
namespace X86Registers {
    // General-purpose registers, in x86 encoding order. The enumerator value
    // is the register number used in ModRM/SIB fields; r8-r15 (x86-64 only)
    // have values 8-15 and need a REX prefix to be addressed.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, in encoding order.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
75
class X86Assembler {
public:
    typedef X86Registers::RegisterID RegisterID;
    typedef X86Registers::XMMRegisterID XMMRegisterID;
    typedef XMMRegisterID FPRegisterID;

    // x86 condition codes, in encoding order; the enumerator value is added
    // directly to the Jcc / SETcc base opcodes (see jccRel32/setccOpcode).
    typedef enum {
        ConditionO,   // overflow
        ConditionNO,  // no overflow
        ConditionB,   // below (unsigned <)
        ConditionAE,  // above or equal (unsigned >=)
        ConditionE,   // equal / zero
        ConditionNE,  // not equal / not zero
        ConditionBE,  // below or equal (unsigned <=)
        ConditionA,   // above (unsigned >)
        ConditionS,   // sign set (negative)
        ConditionNS,  // sign clear
        ConditionP,   // parity even
        ConditionNP,  // parity odd
        ConditionL,   // less (signed <)
        ConditionGE,  // greater or equal (signed >=)
        ConditionLE,  // less or equal (signed <=)
        ConditionG,   // greater (signed >)

        ConditionC = ConditionB,   // carry set shares the encoding of "below"
        ConditionNC = ConditionAE, // carry clear shares "above or equal"
    } Condition;
103
private:
    // One-byte opcodes and instruction prefixes. Names follow the Intel
    // opcode-map operand notation: Ev/Gv = dword (or qword with REX.W)
    // reg-or-memory and register operands, Eb/Gb = byte forms, Ib/Iz = byte
    // and dword immediates, Ov = offset-only (moffs) form.
    typedef enum {
        OP_ADD_EvGv                     = 0x01,
        OP_ADD_GvEv                     = 0x03,
        OP_OR_EvGv                      = 0x09,
        OP_OR_GvEv                      = 0x0B,
        OP_2BYTE_ESCAPE                 = 0x0F, // escape byte preceding all two-byte opcodes
        OP_AND_EvGv                     = 0x21,
        OP_AND_GvEv                     = 0x23,
        OP_SUB_EvGv                     = 0x29,
        OP_SUB_GvEv                     = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E, // branch-hint prefix
        OP_XOR_EvGv                     = 0x31,
        OP_XOR_GvEv                     = 0x33,
        OP_CMP_EvGv                     = 0x39,
        OP_CMP_GvEv                     = 0x3B,
#if CPU(X86_64)
        PRE_REX                         = 0x40, // REX prefix base value (x86-64 only)
#endif
        OP_PUSH_EAX                     = 0x50, // base of the PUSH-register row (0x50 + reg)
        OP_POP_EAX                      = 0x58, // base of the POP-register row (0x58 + reg)
#if CPU(X86_64)
        OP_MOVSXD_GvEv                  = 0x63,
#endif
        PRE_OPERAND_SIZE                = 0x66, // 16-bit operand-size override
        PRE_SSE_66                      = 0x66, // same byte doubles as an SSE mandatory prefix
        OP_PUSH_Iz                      = 0x68,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EbIb                  = 0x80, // group-1 ALU ops, byte imm into byte operand
        OP_GROUP1_EvIz                  = 0x81, // group-1 ALU ops, dword imm
        OP_GROUP1_EvIb                  = 0x83, // group-1 ALU ops, sign-extended byte imm
        OP_TEST_EbGb                    = 0x84,
        OP_TEST_EvGv                    = 0x85,
        OP_XCHG_EvGv                    = 0x87,
        OP_MOV_EbGb                     = 0x88,
        OP_MOV_EvGv                     = 0x89,
        OP_MOV_GvEv                     = 0x8B,
        OP_LEA                          = 0x8D,
        OP_GROUP1A_Ev                   = 0x8F, // group 1A (POP r/m)
        OP_NOP                          = 0x90,
        OP_CDQ                          = 0x99,
        OP_MOV_EAXOv                    = 0xA1, // mov eax, [moffs]
        OP_MOV_OvEAX                    = 0xA3, // mov [moffs], eax
        OP_MOV_EAXIv                    = 0xB8, // base of the MOV-imm-to-register row
        OP_GROUP2_EvIb                  = 0xC1, // group-2 shifts/rotates by imm8
        OP_RET                          = 0xC3,
        OP_GROUP11_EvIb                 = 0xC6, // group 11 (MOV imm to r/m), byte
        OP_GROUP11_EvIz                 = 0xC7, // group 11, dword
        OP_INT3                         = 0xCC,
        OP_GROUP2_Ev1                   = 0xD1, // group-2 shifts/rotates by 1
        OP_GROUP2_EvCL                  = 0xD3, // group-2 shifts/rotates by CL
        OP_ESCAPE_DD                    = 0xDD, // x87 escape
        OP_CALL_rel32                   = 0xE8,
        OP_JMP_rel32                    = 0xE9,
        PRE_SSE_F2                      = 0xF2, // SSE scalar-double mandatory prefix
        PRE_SSE_F3                      = 0xF3, // SSE scalar-single mandatory prefix
        OP_HLT                          = 0xF4,
        OP_GROUP3_EbIb                  = 0xF6, // group 3 (test/not/neg/...), byte
        OP_GROUP3_Ev                    = 0xF7,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev                    = 0xFF, // group 5 (call/jmp/push r/m)
    } OneByteOpcodeID;
166
167 typedef enum {
168 OP2_MOVSD_VsdWsd = 0x10,
169 OP2_MOVSD_WsdVsd = 0x11,
170 OP2_MOVSS_VsdWsd = 0x10,
171 OP2_MOVSS_WsdVsd = 0x11,
172 OP2_CVTSI2SD_VsdEd = 0x2A,
173 OP2_CVTTSD2SI_GdWsd = 0x2C,
174 OP2_UCOMISD_VsdWsd = 0x2E,
175 OP2_ADDSD_VsdWsd = 0x58,
176 OP2_MULSD_VsdWsd = 0x59,
177 OP2_CVTSD2SS_VsdWsd = 0x5A,
178 OP2_CVTSS2SD_VsdWsd = 0x5A,
179 OP2_SUBSD_VsdWsd = 0x5C,
180 OP2_DIVSD_VsdWsd = 0x5E,
181 OP2_SQRTSD_VsdWsd = 0x51,
182 OP2_ANDNPD_VpdWpd = 0x55,
183 OP2_XORPD_VpdWpd = 0x57,
184 OP2_MOVD_VdEd = 0x6E,
185 OP2_MOVD_EdVd = 0x7E,
186 OP2_JCC_rel32 = 0x80,
187 OP_SETCC = 0x90,
188 OP2_IMUL_GvEv = 0xAF,
189 OP2_MOVZX_GvEb = 0xB6,
190 OP2_MOVSX_GvEb = 0xBE,
191 OP2_MOVZX_GvEw = 0xB7,
192 OP2_MOVSX_GvEw = 0xBF,
193 OP2_PEXTRW_GdUdIb = 0xC5,
194 OP2_PSLLQ_UdqIb = 0x73,
195 OP2_PSRLQ_UdqIb = 0x73,
196 OP2_POR_VdqWdq = 0XEB,
197 } TwoByteOpcodeID;
198
199 TwoByteOpcodeID jccRel32(Condition cond)
200 {
201 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
202 }
203
204 TwoByteOpcodeID setccOpcode(Condition cond)
205 {
206 return (TwoByteOpcodeID)(OP_SETCC + cond);
207 }
208
    // "/digit" opcode extensions carried in the ModRM reg field for the
    // group opcodes above (e.g. GROUP1_OP_ADD selects ADD within the
    // 0x80/0x81/0x83 immediate-ALU group).
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR  = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_ROL = 0,
        GROUP2_OP_ROR = 1,
        GROUP2_OP_RCL = 2,
        GROUP2_OP_RCR = 3,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT  = 2,
        GROUP3_OP_NEG  = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN  = 4,
        GROUP5_OP_PUSH  = 6,

        GROUP11_MOV = 0,

        GROUP14_OP_PSLLQ = 6,
        GROUP14_OP_PSRLQ = 2,

        ESCAPE_DD_FSTP_doubleReal = 3,
    } GroupOpcodeID;

    // Declared below; performs the actual byte emission (prefixes, ModRM/SIB
    // encoding, immediates) into the assembler buffer.
    class X86InstructionFormatter;
public:
248
    // INT_MIN is used as a "no watchpoint recorded yet" sentinel — presumably
    // consumed by watchpoint-label bookkeeping elsewhere in this class (the
    // members are declared outside this excerpt); confirm against that code.
    X86Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }
254
    // Stack operations:

    void push_r(RegisterID reg)
    {
        // PUSH reg (single-byte register row).
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        // POP reg (single-byte register row).
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    void push_i32(int imm)
    {
        // PUSH imm32.
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    void push_m(int offset, RegisterID base)
    {
        // PUSH dword ptr [base + offset].
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        // POP dword ptr [base + offset].
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
282
    // Arithmetic operations:
    //
    // Naming convention throughout: suffix letters describe the operands —
    // r = register, m = memory ([base + offset], optionally with index and
    // scale), i = immediate; source(s) first, destination last. The "l"/"q"
    // infix is 32-bit vs 64-bit operand size. Immediate forms pick the
    // sign-extended imm8 encoding when the value fits (CAN_SIGN_EXTEND_8_32),
    // falling back to the full imm32.

#if !CPU(X86_64)
    void adcl_im(int imm, const void* addr)
    {
        // ADC (add with carry) of an immediate into an absolute address;
        // only available on 32-bit x86 in this assembler.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
297
    void addl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst += src.
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst += [base + offset].
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

#if !CPU(X86_64)
    void addl_mr(const void* addr, RegisterID dst)
    {
        // dst += [addr] (absolute address; 32-bit only).
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
    }
#endif

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] += src.
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        // dst += imm; imm8 form when the immediate fits sign-extended.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        // [base + offset] += imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit variants; oneByteOp64 presumably emits a REX.W prefix (the
    // formatter is not in this excerpt).
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        // Note: the 32-bit immediate is sign-extended to 64 bits by the CPU.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    void addl_im(int imm, const void* addr)
    {
        // [addr] += imm (absolute address; 32-bit only).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
386
    void andl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst &= src.
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst &= [base + offset].
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] &= src.
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        // dst &= imm; imm8 form when the immediate fits sign-extended.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        // [base + offset] &= imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void andq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit: dst &= src.
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        // 64-bit: dst &= sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    void andl_im(int imm, const void* addr)
    {
        // [addr] &= imm (absolute address; 32-bit only).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
452
    void negl_r(RegisterID dst)
    {
        // 32-bit two's-complement negate of a register.
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

#if CPU(X86_64)
    void negq_r(RegisterID dst)
    {
        // 64-bit negate of a register.
        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }
#endif

    void negl_m(int offset, RegisterID base)
    {
        // 32-bit negate of [base + offset] in place.
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    void notl_r(RegisterID dst)
    {
        // 32-bit bitwise complement of a register.
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        // 32-bit bitwise complement of [base + offset] in place.
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
479
    void orl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst |= src.
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst |= [base + offset].
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] |= src.
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        // dst |= imm; imm8 form when the immediate fits sign-extended.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        // [base + offset] |= imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit: dst |= src.
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        // 64-bit: dst |= sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    void orl_im(int imm, const void* addr)
    {
        // [addr] |= imm (absolute address; 32-bit only).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }

    void orl_rm(RegisterID src, const void* addr)
    {
        // [addr] |= src (absolute address; 32-bit only).
        m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
    }
#endif
550
    void subl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst -= src.
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst -= [base + offset].
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] -= src.
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        // dst -= imm; imm8 form when the immediate fits sign-extended.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        // [base + offset] -= imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit: dst -= src.
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        // 64-bit: dst -= sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    void subl_im(int imm, const void* addr)
    {
        // [addr] -= imm (absolute address; 32-bit only).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
616
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst ^= src.
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst ^= [base + offset].
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] ^= src.
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        // [base + offset] ^= imm; imm8 form when the immediate fits.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        // dst ^= imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit: dst ^= src.
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        // 64-bit: dst ^= sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void xorq_rm(RegisterID src, int offset, RegisterID base)
    {
        // 64-bit: [base + offset] ^= src.
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
    }

    void rorq_i8r(int imm, RegisterID dst)
    {
        // 64-bit rotate right by imm; a rotate by 1 has its own shorter
        // opcode (0xD1) so that form is special-cased.
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
            m_formatter.immediate8(imm);
        }
    }

#endif
687
    // Shifts: the shift-by-1 encoding (OP_GROUP2_Ev1) is one byte shorter
    // than the imm8 form, so imm == 1 is special-cased throughout.

    void sarl_i8r(int imm, RegisterID dst)
    {
        // 32-bit arithmetic shift right by imm.
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        // 32-bit arithmetic shift right by the count in CL.
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shrl_i8r(int imm, RegisterID dst)
    {
        // 32-bit logical shift right by imm.
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shrl_CLr(RegisterID dst)
    {
        // 32-bit logical shift right by the count in CL.
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        // 32-bit shift left by imm.
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        // 32-bit shift left by the count in CL.
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if CPU(X86_64)
    void sarq_CLr(RegisterID dst)
    {
        // 64-bit arithmetic shift right by the count in CL.
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        // 64-bit arithmetic shift right by imm.
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
749
    void imull_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit signed multiply: dst *= src (two-operand IMUL).
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst *= [base + offset].
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        // Three-operand IMUL: dst = src * value (imm32).
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    void idivl_r(RegisterID dst)
    {
        // 32-bit signed divide of edx:eax by dst; quotient in eax,
        // remainder in edx (implicit IDIV operands).
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
770
    // Comparisons:

    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit CMP dst, src (sets flags only).
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        // CMP [base + offset], src.
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        // CMP src, [base + offset].
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        // CMP dst, imm; imm8 form when the immediate fits sign-extended.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        // Always emits the imm32 form — used when the instruction must have
        // a fixed length (e.g. so the immediate can be repatched later).
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        // CMP [base + offset], imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpb_im(int imm, int offset, RegisterID base)
    {
        // Byte compare: CMP byte ptr [base + offset], imm8.
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate8(imm);
    }

    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // Byte compare with a scaled-index address.
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

#if CPU(X86)
    void cmpb_im(int imm, const void* addr)
    {
        // Byte compare against an absolute address (32-bit only).
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
        m_formatter.immediate8(imm);
    }
#endif

    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // CMP [base + index*scale + offset], imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        // Fixed-length imm32 memory compare (see cmpl_ir_force32).
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }

#if CPU(X86_64)
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit CMP dst, src.
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        // 64-bit CMP [base + offset], src.
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        // 64-bit CMP src, [base + offset].
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        // 64-bit CMP dst, sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        // 64-bit CMP [base + offset], sign-extended imm.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // 64-bit CMP with a scaled-index address.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        // CMP [addr], reg (absolute address; 32-bit only).
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, const void* addr)
    {
        // CMP [addr], imm (absolute address; 32-bit only).
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void cmpw_ir(int imm, RegisterID dst)
    {
        // 16-bit compare (operand-size prefix); the wide form carries a
        // 16-bit immediate, not 32.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate16(imm);
        }
    }

    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        // 16-bit CMP [base + index*scale + offset], src.
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // 16-bit immediate compare with a scaled-index address.
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
950
    void testl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit TEST (AND that only sets flags).
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        // TEST dst, imm32. Note TEST has no sign-extended imm8 form, so the
        // immediate is always 32 bits.
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        // TEST [base + offset], imm32.
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testb_rr(RegisterID src, RegisterID dst)
    {
        // Byte-register TEST (oneByteOp8 handles byte-register encoding).
        m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
    }

    void testb_im(int imm, int offset, RegisterID base)
    {
        // TEST byte ptr [base + offset], imm8.
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate8(imm);
    }

    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // Byte TEST with a scaled-index address.
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

#if CPU(X86)
    void testb_im(int imm, const void* addr)
    {
        // Byte TEST against an absolute address (32-bit only).
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
        m_formatter.immediate8(imm);
    }
#endif

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // TEST with a scaled-index address.
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if CPU(X86_64)
    void testq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit TEST dst, src.
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_rm(RegisterID src, int offset, RegisterID base)
    {
        // 64-bit TEST [base + offset], src.
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        // 64-bit TEST dst, sign-extended imm32.
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        // 64-bit TEST [base + offset], sign-extended imm32.
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // 64-bit TEST with a scaled-index address.
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif

    void testw_rr(RegisterID src, RegisterID dst)
    {
        // 16-bit TEST (operand-size prefix).
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testb_i8r(int imm, RegisterID dst)
    {
        // TEST of a byte register against imm8.
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }

    // SETcc: write 0 or 1 into the low byte of dst according to the flags.
    // The (GroupOpcodeID)0 is a dummy /digit — SETcc ignores the reg field.

    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    void setz_r(RegisterID dst)
    {
        // SETZ is the same instruction as SETE.
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    void setnz_r(RegisterID dst)
    {
        // SETNZ is the same instruction as SETNE.
        setne_r(dst);
    }
1065
    // Various move ops:

    void cdq()
    {
        // Sign-extend eax into edx:eax (used before idiv).
        m_formatter.oneByteOp(OP_CDQ);
    }

    void fstpl(int offset, RegisterID base)
    {
        // x87: store the top of the FP stack to [base + offset] as a double
        // and pop.
        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
    }

    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit register exchange.
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if CPU(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        // 64-bit register exchange.
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif

    void movl_rr(RegisterID src, RegisterID dst)
    {
        // 32-bit: dst = src.
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        // [base + offset] = src.
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        // Same store, but forces the 32-bit displacement encoding so the
        // instruction length is fixed (repatchable).
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        // [base + index*scale + offset] = src.
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movl_mEAX(const void* addr)
    {
        // Load eax from an absolute address using the short eax-only moffs
        // encoding; the address is 64 bits wide on x86-64.
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        // dst = [base + offset].
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        // Load with forced 32-bit displacement (fixed instruction length).
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        // Load with forced 8-bit displacement.
        m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        // dst = [base + index*scale + offset].
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movl_i32r(int imm, RegisterID dst)
    {
        // dst = imm32 (MOV-imm register row, 0xB8 + reg).
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        // [base + offset] = imm32.
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // [base + index*scale + offset] = imm32.
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if !CPU(X86_64)
    void movb_i8m(int imm, const void* addr)
    {
        // byte ptr [addr] = imm8 (absolute address; 32-bit only).
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
        m_formatter.immediate8(imm);
    }
#endif

    void movb_i8m(int imm, int offset, RegisterID base)
    {
        // byte ptr [base + offset] = imm8.
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
        m_formatter.immediate8(imm);
    }

    void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        // byte ptr [base + index*scale + offset] = imm8.
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        // Byte store of src's low byte (oneByteOp8 handles byte-register
        // encoding constraints).
        m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
    }

    void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        // 16-bit store (operand-size prefix).
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movl_EAXm(const void* addr)
    {
        // Store eax to an absolute address using the short moffs encoding;
        // 64-bit address on x86-64.
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
1201
#if CPU(X86_64)
    // 64-bit moves; the *64 formatter variants plant the REX.W prefix.

    // dst <- src (64-bit).
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    // [base + offset] <- src (64-bit).
    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    // As movq_rm, but forces a 32-bit displacement encoding (patchable).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    // [base + index * scale + offset] <- src (64-bit).
    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // rax <- [addr] (moffs form; address encoded as a 64-bit immediate).
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // [addr] <- rax (moffs form).
    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // dst <- [base + offset] (64-bit).
    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    // As movq_mr, but forces a 32-bit displacement encoding.
    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    // As movq_mr, but forces an 8-bit displacement encoding.
    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    // dst <- [base + index * scale + offset] (64-bit).
    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // [base + offset] <- imm (32-bit immediate with a 64-bit operand size).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // dst <- imm (full 64-bit immediate; register encoded into the opcode).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // dst <- sign-extended src (movsxd).
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }

#else
    // 32-bit-only absolute-address forms.

    // [addr] <- src; takes the short eax-specific encoding when possible.
    void movl_rm(RegisterID src, const void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    // dst <- [addr]; takes the short eax-specific encoding when possible.
    void movl_mr(const void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    // [addr] <- imm.
    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
#endif
1296
    // Zero-extending 16-bit load: dst <- zx16([base + offset]).
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    // Zero-extending 16-bit load, indexed addressing.
    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    // Sign-extending 16-bit load: dst <- sx16([base + offset]).
    void movswl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
    }

    // Sign-extending 16-bit load, indexed addressing.
    void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
    }

    // Zero-extending 8-bit load: dst <- zx8([base + offset]).
    void movzbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
    }

    // Zero-extending 8-bit load, indexed addressing.
    void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
    }

    // Sign-extending 8-bit load: dst <- sx8([base + offset]).
    void movsbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
    }

    // Sign-extending 8-bit load, indexed addressing.
    void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
    }

    // Zero-extend the low byte of src into dst.
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }

    // dst <- base + offset (address computation only, no memory access).
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
#if CPU(X86_64)
    // 64-bit lea.
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
#endif
1355
    // Flow control:

    // Call with a rel32 target; returns a label addressing the displacement
    // slot so the caller can link it later.
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the label marks the position just
    // after the call (its return address).
    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    // Indirect call through [base + offset].
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // Unconditional jump with a rel32 target; returns a label for linking.
    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return an AssemblerLabel so we have a label to the jump, so we can use this
    // to make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    // Indirect jump through [base + offset].
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }

#if !CPU(X86_64)
    // Indirect jump through an absolute address (32-bit targets only).
    void jmp_m(const void* address)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
    }
#endif
1401
1402 AssemblerLabel jne()
1403 {
1404 m_formatter.twoByteOp(jccRel32(ConditionNE));
1405 return m_formatter.immediateRel32();
1406 }
1407
1408 AssemblerLabel jnz()
1409 {
1410 return jne();
1411 }
1412
1413 AssemblerLabel je()
1414 {
1415 m_formatter.twoByteOp(jccRel32(ConditionE));
1416 return m_formatter.immediateRel32();
1417 }
1418
1419 AssemblerLabel jz()
1420 {
1421 return je();
1422 }
1423
1424 AssemblerLabel jl()
1425 {
1426 m_formatter.twoByteOp(jccRel32(ConditionL));
1427 return m_formatter.immediateRel32();
1428 }
1429
1430 AssemblerLabel jb()
1431 {
1432 m_formatter.twoByteOp(jccRel32(ConditionB));
1433 return m_formatter.immediateRel32();
1434 }
1435
1436 AssemblerLabel jle()
1437 {
1438 m_formatter.twoByteOp(jccRel32(ConditionLE));
1439 return m_formatter.immediateRel32();
1440 }
1441
1442 AssemblerLabel jbe()
1443 {
1444 m_formatter.twoByteOp(jccRel32(ConditionBE));
1445 return m_formatter.immediateRel32();
1446 }
1447
1448 AssemblerLabel jge()
1449 {
1450 m_formatter.twoByteOp(jccRel32(ConditionGE));
1451 return m_formatter.immediateRel32();
1452 }
1453
1454 AssemblerLabel jg()
1455 {
1456 m_formatter.twoByteOp(jccRel32(ConditionG));
1457 return m_formatter.immediateRel32();
1458 }
1459
1460 AssemblerLabel ja()
1461 {
1462 m_formatter.twoByteOp(jccRel32(ConditionA));
1463 return m_formatter.immediateRel32();
1464 }
1465
1466 AssemblerLabel jae()
1467 {
1468 m_formatter.twoByteOp(jccRel32(ConditionAE));
1469 return m_formatter.immediateRel32();
1470 }
1471
1472 AssemblerLabel jo()
1473 {
1474 m_formatter.twoByteOp(jccRel32(ConditionO));
1475 return m_formatter.immediateRel32();
1476 }
1477
1478 AssemblerLabel jnp()
1479 {
1480 m_formatter.twoByteOp(jccRel32(ConditionNP));
1481 return m_formatter.immediateRel32();
1482 }
1483
1484 AssemblerLabel jp()
1485 {
1486 m_formatter.twoByteOp(jccRel32(ConditionP));
1487 return m_formatter.immediateRel32();
1488 }
1489
1490 AssemblerLabel js()
1491 {
1492 m_formatter.twoByteOp(jccRel32(ConditionS));
1493 return m_formatter.immediateRel32();
1494 }
1495
1496 AssemblerLabel jCC(Condition cond)
1497 {
1498 m_formatter.twoByteOp(jccRel32(cond));
1499 return m_formatter.immediateRel32();
1500 }
1501
    // SSE operations:

    // dst <- dst + src (scalar double).
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- dst + [base + offset] (scalar double).
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // dst <- dst + [address] (scalar double; 32-bit targets only).
    void addsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
    }
#endif

    // dst <- double(src), converting from a 32-bit integer register.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // dst <- double([base + offset]), 32-bit integer source in memory.
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // dst <- double([address]) (32-bit targets only).
    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
#endif

    // dst <- truncating 32-bit integer conversion of src (scalar double).
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
1549
1550 void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
1551 {
1552 m_formatter.prefix(PRE_SSE_F2);
1553 m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
1554 }
1555
1556 void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
1557 {
1558 m_formatter.prefix(PRE_SSE_F3);
1559 m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
1560 }
1561
#if CPU(X86_64)
    // dst <- truncating 64-bit integer conversion of src (scalar double).
    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
#endif

    // 32-bit GPR <- low 32 bits of an XMM register.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // XMM register <- 32-bit GPR.
    void movd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

#if CPU(X86_64)
    // 64-bit GPR <- low 64 bits of an XMM register (REX.W form of movd).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // XMM register <- 64-bit GPR.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif
1595
    // dst <- src (scalar double move between XMM registers).
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // [base + offset] <- src (scalar double store).
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // [base + index * scale + offset] <- src (scalar double store).
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    // [base + index * scale + offset] <- src (scalar single store; shares the
    // movsd opcode byte - the F3 prefix selects the single-precision form).
    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }
1619
1620 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1621 {
1622 m_formatter.prefix(PRE_SSE_F2);
1623 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1624 }
1625
1626 void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1627 {
1628 m_formatter.prefix(PRE_SSE_F2);
1629 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1630 }
1631
1632 void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
1633 {
1634 m_formatter.prefix(PRE_SSE_F3);
1635 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
1636 }
1637
#if !CPU(X86_64)
    // dst <- [address] (scalar double; 32-bit targets only).
    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
    // [address] <- src (scalar double; 32-bit targets only).
    void movsd_rm(XMMRegisterID src, const void* address)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
    }
#endif

    // dst <- dst * src (scalar double).
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- dst * [base + offset] (scalar double).
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // dst <- 16-bit lane 'whichWord' of src (pextrw).
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // Shift dst left by imm bits (psllq; the group opcode selects the shift).
    void psllq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    // Shift dst right (logical) by imm bits (psrlq).
    void psrlq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    // dst <- dst | src (bitwise OR of the full XMM registers).
    void por_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- dst - src (scalar double).
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- dst - [base + offset] (scalar double).
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Unordered compare of scalar doubles (ucomisd), dst against src.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // Unordered compare of dst against [base + offset].
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // dst <- dst / src (scalar double).
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- dst / [base + offset] (scalar double).
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // dst <- dst ^ src (bitwise, packed double).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- ~dst & src (bitwise AND-NOT, packed double).
    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    // dst <- sqrt(src) (scalar double).
    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // Misc instructions:

    // Breakpoint trap.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // Near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Plant a branch-not-taken hint prefix for the following branch.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1760
    // Assembler admin methods:

    // Number of bytes emitted so far.
    size_t codeSize() const
    {
        return m_formatter.codeSize();
    }

    // Label the start of a watchpoint site. Unless we are re-labelling exactly
    // the site of the previous watchpoint, take a padded label() so this site
    // is clear of the previous watchpoint's jump-replacement region, then
    // record where this site's own replacement region ends.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    // Take a raw label, without padding past any pending watchpoint region.
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }

    // Take a label, emitting nops until we are clear of the last watchpoint's
    // jump-replacement region, so patching that region cannot clobber the
    // code at this label.
    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_formatter.label();
        }
        return result;
    }

    // Advance to the requested alignment, filling with hlt bytes, and return
    // a (watchpoint-safe) label at the aligned position.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1800
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.

    // Link an in-buffer jump at 'from' to the in-buffer target 'to'.
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());

        char* code = reinterpret_cast<char*>(m_formatter.data());
        // The rel32 slot must still hold zero, i.e. not have been linked already.
        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    // Link a jump in copied (writable) code to an absolute target address.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in copied (writable) code to an absolute target address.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Plant an absolute pointer value at the location labelled by 'where'.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        ASSERT(where.isSet());

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Re-target a previously linked jump in executable code.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Re-target a previously linked call in executable code.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Patch an 8-bit displacement (as emitted by the *_disp8 forms); the new
    // value must fit in a signed byte.
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(value >= std::numeric_limits<int8_t>::min());
        ASSERT(value <= std::numeric_limits<int8_t>::max());
        setInt8(where, value);
    }
1856
    // Overwrite the 32-bit value occupying the bytes just before 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Overwrite the pointer-sized value occupying the bytes just before 'where'.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }
1866
1867 static void* readPointer(void* where)
1868 {
1869 return reinterpret_cast<void**>(where)[-1];
1870 }
1871
    // Overwrite the 5 bytes at instructionStart with 'jmp rel32' targeting 'to'.
    static void replaceWithJump(void* instructionStart, void* to)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
        // The displacement is relative to the end of the 5-byte jump.
        intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
        ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
        *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
    }

    // Size in bytes of the region replaceWithJump() may rewrite.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return 5;
    }

#if CPU(X86_64)
    // Undo replaceWithJump() over a 'movq imm64, dst' instruction. Only the
    // first maxJumpReplacementSize() bytes were clobbered by the jump, so only
    // those bytes (REX.W, opcode, and the leading immediate bytes) are
    // rewritten; the tail of the original instruction is still intact.
    static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        // REX.W, with the high bit of dst in REX.B.
        ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
        // Register-in-opcode form; low three bits of dst.
        ptr[1] = OP_MOV_EAXIv | (dst & 7);

        union {
            uint64_t asWord;
            uint8_t asBytes[8];
        } u;
        u.asWord = imm;
        for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
    }
#endif

    // Undo replaceWithJump() over a 'cmpl imm32, dst' (register form),
    // rewriting only the bytes the jump clobbered.
    static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }

    // As above, but for the memory form 'cmpl imm32, (dst)'; only a zero
    // offset (ModRM with no displacement) is supported.
    static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
    {
        ASSERT_UNUSED(offset, !offset);
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }
1940
    // Rewrite the instruction at instructionStart from 'lea' to the 'mov' load
    // with the same operands (a no-op if it is already a load).
    static void replaceWithLoad(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
#if CPU(X86_64)
        // Skip over a REX prefix if one is present.
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
#endif
        switch (*ptr) {
        case OP_MOV_GvEv:
            break;
        case OP_LEA:
            *ptr = OP_MOV_GvEv;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Inverse of replaceWithLoad(): rewrite the 'mov' load into 'lea'.
    static void replaceWithAddressComputation(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
#if CPU(X86_64)
        // Skip over a REX prefix if one is present.
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
#endif
        switch (*ptr) {
        case OP_MOV_GvEv:
            *ptr = OP_LEA;
            break;
        case OP_LEA:
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Buffer offset of the instruction following a call (its return address).
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    // Translate a label into an address within finalized code.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b.
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    // Copy the assembled code into executable memory.
    PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
    {
        return m_formatter.executableCopy(vm, ownerUID, effort);
    }

    unsigned debugOffset() { return m_formatter.debugOffset(); }

    // Emit a one-byte nop.
    void nop()
    {
        m_formatter.oneByteOp(OP_NOP);
    }

    // This is a no-op on x86
    ALWAYS_INLINE static void cacheFlush(void*, size_t) { }

private:

2011 static void setPointer(void* where, void* value)
2012 {
2013 reinterpret_cast<void**>(where)[-1] = value;
2014 }
2015
2016 static void setInt32(void* where, int32_t value)
2017 {
2018 reinterpret_cast<int32_t*>(where)[-1] = value;
2019 }
2020
2021 static void setInt8(void* where, int8_t value)
2022 {
2023 reinterpret_cast<int8_t*>(where)[-1] = value;
2024 }
2025
    // Plant a 32-bit PC-relative displacement in the slot ending at 'from',
    // targeting 'to'. The displacement is measured from 'from' itself (the
    // byte after the slot); asserts that the distance fits in 32 bits.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
2033
2034 class X86InstructionFormatter {
2035
2036 static const int maxInstructionSize = 16;
2037
2038 public:
2039
        // ModRM 'mod' field values. The enumerator order matches the x86
        // encoding (00 = no displacement, 01 = disp8, 10 = disp32,
        // 11 = register), so these are shifted directly into the top two bits
        // of the ModRM byte - do not reorder.
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
2055
        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).

        // Opcode only, no operands.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register encoded into the low three bits of the opcode.
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register/register (or group opcode) ModRM form.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Register plus [base + offset] memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but forces a 32-bit displacement encoding.
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // As above, but forces an 8-bit displacement encoding.
        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        // Register plus [base + index * scale + offset] memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // Register plus absolute-address memory operand (32-bit only; no REX).
        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
2131
        // Two-byte (0x0F-escaped) opcode, no operands.
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        // Two-byte opcode, register/register ModRM form.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Two-byte opcode, register plus [base + offset] memory operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // Two-byte opcode, register plus indexed memory operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // Two-byte opcode with an absolute-address memory operand (32-bit only).
        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
2175
#if CPU(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        // 64-bit opcode, no operands (REX.W only).
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        // 64-bit opcode with the register encoded into its low three bits.
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // 64-bit register/register ModRM form.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // 64-bit register plus [base + offset] memory operand.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but forces a 32-bit displacement encoding.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // As above, but forces an 8-bit displacement encoding.
        void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        // 64-bit register plus indexed memory operand.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        // 64-bit two-byte (0x0F-escaped) opcode, register/register form.
        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif
2246
2247 // Byte-operands:
2248 //
2249 // These methods format byte operations. Byte operations differ from the normal
2250 // formatters in the circumstances under which they will decide to emit REX prefixes.
2251 // These should be used where any register operand signifies a byte register.
2252 //
2253 // The disctinction is due to the handling of register numbers in the range 4..7 on
2254 // x86-64. These register numbers may either represent the second byte of the first
2255 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
2256 //
2257 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
2258 // be accessed where a REX prefix is present), these are likely best treated as
2259 // deprecated. In order to ensure the correct registers spl..dil are selected a
2260 // REX prefix will be emitted for any byte register operand in the range 4..15.
2261 //
        // These formatters may be used in instructions where operand sizes are mixed, in
        // which case an unnecessary REX will be emitted, for example:
2264 // movzbl %al, %edi
2265 // In this case a REX will be planted since edi is 7 (and were this a byte operand
2266 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
2267 // be silently ignored by the processor.
2268 //
2269 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
2270 // is provided to check byte register operands.
2271
        // One-byte opcode with a /groupOp extension in the ModRM reg field;
        // byte-register-direct rm operand.
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            // REX needed only when rm is in the 4..15 byte-register range (see byteRegRequiresRex).
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // One-byte opcode; byte register to/from byte register.
        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // One-byte opcode; byte register to/from a SIB-addressed memory operand.
        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            // Address registers (base/index) use the plain regRequiresRex() check,
            // only the data register uses the byte-register check.
            emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
2295
2296 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
2297 {
2298 m_buffer.ensureSpace(maxInstructionSize);
2299 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
2300 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
2301 m_buffer.putByteUnchecked(opcode);
2302 registerModRM(reg, rm);
2303 }
2304
        // Two-byte (0x0F-escaped) opcode with a /groupOp extension in the ModRM reg
        // field; byte-register-direct rm operand.
        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
2313
2314 // Immediates:
2315 //
        // An immediate should be appended where appropriate after an op has been emitted.
2317 // The writes are unchecked since the opcode formatters above will have ensured space.
2318
        // Appends the low 8 bits of imm; space was reserved by the opcode formatter.
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        // Appends the low 16 bits of imm.
        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        // Appends the low 32 bits of imm.
        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        // Appends all 64 bits of imm.
        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Reserves 4 bytes (written as zero) for a rel32 field and returns a label
        // just past it, so the caller can fill in the real target later.
        AssemblerLabel immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return label();
        }
2344
2345 // Administrative methods:
2346
        // Thin accessors forwarding to the underlying AssemblerBuffer.
        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); } // label at the current write position
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        // Copies the generated code into executable memory (delegates to the buffer).
        PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
        {
            return m_buffer.executableCopy(vm, ownerUID, effort);
        }

        unsigned debugOffset() { return m_buffer.debugOffset(); }
2358
    private:

        // Internals; ModRm and REX formatters.

        // Register numbers with special meanings in the ModRM/SIB encodings (see
        // memoryModRM below): ebp (and r13) cannot be used as a base in the
        // no-displacement form, and esp (and r12) in the base field announces that
        // a SIB byte follows. esp as a SIB index means "no index".
        static const RegisterID noBase = X86Registers::ebp;
        static const RegisterID hasSib = X86Registers::esp;
        static const RegisterID noIndex = X86Registers::esp;
#if CPU(X86_64)
        static const RegisterID noBase2 = X86Registers::r13;
        static const RegisterID hasSib2 = X86Registers::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86Registers::r8);
        }

        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86Registers::esp);
        }
2381
2382 // Format a REX prefix byte.
2383 inline void emitRex(bool w, int r, int x, int b)
2384 {
2385 ASSERT(r >= 0);
2386 ASSERT(x >= 0);
2387 ASSERT(b >= 0);
2388 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
2389 }
2390
        // Used to plant a REX byte with REX.W set (for 64-bit operand-size operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }
2396
2397 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
2398 // regRequiresRex() to check other registers (i.e. address base & index).
2399 inline void emitRexIf(bool condition, int r, int x, int b)
2400 {
2401 if (condition) emitRex(false, r, x, b);
2402 }
2403
2404 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
2405 inline void emitRexIfNeeded(int r, int x, int b)
2406 {
2407 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
2408 }
#else
        // No REX prefix bytes on 32-bit x86; these stubs let the formatters above
        // compile unchanged on both architectures.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif
2416
2417 void putModRm(ModRmMode mode, int reg, RegisterID rm)
2418 {
2419 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
2420 }
2421
2422 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
2423 {
2424 ASSERT(mode != ModRmRegister);
2425
2426 putModRm(mode, reg, hasSib);
2427 m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
2428 }
2429
2430 void registerModRM(int reg, RegisterID rm)
2431 {
2432 putModRm(ModRmRegister, reg, rm);
2433 }
2434
2435 void memoryModRM(int reg, RegisterID base, int offset)
2436 {
2437 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2438 #if CPU(X86_64)
2439 if ((base == hasSib) || (base == hasSib2)) {
2440 #else
2441 if (base == hasSib) {
2442 #endif
2443 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
2444 putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
2445 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2446 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
2447 m_buffer.putByteUnchecked(offset);
2448 } else {
2449 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
2450 m_buffer.putIntUnchecked(offset);
2451 }
2452 } else {
2453 #if CPU(X86_64)
2454 if (!offset && (base != noBase) && (base != noBase2))
2455 #else
2456 if (!offset && (base != noBase))
2457 #endif
2458 putModRm(ModRmMemoryNoDisp, reg, base);
2459 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2460 putModRm(ModRmMemoryDisp8, reg, base);
2461 m_buffer.putByteUnchecked(offset);
2462 } else {
2463 putModRm(ModRmMemoryDisp32, reg, base);
2464 m_buffer.putIntUnchecked(offset);
2465 }
2466 }
2467 }
2468
2469 void memoryModRM_disp8(int reg, RegisterID base, int offset)
2470 {
2471 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2472 ASSERT(CAN_SIGN_EXTEND_8_32(offset));
2473 #if CPU(X86_64)
2474 if ((base == hasSib) || (base == hasSib2)) {
2475 #else
2476 if (base == hasSib) {
2477 #endif
2478 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
2479 m_buffer.putByteUnchecked(offset);
2480 } else {
2481 putModRm(ModRmMemoryDisp8, reg, base);
2482 m_buffer.putByteUnchecked(offset);
2483 }
2484 }
2485
2486 void memoryModRM_disp32(int reg, RegisterID base, int offset)
2487 {
2488 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2489 #if CPU(X86_64)
2490 if ((base == hasSib) || (base == hasSib2)) {
2491 #else
2492 if (base == hasSib) {
2493 #endif
2494 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
2495 m_buffer.putIntUnchecked(offset);
2496 } else {
2497 putModRm(ModRmMemoryDisp32, reg, base);
2498 m_buffer.putIntUnchecked(offset);
2499 }
2500 }
2501
2502 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
2503 {
2504 ASSERT(index != noIndex);
2505
2506 #if CPU(X86_64)
2507 if (!offset && (base != noBase) && (base != noBase2))
2508 #else
2509 if (!offset && (base != noBase))
2510 #endif
2511 putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
2512 else if (CAN_SIGN_EXTEND_8_32(offset)) {
2513 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
2514 m_buffer.putByteUnchecked(offset);
2515 } else {
2516 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
2517 m_buffer.putIntUnchecked(offset);
2518 }
2519 }
2520
#if !CPU(X86_64)
        // Absolute 32-bit address operand (32-bit x86 only, where a pointer fits in an int32).
        void memoryModRM(int reg, const void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif
2529
2530 AssemblerBuffer m_buffer;
2531 } m_formatter;
2532 int m_indexOfLastWatchpoint;
2533 int m_indexOfTailOfLastWatchpoint;
2534 };
2535
2536 } // namespace JSC
2537
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2539
2540 #endif // X86Assembler_h