]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/X86Assembler.h
JavaScriptCore-903.5.tar.gz
[apple/javascriptcore.git] / assembler / X86Assembler.h
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
30
31 #include "AssemblerBuffer.h"
32 #include <stdint.h>
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
35
36 namespace JSC {
37
// Returns true if 'value' survives a round-trip through an 8-bit truncation
// followed by sign extension — i.e. it can be encoded as a sign-extended
// 8-bit immediate. Used throughout the assembler to pick the short imm8
// instruction forms over the full imm32 forms.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value)
{
    const int32_t roundTripped = static_cast<int32_t>(static_cast<int8_t>(value));
    return roundTripped == value;
}
39
namespace X86Registers {
    // General-purpose registers. The enumerator values (eax == 0 ... edi == 7)
    // match the x86 hardware register numbers used in instruction encodings.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        // r8-r15 (numbers 8-15) exist only in 64-bit mode; encoding them
        // requires a REX prefix.
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, numbered 0-7 to match their hardware encoding.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
74
75 class X86Assembler {
76 public:
    typedef X86Registers::RegisterID RegisterID;
    typedef X86Registers::XMMRegisterID XMMRegisterID;
    // Floating-point values are held in XMM registers on this port.
    typedef XMMRegisterID FPRegisterID;

    // Condition codes, in hardware order: each enumerator's value is the
    // offset added to the Jcc/SETcc base opcodes (see jccRel32/setccOpcode
    // below), so the order here must not be changed.
    typedef enum {
        ConditionO,
        ConditionNO,
        ConditionB,
        ConditionAE,
        ConditionE,
        ConditionNE,
        ConditionBE,
        ConditionA,
        ConditionS,
        ConditionNS,
        ConditionP,
        ConditionNP,
        ConditionL,
        ConditionGE,
        ConditionLE,
        ConditionG,

        // Aliases: on x86 "carry" is the same condition as "below".
        ConditionC = ConditionB,
        ConditionNC = ConditionAE,
    } Condition;
102
private:
    // Single-byte x86 opcodes. Names follow the Intel operand notation:
    // E = reg/mem (ModRM) operand, G = register operand, I = immediate;
    // b = byte, v = word/dword (dword here), z = dword immediate.
    // PRE_* entries are instruction prefixes, not opcodes.
    typedef enum {
        OP_ADD_EvGv                     = 0x01,
        OP_ADD_GvEv                     = 0x03,
        OP_OR_EvGv                      = 0x09,
        OP_OR_GvEv                      = 0x0B,
        OP_2BYTE_ESCAPE                 = 0x0F, // escape byte introducing the two-byte opcode map
        OP_AND_EvGv                     = 0x21,
        OP_AND_GvEv                     = 0x23,
        OP_SUB_EvGv                     = 0x29,
        OP_SUB_GvEv                     = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
        OP_XOR_EvGv                     = 0x31,
        OP_XOR_GvEv                     = 0x33,
        OP_CMP_EvGv                     = 0x39,
        OP_CMP_GvEv                     = 0x3B,
#if CPU(X86_64)
        PRE_REX                         = 0x40,
#endif
        OP_PUSH_EAX                     = 0x50, // base of the push-reg opcode range
        OP_POP_EAX                      = 0x58, // base of the pop-reg opcode range
#if CPU(X86_64)
        OP_MOVSXD_GvEv                  = 0x63,
#endif
        PRE_OPERAND_SIZE                = 0x66,
        PRE_SSE_66                      = 0x66,
        OP_PUSH_Iz                      = 0x68,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EbIb                  = 0x80,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_TEST_EbGb                    = 0x84,
        OP_TEST_EvGv                    = 0x85,
        OP_XCHG_EvGv                    = 0x87,
        OP_MOV_EvGv                     = 0x89,
        OP_MOV_GvEv                     = 0x8B,
        OP_LEA                          = 0x8D,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_NOP                          = 0x90,
        OP_CDQ                          = 0x99,
        OP_MOV_EAXOv                    = 0xA1,
        OP_MOV_OvEAX                    = 0xA3,
        OP_MOV_EAXIv                    = 0xB8, // base of the mov-imm-to-reg opcode range
        OP_GROUP2_EvIb                  = 0xC1,
        OP_RET                          = 0xC3,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_INT3                         = 0xCC,
        OP_GROUP2_Ev1                   = 0xD1,
        OP_GROUP2_EvCL                  = 0xD3,
        OP_CALL_rel32                   = 0xE8,
        OP_JMP_rel32                    = 0xE9,
        PRE_SSE_F2                      = 0xF2,
        OP_HLT                          = 0xF4,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_Ev                    = 0xF7,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev                    = 0xFF,
    } OneByteOpcodeID;
161
    // Opcodes from the two-byte map: each is emitted after an OP_2BYTE_ESCAPE
    // (0x0F) byte. Mostly SSE scalar-double ops plus Jcc/SETcc and movzx.
    typedef enum {
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_DIVSD_VsdWsd    = 0x5E,
        OP2_SQRTSD_VsdWsd   = 0x51,
        OP2_XORPD_VpdWpd    = 0x57,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JCC_rel32       = 0x80, // base opcode; actual condition added in jccRel32()
        OP_SETCC            = 0x90, // base opcode (two-byte despite the OP_ prefix); condition added in setccOpcode()
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    } TwoByteOpcodeID;
183
184 TwoByteOpcodeID jccRel32(Condition cond)
185 {
186 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
187 }
188
189 TwoByteOpcodeID setccOpcode(Condition cond)
190 {
191 return (TwoByteOpcodeID)(OP_SETCC + cond);
192 }
193
    // "Group" opcode extensions: for group opcodes (OP_GROUP1_*, OP_GROUP2_*,
    // etc.) the operation is selected by this value placed in the reg field
    // of the ModRM byte (the "/digit" in Intel opcode notation).
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR  = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT  = 2,
        GROUP3_OP_NEG  = 3,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN  = 4,
        GROUP5_OP_PUSH  = 6,

        GROUP11_MOV = 0,
    } GroupOpcodeID;
220
    // Byte-emission helper; presumably the type of m_formatter (declared
    // later, outside this view) — all methods below delegate to it.
    class X86InstructionFormatter;
public:

    // Default construction: starts with an empty instruction buffer.
    X86Assembler()
    {
    }
227
    // Stack operations:

    // push/pop a register.
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // push a 32-bit immediate.
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // push/pop a memory operand at [base + offset].
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
255
    // Arithmetic operations:
    //
    // Suffix conventions used throughout: _rr = reg,reg; _mr = mem -> reg;
    // _rm = reg -> mem; _ir = imm -> reg; _im = imm -> mem. An 'l' in the
    // mnemonic is a 32-bit operation, 'q' (x86-64 only) is 64-bit.
    // Immediate forms emit the short sign-extended imm8 encoding when the
    // value fits (CAN_SIGN_EXTEND_8_32), otherwise the full imm32 form.

#if !CPU(X86_64)
    // add-with-carry immediate to an absolute address (32-bit only).
    void adcl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // add immediate to an absolute address (32-bit only).
    void addl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
347
348 void andl_rr(RegisterID src, RegisterID dst)
349 {
350 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
351 }
352
353 void andl_mr(int offset, RegisterID base, RegisterID dst)
354 {
355 m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
356 }
357
358 void andl_rm(RegisterID src, int offset, RegisterID base)
359 {
360 m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
361 }
362
363 void andl_ir(int imm, RegisterID dst)
364 {
365 if (CAN_SIGN_EXTEND_8_32(imm)) {
366 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
367 m_formatter.immediate8(imm);
368 } else {
369 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
370 m_formatter.immediate32(imm);
371 }
372 }
373
374 void andl_im(int imm, int offset, RegisterID base)
375 {
376 if (CAN_SIGN_EXTEND_8_32(imm)) {
377 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
378 m_formatter.immediate8(imm);
379 } else {
380 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
381 m_formatter.immediate32(imm);
382 }
383 }
384
385 #if CPU(X86_64)
386 void andq_rr(RegisterID src, RegisterID dst)
387 {
388 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
389 }
390
391 void andq_ir(int imm, RegisterID dst)
392 {
393 if (CAN_SIGN_EXTEND_8_32(imm)) {
394 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
395 m_formatter.immediate8(imm);
396 } else {
397 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
398 m_formatter.immediate32(imm);
399 }
400 }
401 #else
402 void andl_im(int imm, const void* addr)
403 {
404 if (CAN_SIGN_EXTEND_8_32(imm)) {
405 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
406 m_formatter.immediate8(imm);
407 } else {
408 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
409 m_formatter.immediate32(imm);
410 }
411 }
412 #endif
413
    // Two's-complement negate and bitwise NOT, on a register or a memory
    // operand at [base + offset].
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
433
    // Bitwise OR, in the same forms as add/and above.
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // OR immediate into an absolute address (32-bit only).
    void orl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
499
    // Subtraction, in the same forms as add above.
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // Subtract immediate from an absolute address (32-bit only).
    void subl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
565
    // Bitwise XOR, in the same forms as add/and/or above.
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
620
    // Shifts. The _i8r forms shift by an immediate count, using the dedicated
    // shift-by-1 opcode when the count is 1; the _CLr forms shift by the
    // count held in the CL register.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shrl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shrl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if CPU(X86_64)
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
682
    // Signed multiply and divide.
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    // dst = src * value (three-operand imul with a 32-bit immediate).
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
703
    // Comparisons:

    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    // Always emits the full imm32 form, even when imm fits in 8 bits —
    // presumably so the instruction has a fixed size/immediate slot for
    // later repatching (TODO confirm against callers).
    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // Byte-sized compares; always an 8-bit immediate.
    void cmpb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate8(imm);
    }

    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    // Compare at [base + index*scale + offset].
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    // Forced imm32 form; see cmpl_ir_force32.
    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }

#if CPU(X86_64)
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // Absolute-address compares (32-bit only).
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
843
    // 16-bit compares: the operand-size prefix switches the Ev forms to
    // 16-bit operands, so the wide-immediate branch emits an imm16.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
862
    // TEST (non-destructive AND, sets flags only).
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testb_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EbGb, src, dst);
    }

    void testb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate8(imm);
    }

    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if CPU(X86_64)
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    // Note: the immediate stays 32 bits even for the 64-bit forms.
    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif
927
    // 16-bit test (operand-size prefix), byte-register test, and SETcc.
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }

    // Set the low byte of dst to 0/1 according to 'cond'. The group field is
    // unused by SETcc, hence the (GroupOpcodeID)0.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // setz is an alias for sete.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // setnz is an alias for setne.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
964
    // Various move ops:

    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }

    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if CPU(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif

    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    // Forces a 32-bit displacement encoding regardless of offset size —
    // presumably so the displacement can be repatched later (TODO confirm).
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // Load from an absolute address into EAX, using the short EAX-only
    // moffs encoding; the address is emitted at pointer width.
    void movl_mEAX(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    // Forced 32-bit / 8-bit displacement variants; see movl_rm_disp32.
    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // Store EAX to an absolute address; counterpart of movl_mEAX.
    void movl_EAXm(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
1055
#if CPU(X86_64)
    // 64-bit moves (REX.W forms of the moves above).
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // RAX <-> absolute-address moves with a full 64-bit address immediate.
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // Stores a 32-bit immediate to a 64-bit memory location (the immediate
    // is sign-extended by the hardware in this encoding).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // Full 64-bit immediate load (movabs).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // Sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }


#else
    // 32-bit-only absolute-address moves; the EAX forms use the shorter
    // moffs encoding when possible.
    void movl_rm(RegisterID src, const void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(const void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
#endif
1150
    // Zero-extending loads and lea.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }

    // dst = base + offset (address computation, no memory access).
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
#if CPU(X86_64)
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
#endif
1179
1180 // Flow control:
1181
    // Emit a near call with a zeroed rel32 placeholder; the returned label
    // sits just past the instruction (the patch point for linkCall).
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register.
    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    // Indirect call through memory at [base + offset].
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // Emit an unconditional jump with a zeroed rel32 placeholder, to be
    // filled in later via linkJump/relinkJump.
    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return an AssemblerLabel so we have a label to the jump, so we can use this
    // to make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    // Indirect jump through memory at [base + offset].
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
1218
    // Conditional jumps: each plants a two-byte jcc opcode followed by a
    // zeroed rel32 placeholder, and returns the label used to link it later.
    // Mnemonics follow the standard x86 condition names.
    AssemblerLabel jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    // Alias of jne (NE and NZ are the same condition code).
    AssemblerLabel jnz()
    {
        return jne();
    }

    AssemblerLabel je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    // Alias of je (E and Z are the same condition code).
    AssemblerLabel jz()
    {
        return je();
    }

    // Signed less-than.
    AssemblerLabel jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    // Unsigned below.
    AssemblerLabel jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    // Signed less-than-or-equal.
    AssemblerLabel jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    // Unsigned below-or-equal.
    AssemblerLabel jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    // Signed greater-than-or-equal.
    AssemblerLabel jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    // Signed greater-than.
    AssemblerLabel jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    // Unsigned above.
    AssemblerLabel ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    // Unsigned above-or-equal.
    AssemblerLabel jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    // Jump on overflow.
    AssemblerLabel jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    // Jump on parity.
    AssemblerLabel jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    // Jump on sign.
    AssemblerLabel js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    // Generic form: conditional jump on an arbitrary Condition.
    AssemblerLabel jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
1312
1313 // SSE operations:
1314
    // Scalar double add: dst += src. SSE2 scalar-double ops carry the F2
    // prefix; XMM register numbers are passed through the ordinary ModRM
    // formatters via casts to RegisterID.
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst += double loaded from [base + offset].
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Convert a 32-bit integer register to double in dst.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // Convert a 32-bit integer loaded from [base + offset] to double in dst.
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // 32-bit only: convert a 32-bit integer at an absolute address to double.
    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
#endif

    // Convert double to integer, truncating toward zero.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // Move the low 32 bits of an XMM register to a GP register.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

#if CPU(X86_64)
    // 64-bit XMM -> GP move (MOVD opcode with REX.W selects the q-word form).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // 64-bit GP -> XMM move.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif

    // Scalar double register-to-register move.
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // Store scalar double to [base + offset].
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // Load scalar double from [base + offset].
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

#if !CPU(X86_64)
    // 32-bit only: load scalar double from an absolute address.
    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
#endif

    // Scalar double multiply: dst *= src.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst *= double loaded from [base + offset].
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Extract the 16-bit word selected by whichWord from src into dst.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // Scalar double subtract: dst -= src.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst -= double loaded from [base + offset].
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Unordered scalar double compare, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // Compare dst against a double loaded from [base + offset].
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Scalar double divide: dst /= src.
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // dst /= double loaded from [base + offset].
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Packed-double XOR (commonly used to flip/clear double sign bits).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    // Scalar double square root.
    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1465
1466 // Misc instructions:
1467
    // Software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // Near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Branch-not-taken hint prefix for the following conditional branch.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1482
1483 // Assembler admin methods:
1484
    // Number of bytes emitted so far.
    size_t codeSize() const
    {
        return m_formatter.codeSize();
    }

    // Label marking the current end of the emitted code.
    AssemblerLabel label()
    {
        return m_formatter.label();
    }

    // Pad with HLT bytes until the buffer offset is a multiple of
    // 'alignment', then return a label at the aligned position.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1502
1503 // Linking & patching:
1504 //
1505 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1506 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1507 // code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
1509 // pool the 'repatch' and 'relink' methods should be used.
1510
    // Link a jump within the (still writable) assembler buffer: patch the
    // rel32 field ending at 'from' to target 'to'.
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());

        char* code = reinterpret_cast<char*>(m_formatter.data());
        // The rel32 field must still hold the zero placeholder written by
        // immediateRel32() - i.e. this jump has not been linked already.
        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    // Link a jump in a copied-out code block to an absolute target.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in a copied-out code block to an absolute target.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Patch a pointer-sized immediate ending at 'where'.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        ASSERT(where.isSet());

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Retarget an already-linked jump ('from' points just past its rel32).
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Retarget an already-linked call.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Patch a one-byte compact displacement; only non-negative values that
    // fit in an int8_t are representable.
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(value >= 0);
        ASSERT(value <= std::numeric_limits<int8_t>::max());
        setInt8(where, value);
    }

    // Patch the 32-bit immediate ending at 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Patch the pointer-sized immediate ending at 'where'.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }

    // Read back the pointer-sized immediate ending at 'where'.
    static void* readPointer(void* where)
    {
        return reinterpret_cast<void**>(where)[-1];
    }

    // Offset of the return address for a call planted at 'call'.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    // Absolute address of 'label' within the relocated code block 'code'.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (negative if b precedes a).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    // Copy the emitted code into executable memory owned by 'allocator'.
    void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
    {
        return m_formatter.executableCopy(globalData, allocator);
    }

    // Discard everything emitted after 'rewindTo'.
    void rewindToLabel(AssemblerLabel rewindTo) { m_formatter.rewindToLabel(rewindTo); }

#ifndef NDEBUG
    unsigned debugOffset() { return m_formatter.debugOffset(); }
#endif

    // One-byte no-op.
    void nop()
    {
        m_formatter.oneByteOp(OP_NOP);
    }
1606
1607 private:
1608
1609 static void setPointer(void* where, void* value)
1610 {
1611 reinterpret_cast<void**>(where)[-1] = value;
1612 }
1613
1614 static void setInt32(void* where, int32_t value)
1615 {
1616 reinterpret_cast<int32_t*>(where)[-1] = value;
1617 }
1618
1619 static void setInt8(void* where, int8_t value)
1620 {
1621 reinterpret_cast<int8_t*>(where)[-1] = value;
1622 }
1623
    // Write the rel32 field ending at 'from' so it targets 'to'. The
    // displacement is relative to 'from' (the end of the instruction); it
    // must fit in 32 bits, which is asserted before the write.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
1631
1632 class X86InstructionFormatter {
1633
1634 static const int maxInstructionSize = 16;
1635
1636 public:
1637
1638 // Legacy prefix bytes:
1639 //
// These are emitted prior to the instruction.
1641
        // Emit a single legacy prefix byte (e.g. operand-size or SSE prefix)
        // ahead of the instruction that follows.
        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
1646
1647 // Word-sized operands / no operand instruction formatters.
1648 //
1649 // In addition to the opcode, the following operand permutations are supported:
1650 // * None - instruction takes no operands.
1651 // * One register - the low three bits of the RegisterID are added into the opcode.
1652 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1653 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1654 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1655 //
1656 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1657 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1658 //
1659 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1660
        // Opcode only; no operands. Every formatter first reserves
        // maxInstructionSize bytes so subsequent writes can be unchecked.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register encoded in the low three opcode bits (e.g. push/pop forms);
        // a REX prefix is planted first if the register requires one.
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register-register form: reg (or a group opcode) in ModRM.reg, rm in ModRM.rm.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Register + [base + offset] memory operand; displacement width is
        // chosen automatically by memoryModRM.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always encode a 32-bit displacement (patchable form).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // As above, but always encode an 8-bit displacement.
        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        // Register + [base + index * scale + offset] (SIB) memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // 32-bit only: register + absolute-address memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
1722
        // Two-byte (0x0F-escaped) opcode, no operands.
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        // Two-byte opcode, register-register form. Note: any REX must come
        // before the 0x0F escape, hence emitRexIfNeeded runs first.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Two-byte opcode with a [base + offset] memory operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // Two-byte opcode with a [base + index * scale + offset] operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !CPU(X86_64)
        // 32-bit only: two-byte opcode with an absolute-address operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
1766
1767 #if CPU(X86_64)
1768 // Quad-word-sized operands:
1769 //
// Used to format 64-bit operations, planting a REX.w prefix.
1771 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1772 // the normal (non-'64'-postfixed) formatters should be used.
1773
        // 64-bit opcode, no operands; unconditionally plants REX.W.
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        // 64-bit: register encoded in the low three opcode bits.
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // 64-bit register-register form.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // 64-bit with [base + offset] operand; displacement width chosen automatically.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // 64-bit, forced 32-bit displacement (patchable form).
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // 64-bit, forced 8-bit displacement.
        void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        // 64-bit with [base + index * scale + offset] (SIB) operand.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        // 64-bit two-byte opcode, register-register form; REX.W precedes the
        // 0x0F escape byte.
        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
1836 #endif
1837
1838 // Byte-operands:
1839 //
1840 // These methods format byte operations. Byte operations differ from the normal
1841 // formatters in the circumstances under which they will decide to emit REX prefixes.
1842 // These should be used where any register operand signifies a byte register.
1843 //
// The distinction is due to the handling of register numbers in the range 4..7 on
1845 // x86-64. These register numbers may either represent the second byte of the first
1846 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1847 //
1848 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1849 // be accessed where a REX prefix is present), these are likely best treated as
1850 // deprecated. In order to ensure the correct registers spl..dil are selected a
1851 // REX prefix will be emitted for any byte register operand in the range 4..15.
1852 //
1853 // These formatters may be used in instructions where a mix of operand sizes, in which
1854 // case an unnecessary REX will be emitted, for example:
1855 // movzbl %al, %edi
1856 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1857 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1858 // be silently ignored by the processor.
1859 //
1860 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1861 // is provided to check byte register operands.
1862
        // Byte-operand, group-opcode form: REX is emitted whenever the byte
        // register requires it (see byteRegRequiresRex above).
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Byte-operand, two-byte opcode, register-register form; REX is
        // needed if either register operand is a byte register 4..15.
        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Byte-operand, two-byte opcode, group-opcode form.
        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
1888
1889 // Immediates:
1890 //
// An immediate should be appended where appropriate after an op has been emitted.
1892 // The writes are unchecked since the opcode formatters above will have ensured space.
1893
        // Append an 8-bit immediate (unchecked: the opcode formatter already
        // reserved space).
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        // Append a 16-bit immediate.
        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        // Append a 32-bit immediate.
        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        // Append a 64-bit immediate.
        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Append a zeroed rel32 placeholder and return a label just past it;
        // the linker later patches the field via setRel32.
        AssemblerLabel immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return label();
        }
1919
1920 // Administrative methods:
1921
        // Thin delegations to the underlying AssemblerBuffer.
        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        // Copy the buffer contents into executable memory owned by 'allocator'.
        void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
        {
            return m_buffer.executableCopy(globalData, allocator);
        }

        // Discard everything emitted after 'rewindTo'.
        void rewindToLabel(AssemblerLabel rewindTo) { m_buffer.rewindToLabel(rewindTo); }

#ifndef NDEBUG
        unsigned debugOffset() { return m_buffer.debugOffset(); }
#endif
1937
1938 private:
1939
1940 // Internals; ModRm and REX formatters.
1941
        // Registers with special ModRM/SIB meanings: ebp as a base with
        // mod=00 means "no base" (disp32 follows); esp as a base/index slot
        // signals a SIB byte / "no index".
        static const RegisterID noBase = X86Registers::ebp;
        static const RegisterID hasSib = X86Registers::esp;
        static const RegisterID noIndex = X86Registers::esp;
#if CPU(X86_64)
        // r13 and r12 share ebp's/esp's encodings in the low three bits, so
        // they inherit the same special cases.
        static const RegisterID noBase2 = X86Registers::r13;
        static const RegisterID hasSib2 = X86Registers::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86Registers::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86Registers::esp);
        }

        // Format a REX prefix byte: 0100WRXB, taking bit 3 of each register number.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif
1992
        // ModRM 'mod' field values, in encoding order (00, 01, 10, 11).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Emit a ModRM byte: mod in bits 7-6, reg in 5-3, rm in 2-0
        // (the REX prefix carries bit 3 of reg/rm when needed).
        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        // Emit a ModRM byte with rm = esp to signal a SIB byte, then the SIB:
        // scale in bits 7-6, index in 5-3, base in 2-0.
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        // Register-direct ModRM (mod = 11).
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
2017
        // Encode a [base + offset] memory operand, choosing the narrowest
        // displacement: none when offset is 0, disp8 when it fits in a signed
        // byte, otherwise disp32. ebp/r13 as base cannot use the no-disp form
        // (mod=00 with that rm means disp32), so they always get a displacement.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
2051
        // Encode a [base + offset] operand with a forced 8-bit displacement;
        // the caller must ensure the offset fits (asserted here).
        void memoryModRM_disp8(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
            ASSERT(CAN_SIGN_EXTEND_8_32(offset));
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp8, reg, base);
                m_buffer.putByteUnchecked(offset);
            }
        }

        // Encode a [base + offset] operand with a forced 32-bit displacement
        // (used for fields that will be repatched later).
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
2084
        // Encode a [base + index * scale + offset] operand via a SIB byte,
        // again picking the narrowest displacement. esp cannot be an index
        // (its slot means "no index"), hence the assertion.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
2103
#if !CPU(X86_64)
        // 32-bit only: encode an absolute-address operand.
        void memoryModRM(int reg, const void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif
2112
2113 AssemblerBuffer m_buffer;
2114 } m_formatter;
2115 };
2116
2117 } // namespace JSC
2118
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2120
2121 #endif // X86Assembler_h