1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
40inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
41#if PLATFORM(X86_64)
42inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
43inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
44#endif
45
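// Illustrative note (not part of the original source): CAN_SIGN_EXTEND_8_32(127) is true
// while CAN_SIGN_EXTEND_8_32(128) is false, which is how the emitters below decide between
// the short sign-extended imm8 forms and the full imm32 forms.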
46namespace X86 {
47 typedef enum {
48 eax,
49 ecx,
50 edx,
51 ebx,
52 esp,
53 ebp,
54 esi,
55 edi,
56
57#if PLATFORM(X86_64)
58 r8,
59 r9,
60 r10,
61 r11,
62 r12,
63 r13,
64 r14,
65 r15,
66#endif
67 } RegisterID;
68
69 typedef enum {
70 xmm0,
71 xmm1,
72 xmm2,
73 xmm3,
74 xmm4,
75 xmm5,
76 xmm6,
77 xmm7,
78 } XMMRegisterID;
79}
80
81class X86Assembler {
82public:
83 typedef X86::RegisterID RegisterID;
84 typedef X86::XMMRegisterID XMMRegisterID;
85
86 typedef enum {
87 OP_ADD_EvGv = 0x01,
88 OP_ADD_GvEv = 0x03,
89 OP_OR_EvGv = 0x09,
90 OP_OR_GvEv = 0x0B,
91 OP_2BYTE_ESCAPE = 0x0F,
92 OP_AND_EvGv = 0x21,
93 OP_SUB_EvGv = 0x29,
94 OP_SUB_GvEv = 0x2B,
95 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
96 OP_XOR_EvGv = 0x31,
97 OP_CMP_EvGv = 0x39,
98 OP_CMP_GvEv = 0x3B,
99#if PLATFORM(X86_64)
100 PRE_REX = 0x40,
101#endif
102 OP_PUSH_EAX = 0x50,
103 OP_POP_EAX = 0x58,
104#if PLATFORM(X86_64)
105 OP_MOVSXD_GvEv = 0x63,
106#endif
107 PRE_OPERAND_SIZE = 0x66,
108 PRE_SSE_66 = 0x66,
109 OP_PUSH_Iz = 0x68,
110 OP_IMUL_GvEvIz = 0x69,
111 OP_GROUP1_EvIz = 0x81,
112 OP_GROUP1_EvIb = 0x83,
113 OP_TEST_EvGv = 0x85,
114 OP_XCHG_EvGv = 0x87,
115 OP_MOV_EvGv = 0x89,
116 OP_MOV_GvEv = 0x8B,
117 OP_LEA = 0x8D,
118 OP_GROUP1A_Ev = 0x8F,
119 OP_CDQ = 0x99,
120 OP_MOV_EAXOv = 0xA1,
121 OP_MOV_OvEAX = 0xA3,
122 OP_MOV_EAXIv = 0xB8,
123 OP_GROUP2_EvIb = 0xC1,
124 OP_RET = 0xC3,
125 OP_GROUP11_EvIz = 0xC7,
126 OP_INT3 = 0xCC,
127 OP_GROUP2_Ev1 = 0xD1,
128 OP_GROUP2_EvCL = 0xD3,
129 OP_CALL_rel32 = 0xE8,
130 OP_JMP_rel32 = 0xE9,
131 PRE_SSE_F2 = 0xF2,
132 OP_HLT = 0xF4,
133 OP_GROUP3_EbIb = 0xF6,
134 OP_GROUP3_Ev = 0xF7,
135 OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
136 OP_GROUP5_Ev = 0xFF,
137 } OneByteOpcodeID;
138
139 typedef enum {
140 OP2_MOVSD_VsdWsd = 0x10,
141 OP2_MOVSD_WsdVsd = 0x11,
142 OP2_CVTSI2SD_VsdEd = 0x2A,
143 OP2_CVTTSD2SI_GdWsd = 0x2C,
144 OP2_UCOMISD_VsdWsd = 0x2E,
145 OP2_ADDSD_VsdWsd = 0x58,
146 OP2_MULSD_VsdWsd = 0x59,
147 OP2_SUBSD_VsdWsd = 0x5C,
148 OP2_MOVD_VdEd = 0x6E,
149 OP2_MOVD_EdVd = 0x7E,
150 OP2_JO_rel32 = 0x80,
151 OP2_JB_rel32 = 0x82,
152 OP2_JAE_rel32 = 0x83,
153 OP2_JE_rel32 = 0x84,
154 OP2_JNE_rel32 = 0x85,
155 OP2_JBE_rel32 = 0x86,
156 OP2_JA_rel32 = 0x87,
157 OP2_JS_rel32 = 0x88,
158 OP2_JP_rel32 = 0x8A,
159 OP2_JL_rel32 = 0x8C,
160 OP2_JGE_rel32 = 0x8D,
161 OP2_JLE_rel32 = 0x8E,
162 OP2_JG_rel32 = 0x8F,
163 OP_SETE = 0x94,
164 OP_SETNE = 0x95,
165 OP2_IMUL_GvEv = 0xAF,
166 OP2_MOVZX_GvEb = 0xB6,
167 OP2_MOVZX_GvEw = 0xB7,
168 OP2_PEXTRW_GdUdIb = 0xC5,
169 } TwoByteOpcodeID;
170
171 typedef enum {
172 GROUP1_OP_ADD = 0,
173 GROUP1_OP_OR = 1,
174 GROUP1_OP_AND = 4,
175 GROUP1_OP_SUB = 5,
176 GROUP1_OP_XOR = 6,
177 GROUP1_OP_CMP = 7,
178
179 GROUP1A_OP_POP = 0,
180
181 GROUP2_OP_SHL = 4,
182 GROUP2_OP_SAR = 7,
183
184 GROUP3_OP_TEST = 0,
185 GROUP3_OP_NOT = 2,
186 GROUP3_OP_IDIV = 7,
187
188 GROUP5_OP_CALLN = 2,
189 GROUP5_OP_JMPN = 4,
190 GROUP5_OP_PUSH = 6,
191
192 GROUP11_MOV = 0,
193 } GroupOpcodeID;
194
195 // Opaque label types
196
197private:
198 class X86InstructionFormatter;
199public:
200
201 class JmpSrc {
202 friend class X86Assembler;
203 friend class X86InstructionFormatter;
204 public:
205 JmpSrc()
206 : m_offset(-1)
207 {
208 }
209
210 private:
211 JmpSrc(int offset)
212 : m_offset(offset)
213 {
214 }
215
216 int m_offset;
217 };
218
219 class JmpDst {
220 friend class X86Assembler;
221 friend class X86InstructionFormatter;
222 public:
223 JmpDst()
224 : m_offset(-1)
225 {
226 }
227
228 private:
229 JmpDst(int offset)
230 : m_offset(offset)
231 {
232 }
233
234 int m_offset;
235 };
236
237 X86Assembler()
238 {
239 }
240
241 size_t size() const { return m_formatter.size(); }
242
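    // Illustrative usage (a minimal sketch, not from the original source; "pool" is an
    // assumed ExecutablePool*): emit instructions, then copy the buffer into executable
    // memory. The byte sequences in the comments are the expected encodings.
    //
    //     X86Assembler a;
    //     a.push_r(X86::ebp);             // 55
    //     a.movl_rr(X86::esp, X86::ebp);  // 89 E5
    //     a.addl_ir(1, X86::eax);         // 83 C0 01
    //     a.pop_r(X86::ebp);              // 5D
    //     a.ret();                        // C3
    //     void* code = a.executableCopy(pool);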
243 // Stack operations:
244
245 void push_r(RegisterID reg)
246 {
247 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
248 }
249
250 void pop_r(RegisterID reg)
251 {
252 m_formatter.oneByteOp(OP_POP_EAX, reg);
253 }
254
255 void push_i32(int imm)
256 {
257 m_formatter.oneByteOp(OP_PUSH_Iz);
258 m_formatter.immediate32(imm);
259 }
260
261 void push_m(int offset, RegisterID base)
262 {
263 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
264 }
265
266 void pop_m(int offset, RegisterID base)
267 {
268 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
269 }
270
271 // Arithmetic operations:
272
273 void addl_rr(RegisterID src, RegisterID dst)
274 {
275 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
276 }
277
278 void addl_mr(int offset, RegisterID base, RegisterID dst)
279 {
280 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
281 }
282
283 void addl_ir(int imm, RegisterID dst)
284 {
285 if (CAN_SIGN_EXTEND_8_32(imm)) {
286 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
287 m_formatter.immediate8(imm);
288 } else {
289 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
290 m_formatter.immediate32(imm);
291 }
292 }
293
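    // Illustrative encodings (assumed, for reference only): the CAN_SIGN_EXTEND_8_32 check
    // above picks the shorter imm8 form where possible, e.g.
    //     addl_ir(1, X86::eax);     // 83 C0 01            add $0x1, %eax
    //     addl_ir(1000, X86::eax);  // 81 C0 E8 03 00 00   add $0x3e8, %eax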
294 void addl_im(int imm, int offset, RegisterID base)
295 {
296 if (CAN_SIGN_EXTEND_8_32(imm)) {
297 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
298 m_formatter.immediate8(imm);
299 } else {
300 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
301 m_formatter.immediate32(imm);
302 }
303 }
304
305#if PLATFORM(X86_64)
306 void addq_rr(RegisterID src, RegisterID dst)
307 {
308 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
309 }
310
311 void addq_ir(int imm, RegisterID dst)
312 {
313 if (CAN_SIGN_EXTEND_8_32(imm)) {
314 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
315 m_formatter.immediate8(imm);
316 } else {
317 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
318 m_formatter.immediate32(imm);
319 }
320 }
321#else
322 void addl_im(int imm, void* addr)
323 {
324 if (CAN_SIGN_EXTEND_8_32(imm)) {
325 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
326 m_formatter.immediate8(imm);
327 } else {
328 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
329 m_formatter.immediate32(imm);
330 }
331 }
332#endif
333
334 void andl_rr(RegisterID src, RegisterID dst)
335 {
336 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
337 }
338
339 void andl_ir(int imm, RegisterID dst)
340 {
341 if (CAN_SIGN_EXTEND_8_32(imm)) {
342 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
343 m_formatter.immediate8(imm);
344 } else {
345 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
346 m_formatter.immediate32(imm);
347 }
348 }
349
350#if PLATFORM(X86_64)
351 void andq_rr(RegisterID src, RegisterID dst)
352 {
353 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
354 }
355
356 void andq_ir(int imm, RegisterID dst)
357 {
358 if (CAN_SIGN_EXTEND_8_32(imm)) {
359 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
360 m_formatter.immediate8(imm);
361 } else {
362 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
363 m_formatter.immediate32(imm);
364 }
365 }
366#endif
367
368 void notl_r(RegisterID dst)
369 {
370 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
371 }
372
373 void orl_rr(RegisterID src, RegisterID dst)
374 {
375 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
376 }
377
378 void orl_mr(int offset, RegisterID base, RegisterID dst)
379 {
380 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
381 }
382
383 void orl_ir(int imm, RegisterID dst)
384 {
385 if (CAN_SIGN_EXTEND_8_32(imm)) {
386 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
387 m_formatter.immediate8(imm);
388 } else {
389 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
390 m_formatter.immediate32(imm);
391 }
392 }
393
394#if PLATFORM(X86_64)
395 void orq_rr(RegisterID src, RegisterID dst)
396 {
397 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
398 }
399
400 void orq_ir(int imm, RegisterID dst)
401 {
402 if (CAN_SIGN_EXTEND_8_32(imm)) {
403 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
404 m_formatter.immediate8(imm);
405 } else {
406 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
407 m_formatter.immediate32(imm);
408 }
409 }
410#endif
411
412 void subl_rr(RegisterID src, RegisterID dst)
413 {
414 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
415 }
416
417 void subl_mr(int offset, RegisterID base, RegisterID dst)
418 {
419 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
420 }
421
422 void subl_ir(int imm, RegisterID dst)
423 {
424 if (CAN_SIGN_EXTEND_8_32(imm)) {
425 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
426 m_formatter.immediate8(imm);
427 } else {
428 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
429 m_formatter.immediate32(imm);
430 }
431 }
432
433 void subl_im(int imm, int offset, RegisterID base)
434 {
435 if (CAN_SIGN_EXTEND_8_32(imm)) {
436 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
437 m_formatter.immediate8(imm);
438 } else {
439 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
440 m_formatter.immediate32(imm);
441 }
442 }
443
444#if PLATFORM(X86_64)
445 void subq_rr(RegisterID src, RegisterID dst)
446 {
447 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
448 }
449
450 void subq_ir(int imm, RegisterID dst)
451 {
452 if (CAN_SIGN_EXTEND_8_32(imm)) {
453 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
454 m_formatter.immediate8(imm);
455 } else {
456 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
457 m_formatter.immediate32(imm);
458 }
459 }
460#else
461 void subl_im(int imm, void* addr)
462 {
463 if (CAN_SIGN_EXTEND_8_32(imm)) {
464 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
465 m_formatter.immediate8(imm);
466 } else {
467 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
468 m_formatter.immediate32(imm);
469 }
470 }
471#endif
472
473 void xorl_rr(RegisterID src, RegisterID dst)
474 {
475 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
476 }
477
478 void xorl_ir(int imm, RegisterID dst)
479 {
480 if (CAN_SIGN_EXTEND_8_32(imm)) {
481 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
482 m_formatter.immediate8(imm);
483 } else {
484 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
485 m_formatter.immediate32(imm);
486 }
487 }
488
489#if PLATFORM(X86_64)
490 void xorq_rr(RegisterID src, RegisterID dst)
491 {
492 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
493 }
494
495 void xorq_ir(int imm, RegisterID dst)
496 {
497 if (CAN_SIGN_EXTEND_8_32(imm)) {
498 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
499 m_formatter.immediate8(imm);
500 } else {
501 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
502 m_formatter.immediate32(imm);
503 }
504 }
505#endif
506
507 void sarl_i8r(int imm, RegisterID dst)
508 {
509 if (imm == 1)
510 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
511 else {
512 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
513 m_formatter.immediate8(imm);
514 }
515 }
516
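    // Illustrative encodings (assumed, for reference only): a shift by one uses the shorter
    // D1-group form, e.g. sarl_i8r(1, X86::eax) emits D1 F8, while sarl_i8r(4, X86::eax)
    // emits C1 F8 04.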
517 void sarl_CLr(RegisterID dst)
518 {
519 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
520 }
521
522 void shll_i8r(int imm, RegisterID dst)
523 {
524 if (imm == 1)
525 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
526 else {
527 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
528 m_formatter.immediate8(imm);
529 }
530 }
531
532 void shll_CLr(RegisterID dst)
533 {
534 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
535 }
536
537#if PLATFORM(X86_64)
538 void sarq_CLr(RegisterID dst)
539 {
540 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
541 }
542
543 void sarq_i8r(int imm, RegisterID dst)
544 {
545 if (imm == 1)
546 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
547 else {
548 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
549 m_formatter.immediate8(imm);
550 }
551 }
552#endif
553
554 void imull_rr(RegisterID src, RegisterID dst)
555 {
556 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
557 }
558
559 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
560 {
561 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
562 m_formatter.immediate32(value);
563 }
564
565 void idivl_r(RegisterID dst)
566 {
567 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
568 }
569
570 // Comparisons:
571
572 void cmpl_rr(RegisterID src, RegisterID dst)
573 {
574 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
575 }
576
577 void cmpl_rm(RegisterID src, int offset, RegisterID base)
578 {
579 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
580 }
581
582 void cmpl_mr(int offset, RegisterID base, RegisterID src)
583 {
584 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
585 }
586
587 void cmpl_ir(int imm, RegisterID dst)
588 {
589 if (CAN_SIGN_EXTEND_8_32(imm)) {
590 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
591 m_formatter.immediate8(imm);
592 } else {
593 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
594 m_formatter.immediate32(imm);
595 }
596 }
597
598 void cmpl_ir_force32(int imm, RegisterID dst)
599 {
600 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
601 m_formatter.immediate32(imm);
602 }
603
604 void cmpl_im(int imm, int offset, RegisterID base)
605 {
606 if (CAN_SIGN_EXTEND_8_32(imm)) {
607 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
608 m_formatter.immediate8(imm);
609 } else {
610 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
611 m_formatter.immediate32(imm);
612 }
613 }
614
615 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
616 {
617 if (CAN_SIGN_EXTEND_8_32(imm)) {
618 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
619 m_formatter.immediate8(imm);
620 } else {
621 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
622 m_formatter.immediate32(imm);
623 }
624 }
625
626 void cmpl_im_force32(int imm, int offset, RegisterID base)
627 {
628 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
629 m_formatter.immediate32(imm);
630 }
631
632#if PLATFORM(X86_64)
633 void cmpq_rr(RegisterID src, RegisterID dst)
634 {
635 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
636 }
637
638 void cmpq_rm(RegisterID src, int offset, RegisterID base)
639 {
640 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
641 }
642
643 void cmpq_ir(int imm, RegisterID dst)
644 {
645 if (CAN_SIGN_EXTEND_8_32(imm)) {
646 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
647 m_formatter.immediate8(imm);
648 } else {
649 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
650 m_formatter.immediate32(imm);
651 }
652 }
653
654 void cmpq_im(int imm, int offset, RegisterID base)
655 {
656 if (CAN_SIGN_EXTEND_8_32(imm)) {
657 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
658 m_formatter.immediate8(imm);
659 } else {
660 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
661 m_formatter.immediate32(imm);
662 }
663 }
664
665 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
666 {
667 if (CAN_SIGN_EXTEND_8_32(imm)) {
668 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
669 m_formatter.immediate8(imm);
670 } else {
671 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
672 m_formatter.immediate32(imm);
673 }
674 }
675#else
676 void cmpl_rm(RegisterID reg, void* addr)
677 {
678 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
679 }
680
681 void cmpl_im(int imm, void* addr)
682 {
683 if (CAN_SIGN_EXTEND_8_32(imm)) {
684 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
685 m_formatter.immediate8(imm);
686 } else {
687 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
688 m_formatter.immediate32(imm);
689 }
690 }
691#endif
692
693 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
694 {
695 m_formatter.prefix(PRE_OPERAND_SIZE);
696 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
697 }
698
699 void testl_rr(RegisterID src, RegisterID dst)
700 {
701 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
702 }
703
704 void testl_i32r(int imm, RegisterID dst)
705 {
706 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
707 m_formatter.immediate32(imm);
708 }
709
710 void testl_i32m(int imm, int offset, RegisterID base)
711 {
712 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
713 m_formatter.immediate32(imm);
714 }
715
716 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
717 {
718 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
719 m_formatter.immediate32(imm);
720 }
721
722#if PLATFORM(X86_64)
723 void testq_rr(RegisterID src, RegisterID dst)
724 {
725 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
726 }
727
728 void testq_i32r(int imm, RegisterID dst)
729 {
730 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
731 m_formatter.immediate32(imm);
732 }
733
734 void testq_i32m(int imm, int offset, RegisterID base)
735 {
736 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
737 m_formatter.immediate32(imm);
738 }
739
740 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
741 {
742 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
743 m_formatter.immediate32(imm);
744 }
745#endif
746
747 void testb_i8r(int imm, RegisterID dst)
748 {
749 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
750 m_formatter.immediate8(imm);
751 }
752
753 void sete_r(RegisterID dst)
754 {
755 m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst);
756 }
757
758 void setz_r(RegisterID dst)
759 {
760 sete_r(dst);
761 }
762
763 void setne_r(RegisterID dst)
764 {
765 m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst);
766 }
767
768 void setnz_r(RegisterID dst)
769 {
770 setne_r(dst);
771 }
772
773 // Various move ops:
774
775 void cdq()
776 {
777 m_formatter.oneByteOp(OP_CDQ);
778 }
779
780 void xchgl_rr(RegisterID src, RegisterID dst)
781 {
782 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
783 }
784
785#if PLATFORM(X86_64)
786 void xchgq_rr(RegisterID src, RegisterID dst)
787 {
788 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
789 }
790#endif
791
792 void movl_rr(RegisterID src, RegisterID dst)
793 {
794 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
795 }
796
797 void movl_rm(RegisterID src, int offset, RegisterID base)
798 {
799 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
800 }
801
802 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
803 {
804 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
805 }
806
807 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
808 {
809 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
810 }
811
812 void movl_mEAX(void* addr)
813 {
814 m_formatter.oneByteOp(OP_MOV_EAXOv);
815#if PLATFORM(X86_64)
816 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
817#else
818 m_formatter.immediate32(reinterpret_cast<int>(addr));
819#endif
820 }
821
822 void movl_mr(int offset, RegisterID base, RegisterID dst)
823 {
824 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
825 }
826
827 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
828 {
829 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
830 }
831
832 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
833 {
834 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
835 }
836
837 void movl_i32r(int imm, RegisterID dst)
838 {
839 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
840 m_formatter.immediate32(imm);
841 }
842
843 void movl_i32m(int imm, int offset, RegisterID base)
844 {
845 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
846 m_formatter.immediate32(imm);
847 }
848
849 void movl_EAXm(void* addr)
850 {
851 m_formatter.oneByteOp(OP_MOV_OvEAX);
852#if PLATFORM(X86_64)
853 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
854#else
855 m_formatter.immediate32(reinterpret_cast<int>(addr));
856#endif
857 }
858
859#if PLATFORM(X86_64)
860 void movq_rr(RegisterID src, RegisterID dst)
861 {
862 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
863 }
864
865 void movq_rm(RegisterID src, int offset, RegisterID base)
866 {
867 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
868 }
869
870 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
871 {
872 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
873 }
874
875 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
876 {
877 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
878 }
879
880 void movq_mEAX(void* addr)
881 {
882 m_formatter.oneByteOp64(OP_MOV_EAXOv);
883 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
884 }
885
886 void movq_mr(int offset, RegisterID base, RegisterID dst)
887 {
888 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
889 }
890
891 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
892 {
893 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
894 }
895
896 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
897 {
898 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
899 }
900
901 void movq_i64r(int64_t imm, RegisterID dst)
902 {
903 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
904 m_formatter.immediate64(imm);
905 }
906
907 void movsxd_rr(RegisterID src, RegisterID dst)
908 {
909 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
910 }
911
912
913#else
914 void movl_mr(void* addr, RegisterID dst)
915 {
916 if (dst == X86::eax)
917 movl_mEAX(addr);
918 else
919 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
920 }
921
922 void movl_i32m(int imm, void* addr)
923 {
924 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
925 m_formatter.immediate32(imm);
926 }
927#endif
928
929 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
930 {
931 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
932 }
933
934 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
935 {
936 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
937 }
938
939 void movzbl_rr(RegisterID src, RegisterID dst)
940 {
941 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
942 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
943 // REX prefixes are defined to be silently ignored by the processor.
944 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
945 }
946
947 void leal_mr(int offset, RegisterID base, RegisterID dst)
948 {
949 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
950 }
951
952 // Flow control:
953
954 JmpSrc call()
955 {
956 m_formatter.oneByteOp(OP_CALL_rel32);
957 return m_formatter.immediateRel32();
958 }
959
960 JmpSrc call(RegisterID dst)
961 {
962 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
963 return JmpSrc(m_formatter.size());
964 }
965
966 JmpSrc jmp()
967 {
968 m_formatter.oneByteOp(OP_JMP_rel32);
969 return m_formatter.immediateRel32();
970 }
971
972 void jmp_r(RegisterID dst)
973 {
974 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
975 }
976
977 void jmp_m(int offset, RegisterID base)
978 {
979 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
980 }
981
982 JmpSrc jne()
983 {
984 m_formatter.twoByteOp(OP2_JNE_rel32);
985 return m_formatter.immediateRel32();
986 }
987
988 JmpSrc jnz()
989 {
990 return jne();
991 }
992
993 JmpSrc je()
994 {
995 m_formatter.twoByteOp(OP2_JE_rel32);
996 return m_formatter.immediateRel32();
997 }
998
999 JmpSrc jl()
1000 {
1001 m_formatter.twoByteOp(OP2_JL_rel32);
1002 return m_formatter.immediateRel32();
1003 }
1004
1005 JmpSrc jb()
1006 {
1007 m_formatter.twoByteOp(OP2_JB_rel32);
1008 return m_formatter.immediateRel32();
1009 }
1010
1011 JmpSrc jle()
1012 {
1013 m_formatter.twoByteOp(OP2_JLE_rel32);
1014 return m_formatter.immediateRel32();
1015 }
1016
1017 JmpSrc jbe()
1018 {
1019 m_formatter.twoByteOp(OP2_JBE_rel32);
1020 return m_formatter.immediateRel32();
1021 }
1022
1023 JmpSrc jge()
1024 {
1025 m_formatter.twoByteOp(OP2_JGE_rel32);
1026 return m_formatter.immediateRel32();
1027 }
1028
1029 JmpSrc jg()
1030 {
1031 m_formatter.twoByteOp(OP2_JG_rel32);
1032 return m_formatter.immediateRel32();
1033 }
1034
1035 JmpSrc ja()
1036 {
1037 m_formatter.twoByteOp(OP2_JA_rel32);
1038 return m_formatter.immediateRel32();
1039 }
1040
1041 JmpSrc jae()
1042 {
1043 m_formatter.twoByteOp(OP2_JAE_rel32);
1044 return m_formatter.immediateRel32();
1045 }
1046
1047 JmpSrc jo()
1048 {
1049 m_formatter.twoByteOp(OP2_JO_rel32);
1050 return m_formatter.immediateRel32();
1051 }
1052
1053 JmpSrc jp()
1054 {
1055 m_formatter.twoByteOp(OP2_JP_rel32);
1056 return m_formatter.immediateRel32();
1057 }
1058
1059 JmpSrc js()
1060 {
1061 m_formatter.twoByteOp(OP2_JS_rel32);
1062 return m_formatter.immediateRel32();
1063 }
1064
1065 // SSE operations:
1066
1067 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1068 {
1069 m_formatter.prefix(PRE_SSE_F2);
1070 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1071 }
1072
1073 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1074 {
1075 m_formatter.prefix(PRE_SSE_F2);
1076 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1077 }
1078
1079 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1080 {
1081 m_formatter.prefix(PRE_SSE_F2);
1082 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1083 }
1084
1085 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1086 {
1087 m_formatter.prefix(PRE_SSE_F2);
1088 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1089 }
1090
1091 void movd_rr(XMMRegisterID src, RegisterID dst)
1092 {
1093 m_formatter.prefix(PRE_SSE_66);
1094 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1095 }
1096
1097#if PLATFORM(X86_64)
1098 void movq_rr(XMMRegisterID src, RegisterID dst)
1099 {
1100 m_formatter.prefix(PRE_SSE_66);
1101 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1102 }
1103
1104 void movq_rr(RegisterID src, XMMRegisterID dst)
1105 {
1106 m_formatter.prefix(PRE_SSE_66);
1107 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1108 }
1109#endif
1110
1111 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1112 {
1113 m_formatter.prefix(PRE_SSE_F2);
1114 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1115 }
1116
1117 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1118 {
1119 m_formatter.prefix(PRE_SSE_F2);
1120 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1121 }
1122
1123 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1124 {
1125 m_formatter.prefix(PRE_SSE_F2);
1126 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1127 }
1128
1129 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1130 {
1131 m_formatter.prefix(PRE_SSE_F2);
1132 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1133 }
1134
1135 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1136 {
1137 m_formatter.prefix(PRE_SSE_66);
1138 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1139 m_formatter.immediate8(whichWord);
1140 }
1141
1142 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1143 {
1144 m_formatter.prefix(PRE_SSE_F2);
1145 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1146 }
1147
1148 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1149 {
1150 m_formatter.prefix(PRE_SSE_F2);
1151 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1152 }
1153
1154 void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
1155 {
1156 m_formatter.prefix(PRE_SSE_66);
1157 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1158 }
1159
1160 // Misc instructions:
1161
1162 void int3()
1163 {
1164 m_formatter.oneByteOp(OP_INT3);
1165 }
1166
1167 void ret()
1168 {
1169 m_formatter.oneByteOp(OP_RET);
1170 }
1171
1172 void predictNotTaken()
1173 {
1174 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1175 }
1176
1177 // Assembler admin methods:
1178
1179 JmpDst label()
1180 {
1181 return JmpDst(m_formatter.size());
1182 }
1183
1184 JmpDst align(int alignment)
1185 {
1186 while (!m_formatter.isAligned(alignment))
1187 m_formatter.oneByteOp(OP_HLT);
1188
1189 return label();
1190 }
1191
1192 // Linking & patching:
1193
1194 void link(JmpSrc from, JmpDst to)
1195 {
1196 ASSERT(to.m_offset != -1);
1197 ASSERT(from.m_offset != -1);
1198
1199 reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
1200 }
1201
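    // Illustrative note (an assumed usage pattern, not from the original source): a JmpSrc
    // returned by jne() records the offset just past its 4-byte rel32 placeholder, so link()
    // patches the four bytes immediately before from.m_offset. For an X86Assembler 'a':
    //     JmpSrc branch = a.jne();   // emits 0F 85 00 00 00 00
    //     JmpDst target = a.label(); // after emitting the fall-through code
    //     a.link(branch, target);    // rewrites the rel32 to (to.m_offset - from.m_offset)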
1202 static void patchAddress(void* code, JmpDst position, void* value)
1203 {
1204 ASSERT(position.m_offset != -1);
1205
1206 reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
1207 }
1208
1209 static void link(void* code, JmpSrc from, void* to)
1210 {
1211 ASSERT(from.m_offset != -1);
1212
1213 reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
1214 }
1215
1216 static void* getRelocatedAddress(void* code, JmpSrc jump)
1217 {
1218 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
1219 }
1220
1221 static void* getRelocatedAddress(void* code, JmpDst destination)
1222 {
1223 ASSERT(destination.m_offset != -1);
1224
1225 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
1226 }
1227
1228 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
1229 {
1230 return dst.m_offset - src.m_offset;
1231 }
1232
1233 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
1234 {
1235 return dst.m_offset - src.m_offset;
1236 }
1237
1238 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
1239 {
1240 return dst.m_offset - src.m_offset;
1241 }
1242
1243 static void patchImmediate(intptr_t where, int32_t value)
1244 {
1245 reinterpret_cast<int32_t*>(where)[-1] = value;
1246 }
1247
1248 static void patchPointer(intptr_t where, intptr_t value)
1249 {
1250 reinterpret_cast<intptr_t*>(where)[-1] = value;
1251 }
1252
1253 static void patchBranchOffset(intptr_t where, void* destination)
1254 {
1255 intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
1256 ASSERT(offset == static_cast<int32_t>(offset));
1257 reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
1258 }
1259
1260 void* executableCopy(ExecutablePool* allocator)
1261 {
1262 void* copy = m_formatter.executableCopy(allocator);
1263 ASSERT(copy);
1264 return copy;
1265 }
1266
1267private:
1268
1269 class X86InstructionFormatter {
1270
1271 static const int maxInstructionSize = 16;
1272
1273 public:
1274
1275 // Legacy prefix bytes:
1276 //
1277 // These are emitted prior to the instruction.
1278
1279 void prefix(OneByteOpcodeID pre)
1280 {
1281 m_buffer.putByte(pre);
1282 }
1283
1284 // Word-sized operands / no operand instruction formatters.
1285 //
1286 // In addition to the opcode, the following operand permutations are supported:
1287 // * None - instruction takes no operands.
1288 // * One register - the low three bits of the RegisterID are added into the opcode.
1289 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1290 // * Three argument ModRM - a register, plus a base register and an offset describing a memory operand.
1291 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1292 //
1293 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1294 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1295 //
1296 // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
1297
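        // Illustrative calls for the permutations above, with their assumed encodings:
        //     oneByteOp(OP_RET);                             // C3        ret
        //     oneByteOp(OP_PUSH_EAX, X86::ebp);              // 55        push %ebp
        //     oneByteOp(OP_MOV_EvGv, X86::esp, X86::ebp);    // 89 E5     mov %esp, %ebp
        //     oneByteOp(OP_MOV_GvEv, X86::eax, X86::ebp, 8); // 8B 45 08  mov 8(%ebp), %eax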
1298 void oneByteOp(OneByteOpcodeID opcode)
1299 {
1300 m_buffer.ensureSpace(maxInstructionSize);
1301 m_buffer.putByteUnchecked(opcode);
1302 }
1303
1304 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
1305 {
1306 m_buffer.ensureSpace(maxInstructionSize);
1307 emitRexIfNeeded(0, 0, reg);
1308 m_buffer.putByteUnchecked(opcode + (reg & 7));
1309 }
1310
1311 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
1312 {
1313 m_buffer.ensureSpace(maxInstructionSize);
1314 emitRexIfNeeded(reg, 0, rm);
1315 m_buffer.putByteUnchecked(opcode);
1316 registerModRM(reg, rm);
1317 }
1318
1319 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1320 {
1321 m_buffer.ensureSpace(maxInstructionSize);
1322 emitRexIfNeeded(reg, 0, base);
1323 m_buffer.putByteUnchecked(opcode);
1324 memoryModRM(reg, base, offset);
1325 }
1326
1327 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1328 {
1329 m_buffer.ensureSpace(maxInstructionSize);
1330 emitRexIfNeeded(reg, 0, base);
1331 m_buffer.putByteUnchecked(opcode);
1332 memoryModRM_disp32(reg, base, offset);
1333 }
1334
1335 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1336 {
1337 m_buffer.ensureSpace(maxInstructionSize);
1338 emitRexIfNeeded(reg, index, base);
1339 m_buffer.putByteUnchecked(opcode);
1340 memoryModRM(reg, base, index, scale, offset);
1341 }
1342
1343#if !PLATFORM(X86_64)
1344 void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
1345 {
1346 m_buffer.ensureSpace(maxInstructionSize);
1347 m_buffer.putByteUnchecked(opcode);
1348 memoryModRM(reg, address);
1349 }
1350#endif
1351
1352 void twoByteOp(TwoByteOpcodeID opcode)
1353 {
1354 m_buffer.ensureSpace(maxInstructionSize);
1355 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1356 m_buffer.putByteUnchecked(opcode);
1357 }
1358
1359 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1360 {
1361 m_buffer.ensureSpace(maxInstructionSize);
1362 emitRexIfNeeded(reg, 0, rm);
1363 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1364 m_buffer.putByteUnchecked(opcode);
1365 registerModRM(reg, rm);
1366 }
1367
1368 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
1369 {
1370 m_buffer.ensureSpace(maxInstructionSize);
1371 emitRexIfNeeded(reg, 0, base);
1372 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1373 m_buffer.putByteUnchecked(opcode);
1374 memoryModRM(reg, base, offset);
1375 }
1376
1377 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1378 {
1379 m_buffer.ensureSpace(maxInstructionSize);
1380 emitRexIfNeeded(reg, index, base);
1381 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1382 m_buffer.putByteUnchecked(opcode);
1383 memoryModRM(reg, base, index, scale, offset);
1384 }
1385
1386#if PLATFORM(X86_64)
1387 // Quad-word-sized operands:
1388 //
1389 // Used to format 64-bit operations, planting a REX.w prefix.
1390 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1391 // the normal (non-'64'-postfixed) formatters should be used.
1392
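        // Illustrative example (assumed encoding): oneByteOp64(OP_MOV_EvGv, X86::r8, X86::eax)
        // plants REX byte 0x4C (W and R set), giving 4C 89 C0, i.e. mov %r8, %rax.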
1393 void oneByteOp64(OneByteOpcodeID opcode)
1394 {
1395 m_buffer.ensureSpace(maxInstructionSize);
1396 emitRexW(0, 0, 0);
1397 m_buffer.putByteUnchecked(opcode);
1398 }
1399
1400 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
1401 {
1402 m_buffer.ensureSpace(maxInstructionSize);
1403 emitRexW(0, 0, reg);
1404 m_buffer.putByteUnchecked(opcode + (reg & 7));
1405 }
1406
1407 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
1408 {
1409 m_buffer.ensureSpace(maxInstructionSize);
1410 emitRexW(reg, 0, rm);
1411 m_buffer.putByteUnchecked(opcode);
1412 registerModRM(reg, rm);
1413 }
1414
1415 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1416 {
1417 m_buffer.ensureSpace(maxInstructionSize);
1418 emitRexW(reg, 0, base);
1419 m_buffer.putByteUnchecked(opcode);
1420 memoryModRM(reg, base, offset);
1421 }
1422
1423 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1424 {
1425 m_buffer.ensureSpace(maxInstructionSize);
1426 emitRexW(reg, 0, base);
1427 m_buffer.putByteUnchecked(opcode);
1428 memoryModRM_disp32(reg, base, offset);
1429 }
1430
1431 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1432 {
1433 m_buffer.ensureSpace(maxInstructionSize);
1434 emitRexW(reg, index, base);
1435 m_buffer.putByteUnchecked(opcode);
1436 memoryModRM(reg, base, index, scale, offset);
1437 }
1438
1439 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1440 {
1441 m_buffer.ensureSpace(maxInstructionSize);
1442 emitRexW(reg, 0, rm);
1443 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1444 m_buffer.putByteUnchecked(opcode);
1445 registerModRM(reg, rm);
1446 }
1447#endif
1448
1449 // Byte-operands:
1450 //
1451 // These methods format byte operations. Byte operations differ from the normal
1452 // formatters in the circumstances under which they will decide to emit REX prefixes.
1453 // These should be used where any register operand signifies a byte register.
1454 //
1455 // The distinction is due to the handling of register numbers in the range 4..7 on
1456 // x86-64. These register numbers may either represent the second byte of the first
1457 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1458 //
1459 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1460 // be accessed where a REX prefix is present), these are likely best treated as
1461 // deprecated. In order to ensure the correct registers spl..dil are selected a
1462 // REX prefix will be emitted for any byte register operand in the range 4..15.
1463 //
1464 // These formatters may be used in instructions with a mix of operand sizes, in which
1465 // case an unnecessary REX prefix will be emitted, for example:
1466 // movzbl %al, %edi
1467 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1468 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1469 // be silently ignored by the processor.
1470 //
1471 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1472 // is provided to check byte register operands.
1473
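        // Illustrative example (assumed encoding): on x86-64, twoByteOp8(OP_SETE, (GroupOpcodeID)0, X86::edi)
        // emits 40 0F 94 C7 (sete %dil); the otherwise-empty 0x40 REX prefix selects dil rather than bh.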
1474 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1475 {
1476 m_buffer.ensureSpace(maxInstructionSize);
1477 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1478 m_buffer.putByteUnchecked(opcode);
1479 registerModRM(groupOp, rm);
1480 }
1481
1482 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
1483 {
1484 m_buffer.ensureSpace(maxInstructionSize);
1485 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
1486 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1487 m_buffer.putByteUnchecked(opcode);
1488 registerModRM(reg, rm);
1489 }
1490
1491 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1492 {
1493 m_buffer.ensureSpace(maxInstructionSize);
1494 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1495 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1496 m_buffer.putByteUnchecked(opcode);
1497 registerModRM(groupOp, rm);
1498 }
1499
1500 // Immediates:
1501 //
1502 // An immediate should be appended where appropriate after an op has been emitted.
1503 // The writes are unchecked since the opcode formatters above will have ensured space.
1504
1505 void immediate8(int imm)
1506 {
1507 m_buffer.putByteUnchecked(imm);
1508 }
1509
1510 void immediate32(int imm)
1511 {
1512 m_buffer.putIntUnchecked(imm);
1513 }
1514
1515 void immediate64(int64_t imm)
1516 {
1517 m_buffer.putInt64Unchecked(imm);
1518 }
1519
1520 JmpSrc immediateRel32()
1521 {
1522 m_buffer.putIntUnchecked(0);
1523 return JmpSrc(m_buffer.size());
1524 }
1525
1526 // Administrative methods:
1527
1528 size_t size() const { return m_buffer.size(); }
1529 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
1530 void* data() const { return m_buffer.data(); }
1531 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
1532
1533 private:
1534
1535 // Internals; ModRm and REX formatters.
1536
1537 static const RegisterID noBase = X86::ebp;
1538 static const RegisterID hasSib = X86::esp;
1539 static const RegisterID noIndex = X86::esp;
1540#if PLATFORM(X86_64)
1541 static const RegisterID noBase2 = X86::r13;
1542 static const RegisterID hasSib2 = X86::r12;
1543
1544 // Registers r8 & above require a REX prefix.
1545 inline bool regRequiresRex(int reg)
1546 {
1547 return (reg >= X86::r8);
1548 }
1549
1550 // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
1551 inline bool byteRegRequiresRex(int reg)
1552 {
1553 return (reg >= X86::esp);
1554 }
1555
1556 // Format a REX prefix byte.
1557 inline void emitRex(bool w, int r, int x, int b)
1558 {
1559 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
1560 }
1561
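        // For reference, the REX byte is laid out as 0100WRXB: W selects a 64-bit operand size,
        // and R, X and B extend the ModRM reg field, the SIB index and the ModRM rm / SIB base
        // respectively. For example, emitRex(true, X86::r8, 0, 0) produces 0x4C.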
1562 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1563 inline void emitRexW(int r, int x, int b)
1564 {
1565 emitRex(true, r, x, b);
1566 }
1567
1568 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1569 // regRequiresRex() to check other registers (i.e. address base & index).
1570 inline void emitRexIf(bool condition, int r, int x, int b)
1571 {
1572 if (condition) emitRex(false, r, x, b);
1573 }
1574
1575 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1576 inline void emitRexIfNeeded(int r, int x, int b)
1577 {
1578 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
1579 }
1580#else
1581 // No REX prefix bytes on 32-bit x86.
1582 inline bool regRequiresRex(int) { return false; }
1583 inline bool byteRegRequiresRex(int) { return false; }
1584 inline void emitRexIf(bool, int, int, int) {}
1585 inline void emitRexIfNeeded(int, int, int) {}
1586#endif
1587
1588 enum ModRmMode {
1589 ModRmMemoryNoDisp,
1590 ModRmMemoryDisp8,
1591 ModRmMemoryDisp32,
1592 ModRmRegister,
1593 };
1594
1595 void putModRm(ModRmMode mode, int reg, RegisterID rm)
1596 {
1597 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
1598 }
1599
1600 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
1601 {
1602 ASSERT(mode != ModRmRegister);
1603
1604 // Encode scale of (1,2,4,8) -> (0,1,2,3)
1605 int shift = 0;
1606 while (scale >>= 1)
1607 shift++;
1608
1609 putModRm(mode, reg, hasSib);
1610 m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7));
1611 }
1612
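        // For reference, the bytes produced above are laid out as
        //     ModRM: | mod (2 bits) | reg (3 bits) | rm (3 bits) |
        //     SIB:   | scale (2 bits) | index (3 bits) | base (3 bits) |
        // e.g. putModRm(ModRmRegister, X86::esp, X86::ebp) gives 0xE5, the ModRM byte of
        // "mov %esp, %ebp" (89 E5).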
1613 void registerModRM(int reg, RegisterID rm)
1614 {
1615 putModRm(ModRmRegister, reg, rm);
1616 }
1617
1618 void memoryModRM(int reg, RegisterID base, int offset)
1619 {
1620 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
1621#if PLATFORM(X86_64)
1622 if ((base == hasSib) || (base == hasSib2)) {
1623#else
1624 if (base == hasSib) {
1625#endif
1626 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
1627 putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
1628 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1629 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
1630 m_buffer.putByteUnchecked(offset);
1631 } else {
1632 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1633 m_buffer.putIntUnchecked(offset);
1634 }
1635 } else {
1636#if PLATFORM(X86_64)
1637 if (!offset && (base != noBase) && (base != noBase2))
1638#else
1639 if (!offset && (base != noBase))
1640#endif
1641 putModRm(ModRmMemoryNoDisp, reg, base);
1642 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1643 putModRm(ModRmMemoryDisp8, reg, base);
1644 m_buffer.putByteUnchecked(offset);
1645 } else {
1646 putModRm(ModRmMemoryDisp32, reg, base);
1647 m_buffer.putIntUnchecked(offset);
1648 }
1649 }
1650 }
1651
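        // Illustrative encodings for the special cases above (assumed, with reg = 0):
        //     memoryModRM(0, X86::ebp, 0) emits 45 00 - ebp cannot use the no-displacement
        //     form, so an explicit zero disp8 is planted.
        //     memoryModRM(0, X86::esp, 0) emits 04 24 - esp forces a SIB byte with no index.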
1652 void memoryModRM_disp32(int reg, RegisterID base, int offset)
1653 {
1654 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
1655#if PLATFORM(X86_64)
1656 if ((base == hasSib) || (base == hasSib2)) {
1657#else
1658 if (base == hasSib) {
1659#endif
1660 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1661 m_buffer.putIntUnchecked(offset);
1662 } else {
1663 putModRm(ModRmMemoryDisp32, reg, base);
1664 m_buffer.putIntUnchecked(offset);
1665 }
1666 }
1667
1668 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
1669 {
1670 ASSERT(index != noIndex);
1671
1672#if PLATFORM(X86_64)
1673 if (!offset && (base != noBase) && (base != noBase2))
1674#else
1675 if (!offset && (base != noBase))
1676#endif
1677 putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
1678 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1679 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
1680 m_buffer.putByteUnchecked(offset);
1681 } else {
1682 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
1683 m_buffer.putIntUnchecked(offset);
1684 }
1685 }
1686
1687#if !PLATFORM(X86_64)
1688 void memoryModRM(int reg, void* address)
1689 {
1690 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
1691 putModRm(ModRmMemoryNoDisp, reg, noBase);
1692 m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
1693 }
1694#endif
1695
1696 AssemblerBuffer m_buffer;
1697 } m_formatter;
1698};
1699
1700} // namespace JSC
1701
1702#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
1703
1704#endif // X86Assembler_h