#include <machine/asm.h>

.text

.balign 0x1000

.global _hvtest_begin
_hvtest_begin:

/*
 * Everything between _hvtest_begin and _hvtest_end will be copied for
 * tests that don't use the page faulting of the test harness.
 * You can put constants here.
 */

.code64

.balign 16

.global _save_restore_regs_entry
_save_restore_regs_entry:

	pushq %rax
	pushq %rcx
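	// the %rax and %rcx template values stay on the stack; the checks at
	// .pass compare and flip them through 8(%rsp) and (%rsp)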

	xor %rcx, %rcx

	pushq %rbx

	/*
	 * For all registers to test, each of these blocks:
	 * 1. increments rcx (to keep track in case of test failure),
	 * 2. checks the register's value against a (constant) template,
	 * 3. flips all bits for the VMM to later verify that the changed value is available.
	 *
	 * For a second pass, bits are all flipped back to their original state after
	 * the vmcall.
	 */

	// segment registers (pass 1)

	incq %rcx
	movq $0x1010, %rax
	movq %ds, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $1, %rbx
	movq %rbx, %ds

	incq %rcx
	movq $0x2020, %rax
	movq %es, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $2, %rbx
	movq %rbx, %es

	incq %rcx
	movq $0x3030, %rax
	movq %fs, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $3, %rbx
	movq %rbx, %fs

	incq %rcx
	movq $0x4040, %rax
	movq %gs, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $1, %rbx
	movq %rbx, %gs

	popq %rbx

	jmp .pass

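// pass 2: reached via the jmp after the first vmcall, once the VMM has
// verified the flipped register values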
.pass2:
	pushq %rax
	pushq %rcx

	xor %rcx, %rcx

	pushq %rbx

	// segment registers (pass 2)

	incq %rcx
	movq $0x1, %rax
	movq %ds, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $1, %rbx
	movq %rbx, %ds

	incq %rcx
	movq $0x2, %rax
	movq %es, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $2, %rbx
	movq %rbx, %es

	incq %rcx
	movq $0x3, %rax
	movq %fs, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $3, %rbx
	movq %rbx, %fs

	incq %rcx
	movq $0x1, %rax
	movq %gs, %rbx
	cmpq %rbx, %rax
	jne .foul
	movq $1, %rbx
	movq %rbx, %gs

	popq %rbx

.pass:
	// general purpose registers

	incq %rcx
	movq $0x0101010101010101, %rax
	cmpq 8(%rsp), %rax		// %rax on stack
	jne .foul
	notq 8(%rsp)

	incq %rcx
	movq $0x0202020202020202, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx

	incq %rcx
	movq $0x0303030303030303, %rax
	cmpq (%rsp), %rax		// %rcx on stack
	jne .foul
	notq (%rsp)

	incq %rcx
	movq $0x0404040404040404, %rax
	cmpq %rdx, %rax
	jne .foul
	notq %rdx

	incq %rcx
	movq $0x0505050505050505, %rax
	cmpq %rsi, %rax
	jne .foul
	notq %rsi

	incq %rcx
	movq $0x0606060606060606, %rax
	cmpq %rdi, %rax
	jne .foul
	notq %rdi

	incq %rcx
	movq $0x0707070707070707, %rax
	cmpq %rbp, %rax
	jne .foul
	notq %rbp

	incq %rcx
	movq $0x0808080808080808, %rax
	cmpq %r8, %rax
	jne .foul
	notq %r8

	incq %rcx
	movq $0x0909090909090909, %rax
	cmpq %r9, %rax
	jne .foul
	notq %r9

	incq %rcx
	movq $0x0a0a0a0a0a0a0a0a, %rax
	cmpq %r10, %rax
	jne .foul
	notq %r10

	incq %rcx
	movq $0x0b0b0b0b0b0b0b0b, %rax
	cmpq %r11, %rax
	jne .foul
	notq %r11

	incq %rcx
	movq $0x0c0c0c0c0c0c0c0c, %rax
	cmpq %r12, %rax
	jne .foul
	notq %r12

	incq %rcx
	movq $0x0d0d0d0d0d0d0d0d, %rax
	cmpq %r13, %rax
	jne .foul
	notq %r13

	incq %rcx
	movq $0x0e0e0e0e0e0e0e0e, %rax
	cmpq %r14, %rax
	jne .foul
	notq %r14

	incq %rcx
	movq $0x0f0f0f0f0f0f0f0f, %rax
	cmpq %r15, %rax
	jne .foul
	notq %r15

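	// reload the (now flipped) %rcx and %rax templates from the stack and
	// let the VMM verify all flipped registers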
	popq %rcx
	movq (%rsp), %rax
	vmcall

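	// back from the VMM: flip everything back to the original values for pass 2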
	notq %rax
	notq %rbx
	notq %rcx
	notq %rdx
	notq %rsi
	notq %rdi
	notq %rbp
	notq %r8
	notq %r9
	notq %r10
	notq %r11
	notq %r12
	notq %r13
	notq %r14
	notq %r15

	jmp .pass2

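// a check failed: report the 1-based index of the failing block to the VMM in %rax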
.foul:
	movq %rcx, %rax
	vmcall

.global _save_restore_debug_regs_entry
_save_restore_debug_regs_entry:

	pushq %rax
	xor %rcx, %rcx

	/*
	 * For all registers to test, each of these blocks:
	 * 1. increments rcx (to keep track in case of test failure),
	 * 2. checks the register's value against a (constant) template,
	 * 3. flips all bits for the VMM to later verify that the changed value is available.
	 *
	 * For a second pass, bits are all flipped back to their original state after
	 * the vmcall.
	 */

	incq %rcx
	movq $0x1111111111111111, %rbx
	movq %dr0, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	movq %rbx, %dr0

	incq %rcx
	movq $0x2222222222222222, %rbx
	movq %dr1, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	movq %rbx, %dr1

	incq %rcx
	movq $0x3333333333333333, %rbx
	movq %dr2, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	movq %rbx, %dr2

	incq %rcx
	movq $0x4444444444444444, %rbx
	movq %dr3, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	movq %rbx, %dr3

	/*
	 * flip only defined bits for debug status and control registers
	 * (and also don't flip General Detect Enable, as the next access
	 * to any debug register would generate an exception)
	 */
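	/*
	 * Concretely: for %dr6 the masks below force bits 4-11 and 16-31 on
	 * (0xffff0ff0) and bit 12 off (0xffffefff); for %dr7 they keep bit 10
	 * set (0x400) and bits 12-15 clear (0xffff0fff), which includes the
	 * General Detect Enable bit (bit 13).
	 */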

	incq %rcx
	movq $0x5555555555555555, %rbx
	mov $0xffff0ff0, %rax
	orq %rax, %rbx
	movq $0xffffefff, %rax
	andq %rax, %rbx
	movq %dr6, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	mov $0xffff0ff0, %rax
	orq %rax, %rbx
	movq $0xffffefff, %rax
	andq %rax, %rbx
	movq %rbx, %dr6

	incq %rcx
	movq $0x5555555555555555, %rbx
	orq $0x400, %rbx
	movq $0xffff0fff, %rax
	andq %rax, %rbx
	movq %dr7, %rax
	cmpq %rbx, %rax
	jne .foul
	notq %rbx
	orq $0x400, %rbx
	movq $0xffff0fff, %rax
	andq %rax, %rbx
	movq %rbx, %dr7

	popq %rax
	vmcall

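	// back from the VMM: flip the debug registers back (again only within
	// their defined bits) before taking the second pass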
	movq %dr0, %rbx
	notq %rbx
	movq %rbx, %dr0

	movq %dr1, %rbx
	notq %rbx
	movq %rbx, %dr1

	movq %dr2, %rbx
	notq %rbx
	movq %rbx, %dr2

	movq %dr3, %rbx
	notq %rbx
	movq %rbx, %dr3

	movq %dr6, %rbx
	notq %rbx
	mov $0xffff0ff0, %rax
	orq %rax, %rbx
	movq $0xffffefff, %rax
	andq %rax, %rbx
	movq %rbx, %dr6

	movq %dr7, %rbx
	notq %rbx
	orq $0x400, %rbx
	movq $0xffff0fff, %rax
	andq %rax, %rbx
	movq %rbx, %dr7

	jmp _save_restore_debug_regs_entry	// 2nd pass

.code32

.global _simple_protected_mode_vcpu_entry
_simple_protected_mode_vcpu_entry:

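	// hand a fixed magic value back to the VMM and exit via vmcall
	// (the real-mode variant below does the same)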
	movl $0x23456, %eax
	vmcall

.code16

.global _simple_real_mode_vcpu_entry
_simple_real_mode_vcpu_entry:

	movl $0x23456, %eax
	vmcall

.code32

.global _radar61961809_entry
_radar61961809_entry:

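	// 32-bit entry point: set IA32_EFER.LME, report to the VMM via vmcall,
	// then enable CR0.PG to activate long mode and far-jump through the
	// far-pointer table at (%edi)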
	mov $0x99999999, %ebx		// sentinel address, see _radar61961809_loop64

	mov $0xc0000080, %ecx		// IA32_EFER
	rdmsr
	or $0x100, %eax			// .LME
	wrmsr

	vmcall

	mov %cr0, %ecx
	or $0x80000000, %ecx		// CR0.PG
	mov %ecx, %cr0

	// the first 6 bytes at (%edi) hold the far pointer to _radar61961809_prepare
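	// (an m16:32 far pointer: a 32-bit offset followed by a 16-bit code-segment selector)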
	ljmp *(%edi)

.code32

.global _radar61961809_prepare
_radar61961809_prepare:

	/*
	 * We just switched into long mode and now immediately switch back
	 * out; the test will switch back in.
	 *
	 * This is done to suppress (legitimate) EPT and Page Fault exits.
	 * Until CR0.PG is enabled (which is what effectively activates
	 * long mode), the page tables are never looked at. Right after
	 * setting PG, that changes immediately, causing transparently
	 * handled EPT violations. Additionally, the far jump that is
	 * necessary to switch into a 64-bit code segment would also
	 * cause EPT violations and PFs when fetching the segment
	 * descriptor from the GDT.
	 *
	 * By first jumping into a 32-bit code segment after enabling PG
	 * once, we "warm up" both EPT and the (harness-managed) page
	 * tables, so the next exit after the far jump will most likely be
	 * an IRQ exit, most faithfully reproducing the problem.
	 */

	mov %cr0, %ecx
	and $~0x80000000, %ecx		// clear CR0.PG again
	mov %ecx, %cr0

	mov $0x1111, %eax
	vmcall

	// This is where the actual test really starts.
	mov %cr0, %ecx
	or $0x80000000, %ecx
	mov %ecx, %cr0			// enable PG => long mode

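	// %ecx is the loop counter used in _radar61961809_loop64; the first
	// dec in that loop wraps it to 0xffffffff, so it runs for a long time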
	xor %ecx, %ecx

	add $8, %edi
	ljmp *(%edi)			// _radar61961809_loop64

.code64

.global _radar61961809_loop64
_radar61961809_loop64:
1:
	// as 16-bit code, this instruction will be:
	//   add %al,(%bx,%si)
	// and cause an obvious EPT violation (%bx is 0x9999)
	mov $0x1, %ebp

	// loop long enough for a good chance of an IRQ exit
	dec %ecx
	jnz 1b

	// if we reach here, we stayed in long mode
	mov $0x2222, %eax
	vmcall

.global _radar60691363_entry
_radar60691363_entry:
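	// read the guest ES selector and two read-only exit-information fields
	// from the VMCS, reporting each value to the VMM via vmcall, then
	// attempt to overwrite all three before the final vmcall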
	movq $0x800, %rsi		// VMCS_GUEST_ES
	vmreadq %rsi, %rax
	vmcall
	movq $0x6400, %rsi		// VMCS_RO_EXIT_QUALIFIC
	vmreadq %rsi, %rax
	vmcall
	movq $0x6402, %rsi		// VMCS_RO_IO_RCX
	vmreadq %rsi, %rax
	vmcall

	movq $0x800, %rsi		// VMCS_GUEST_ES
	movq $0x9191, %rax
	vmwriteq %rax, %rsi
	movq $0x6400, %rsi		// VMCS_RO_EXIT_QUALIFIC
	movq $0x9898, %rax
	vmwriteq %rax, %rsi
	movq $0x6402, %rsi		// VMCS_RO_IO_RCX
	movq $0x7979, %rax
	vmwriteq %rax, %rsi

	movq $0x4567, %rax

	vmcall

.global _hvtest_end
_hvtest_end: