#include <machine/asm.h>
 * Everything between _hvtest_begin and _hvtest_end will be copied for
 * tests that don't use the page faulting of the test harness.
 * You can put constants here.
.global _save_restore_regs_entry
_save_restore_regs_entry:
 * For all registers to test, each of these blocks:
 * 1. increments rcx (to keep track in case of test failure),
 * 2. checks the register's value against a (constant) template,
 * 3. flips all bits for the VMM to later verify that the changed value is available.
 *
 * For a second pass, bits are all flipped back to their original state after
 * the VMM has verified the flipped values.
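
	// Illustrative sketch only (not executed; added for reference): one such
	// block, patterned on the %rax check below. The jump target "fail" is a
	// placeholder, not a label defined in this file.
	//
	//	incq	%rcx				// 1. bump the failure counter
	//	movq	$0x0101010101010101, %rax	// 2. load the expected template
	//	cmpq	8(%rsp), %rax			//    and compare with the saved register
	//	jne	fail				//    placeholder failure label
	//	notq	8(%rsp)				// 3. flip all bits for the VMM to verify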
	// segment registers (pass 1)

	// segment registers (pass 2)

	// general purpose registers
	movq	$0x0101010101010101, %rax
	cmpq	8(%rsp), %rax			// %rax on stack
	movq	$0x0202020202020202, %rax
	movq	$0x0303030303030303, %rax
	cmpq	(%rsp), %rax			// %rcx on stack
	movq	$0x0404040404040404, %rax
	movq	$0x0505050505050505, %rax
	movq	$0x0606060606060606, %rax
	movq	$0x0707070707070707, %rax
	movq	$0x0808080808080808, %rax
	movq	$0x0909090909090909, %rax
	movq	$0x0a0a0a0a0a0a0a0a, %rax
	movq	$0x0b0b0b0b0b0b0b0b, %rax
	movq	$0x0c0c0c0c0c0c0c0c, %rax
	movq	$0x0d0d0d0d0d0d0d0d, %rax
	movq	$0x0e0e0e0e0e0e0e0e, %rax
	movq	$0x0f0f0f0f0f0f0f0f, %rax
.global _save_restore_debug_regs_entry
_save_restore_debug_regs_entry:
 * For all registers to test, each of these blocks:
 * 1. increments rcx (to keep track in case of test failure),
 * 2. checks the register's value against a (constant) template,
 * 3. flips all bits for the VMM to later verify that the changed value is available.
 *
 * For a second pass, bits are all flipped back to their original state after
 * the VMM has verified the flipped values.
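
	// Illustrative sketch only (not executed): one debug-register block.
	// Debug registers cannot be compared directly, so the value would be
	// read into a general purpose register first. The register choice and
	// the "fail" label are placeholders.
	//
	//	incq	%rcx
	//	movq	$0x1111111111111111, %rbx	// expected template
	//	movq	%dr0, %rax			// read the debug register
	//	cmpq	%rbx, %rax
	//	jne	fail				// placeholder failure label
	//	notq	%rax				// flip all bits...
	//	movq	%rax, %dr0			// ...and write them back for the VMM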
	movq	$0x1111111111111111, %rbx
	movq	$0x2222222222222222, %rbx
	movq	$0x3333333333333333, %rbx
	movq	$0x4444444444444444, %rbx
 * flip only the defined bits of the debug status and control registers
 * (and also don't flip General Detect Enable, as the next access
 * to any debug register would then generate an exception)
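
	// Illustrative sketch only (not executed): one way to flip just a
	// masked subset of bits, as the code below does for the debug status
	// and control registers via the mask immediates it loads into %rax.
	// The mask value here is a placeholder, not necessarily one used below.
	//
	//	movq	%dr6, %rax
	//	movq	$0x000000000000000f, %rbx	// placeholder mask of bits allowed to flip
	//	xorq	%rbx, %rax			// flip only the masked bits
	//	movq	%rax, %dr6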
	movq	$0x5555555555555555, %rbx
	mov	$0xffff0ff0, %rax
	movq	$0xffffefff, %rax
	mov	$0xffff0ff0, %rax
	movq	$0xffffefff, %rax
	movq	$0x5555555555555555, %rbx
	movq	$0xffff0fff, %rax
	movq	$0xffff0fff, %rax
	mov	$0xffff0ff0, %rax
	movq	$0xffffefff, %rax
	movq	$0xffff0fff, %rax

	jmp	_save_restore_debug_regs_entry	// 2nd pass
.global _simple_protected_mode_vcpu_entry
_simple_protected_mode_vcpu_entry:

.global _simple_real_mode_vcpu_entry
_simple_real_mode_vcpu_entry:

.global _radar61961809_entry
_radar61961809_entry:
	mov	$0x99999999, %ebx	// sentinel address, see _radar61961809_loop64
	mov	$0xc0000080, %ecx	// IA32_EFER
	or	$0x100, %eax		// EFER.LME (long mode enable)
	or	$0x80000000, %ecx	// CR0.PG
	// the first 6 bytes at (%edi) hold the _radar61961809_prepare far pointer
.global _radar61961809_prepare
_radar61961809_prepare:
 * We switched into long mode, and now we immediately switch back out;
 * the test will then switch back in.
 *
 * This is done to suppress (legitimate) EPT and page fault exits.
 * Until CR0.PG is enabled (which is what effectively activates
 * long mode), the page tables are never looked at. Right after
 * setting PG, that changes immediately, causing EPT violations
 * that are handled transparently. Additionally, the far jump that
 * would be necessary to switch into a 64-bit code segment would
 * also cause EPT violations and PFs when fetching the segment
 * descriptor from the GDT.
 *
 * By first jumping into a 32-bit code segment after enabling PG
 * once, we "warm up" both the EPT and the (harness-managed) page
 * tables, so the next exit after the far jump will most likely be
 * an IRQ exit, most faithfully reproducing the problem.
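
	// Rough outline of the sequence, for orientation (the instructions
	// below are the authoritative version):
	//   1. clear CR0.PG  -> drop back out of long mode
	//   2. set CR0.PG    -> re-enter long mode with EPT/page tables warm
	//   3. ljmp through (%edi) into the 64-bit code segment (_radar61961809_loop64)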
	and	$~0x80000000, %ecx

	// This is where the actual test really starts.
	mov	%ecx, %cr0		// enable PG => long mode
	ljmp	*(%edi)			// _radar61961809_loop64
.global _radar61961809_loop64
_radar61961809_loop64:
	// as 16-bit code, this instruction will be:
	// and cause an obvious EPT violation (%bx is 0x9999)

	// loop long enough for a good chance of an IRQ exit

	// if we reach here, we stayed in long mode
.global _radar60691363_entry
_radar60691363_entry:
	movq	$0x800, %rsi		// VMCS_GUEST_ES
	movq	$0x6400, %rsi		// VMCS_RO_EXIT_QUALIFIC
	movq	$0x6402, %rsi		// VMCS_RO_IO_RCX

	movq	$0x800, %rsi		// VMCS_GUEST_ES
	movq	$0x6400, %rsi		// VMCS_RO_EXIT_QUALIFIC
	movq	$0x6402, %rsi		// VMCS_RO_IO_RCX