+
+
+#define VM_TEST_COLLAPSE_COMPRESSOR 0
+#define VM_TEST_WIRE_AND_EXTRACT 0
+#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
+#if __arm64__
+#define VM_TEST_KERNEL_OBJECT_FAULT 0
+#endif /* __arm64__ */
+#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
+
+#if VM_TEST_COLLAPSE_COMPRESSOR
+extern boolean_t vm_object_collapse_compressor_allowed;
+#include <IOKit/IOLib.h>
+/*
+ * VM_TEST_COLLAPSE_COMPRESSOR:
+ * Build a two-level shadow chain (top_object shadowing backing_object)
+ * where each object holds a mix of resident, compressed and absent
+ * pages, with a non-zero vo_shadow_offset and a non-zero paging_offset
+ * on the backing object.  Collapse the chain with vm_object_collapse()
+ * and verify that every page is still reachable through the collapsed
+ * top object with its expected contents.  Panics on failure when
+ * vm_object_collapse_compressor_allowed is set.
+ */
+static void
+vm_test_collapse_compressor(void)
+{
+ vm_object_size_t backing_size, top_size;
+ vm_object_t backing_object, top_object;
+ vm_map_offset_t backing_offset, top_offset;
+ unsigned char *backing_address, *top_address;
+ kern_return_t kr;
+
+ printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
+
+ /* create backing object */
+ backing_size = 15 * PAGE_SIZE;
+ backing_object = vm_object_allocate(backing_size);
+ assert(backing_object != VM_OBJECT_NULL);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
+ backing_object);
+ /* map backing object */
+ backing_offset = 0;
+ kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
+ VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
+ backing_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ backing_address = (unsigned char *) backing_offset;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped backing object %p at 0x%llx\n",
+ backing_object, (uint64_t) backing_offset);
+ /* populate with pages to be compressed in backing object */
+ backing_address[0x1 * PAGE_SIZE] = 0xB1;
+ backing_address[0x4 * PAGE_SIZE] = 0xB4;
+ backing_address[0x7 * PAGE_SIZE] = 0xB7;
+ backing_address[0xa * PAGE_SIZE] = 0xBA;
+ backing_address[0xd * PAGE_SIZE] = 0xBD;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "backing_object %p\n", backing_object);
+ /* compress backing object */
+ vm_object_pageout(backing_object);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
+ backing_object);
+ /* wait for all the pages to be gone */
+ /* volatile read: the pageout thread updates the count concurrently */
+ while (*(volatile int *)&backing_object->resident_page_count != 0) {
+ IODelay(10);
+ }
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
+ backing_object);
+ /* populate with pages to be resident in backing object */
+ backing_address[0x0 * PAGE_SIZE] = 0xB0;
+ backing_address[0x3 * PAGE_SIZE] = 0xB3;
+ backing_address[0x6 * PAGE_SIZE] = 0xB6;
+ backing_address[0x9 * PAGE_SIZE] = 0xB9;
+ backing_address[0xc * PAGE_SIZE] = 0xBC;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "backing_object %p\n", backing_object);
+ /* leave the other pages absent */
+ /* mess with the paging_offset of the backing_object */
+ assert(backing_object->paging_offset == 0);
+ backing_object->paging_offset = 0x3000;
+
+ /* create top object */
+ top_size = 9 * PAGE_SIZE;
+ top_object = vm_object_allocate(top_size);
+ assert(top_object != VM_OBJECT_NULL);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
+ top_object);
+ /* map top object */
+ top_offset = 0;
+ kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
+ VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
+ top_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ top_address = (unsigned char *) top_offset;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped top object %p at 0x%llx\n",
+ top_object, (uint64_t) top_offset);
+ /* populate with pages to be compressed in top object */
+ top_address[0x3 * PAGE_SIZE] = 0xA3;
+ top_address[0x4 * PAGE_SIZE] = 0xA4;
+ top_address[0x5 * PAGE_SIZE] = 0xA5;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "top_object %p\n", top_object);
+ /* compress top object */
+ vm_object_pageout(top_object);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
+ top_object);
+ /* wait for all the pages to be gone */
+ /* volatile read, as above: keep the compiler from caching the count */
+ while (*(volatile int *)&top_object->resident_page_count != 0) {
+ IODelay(10);
+ }
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
+ top_object);
+ /* populate with pages to be resident in top object */
+ top_address[0x0 * PAGE_SIZE] = 0xA0;
+ top_address[0x1 * PAGE_SIZE] = 0xA1;
+ top_address[0x2 * PAGE_SIZE] = 0xA2;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "top_object %p\n", top_object);
+ /* leave the other pages absent */
+
+ /* link the 2 objects */
+ vm_object_reference(backing_object);
+ top_object->shadow = backing_object;
+ top_object->vo_shadow_offset = 0x3000;
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
+ top_object, backing_object);
+
+ /* unmap backing object */
+ vm_map_remove(kernel_map,
+ backing_offset,
+ backing_offset + backing_size,
+ VM_MAP_REMOVE_NO_FLAGS);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "unmapped backing_object %p [0x%llx:0x%llx]\n",
+ backing_object,
+ (uint64_t) backing_offset,
+ (uint64_t) (backing_offset + backing_size));
+
+ /* collapse */
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
+ vm_object_lock(top_object);
+ vm_object_collapse(top_object, 0, FALSE);
+ vm_object_unlock(top_object);
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
+
+ /* did it work? */
+ if (top_object->shadow != VM_OBJECT_NULL) {
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ if (vm_object_collapse_compressor_allowed) {
+ panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ }
+ } else {
+ /* check the contents of the mapping */
+ unsigned char expect[9] =
+ { 0xA0, 0xA1, 0xA2, /* resident in top */
+ 0xA3, 0xA4, 0xA5, /* compressed in top */
+ 0xB9, /* resident in backing + shadow_offset */
+ 0xBD, /* compressed in backing + shadow_offset + paging_offset */
+ 0x00 }; /* absent in both */
+ unsigned char actual[9];
+ unsigned int i, errors;
+
+ errors = 0;
+ for (i = 0; i < sizeof(actual); i++) {
+ actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
+ if (actual[i] != expect[i]) {
+ errors++;
+ }
+ }
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: "
+ "actual [%x %x %x %x %x %x %x %x %x] "
+ "expect [%x %x %x %x %x %x %x %x %x] "
+ "%d errors\n",
+ actual[0], actual[1], actual[2], actual[3],
+ actual[4], actual[5], actual[6], actual[7],
+ actual[8],
+ expect[0], expect[1], expect[2], expect[3],
+ expect[4], expect[5], expect[6], expect[7],
+ expect[8],
+ errors);
+ if (errors) {
+ panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ } else {
+ printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
+ }
+ }
+}
+#else /* VM_TEST_COLLAPSE_COMPRESSOR */
+#define vm_test_collapse_compressor()
+#endif /* VM_TEST_COLLAPSE_COMPRESSOR */
+
+#if VM_TEST_WIRE_AND_EXTRACT
+extern ledger_template_t task_ledger_template;
+#include <mach/mach_vm.h>
+extern ppnum_t vm_map_get_phys_page(vm_map_t map,
+ vm_offset_t offset);
+/*
+ * VM_TEST_WIRE_AND_EXTRACT:
+ * Allocate memory in a pmap-backed "user" map, remap it into a second
+ * (pmap-less) "wire" map, then wire every page through the wire mapping
+ * with vm_map_wire_and_extract() and check that the physical page it
+ * returns matches what vm_map_get_phys_page() reports for the original
+ * user mapping.  Finally re-wires the last page to exercise the
+ * already-wired path.  Panics on any mismatch or wiring failure.
+ */
+static void
+vm_test_wire_and_extract(void)
+{
+ ledger_t ledger;
+ vm_map_t user_map, wire_map;
+ mach_vm_address_t user_addr, wire_addr;
+ mach_vm_size_t user_size, wire_size;
+ mach_vm_offset_t cur_offset;
+ vm_prot_t cur_prot, max_prot;
+ ppnum_t user_ppnum, wire_ppnum;
+ kern_return_t kr;
+
+ /* "user" map gets its own 64-bit pmap; "wire" map has no pmap */
+ ledger = ledger_instantiate(task_ledger_template,
+ LEDGER_CREATE_ACTIVE_ENTRIES);
+ user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT),
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ wire_map = vm_map_create(NULL,
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ /* allocate pageable memory in the user map */
+ user_addr = 0;
+ user_size = 0x10000;
+ kr = mach_vm_allocate(user_map,
+ &user_addr,
+ user_size,
+ VM_FLAGS_ANYWHERE);
+ assert(kr == KERN_SUCCESS);
+ /* share the same memory into the wire map (copy == FALSE) */
+ wire_addr = 0;
+ wire_size = user_size;
+ kr = mach_vm_remap(wire_map,
+ &wire_addr,
+ wire_size,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ user_map,
+ user_addr,
+ FALSE,
+ &cur_prot,
+ &max_prot,
+ VM_INHERIT_NONE);
+ assert(kr == KERN_SUCCESS);
+ /* wire each page via the wire mapping and compare physical pages */
+ for (cur_offset = 0;
+ cur_offset < wire_size;
+ cur_offset += PAGE_SIZE) {
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ user_ppnum = vm_map_get_phys_page(user_map,
+ user_addr + cur_offset);
+ printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+ }
+ /* re-wire the last page: exercises the already-wired path */
+ cur_offset -= PAGE_SIZE;
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT,
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+
+ printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
+}
+#else /* VM_TEST_WIRE_AND_EXTRACT */
+#define vm_test_wire_and_extract()
+#endif /* VM_TEST_WIRE_AND_EXTRACT */
+
+#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
+/*
+ * VM_TEST_PAGE_WIRE_OVERFLOW_PANIC:
+ * Repeatedly wire a single page until its wire_count would wrap back
+ * to 0.  vm_page_wire() is expected to panic on the overflow before
+ * that can happen, so falling out of the loop and reaching the panic()
+ * below means overflow detection failed.  This test never "passes":
+ * the expected outcome is a panic from inside vm_page_wire().
+ */
+static void
+vm_test_page_wire_overflow_panic(void)
+{
+ vm_object_t object;
+ vm_page_t page;
+
+ printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
+
+ object = vm_object_allocate(PAGE_SIZE);
+ vm_object_lock(object);
+ page = vm_page_alloc(object, 0x0);
+ vm_page_lock_queues();
+ do {
+ vm_page_wire(page, 1, FALSE);
+ } while (page->wire_count != 0);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+ panic("FBDP(%p,%p): wire_count overflow not detected\n",
+ object, page);
+}
+#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
+#define vm_test_page_wire_overflow_panic()
+#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
+
+#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
+extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
+/*
+ * VM_TEST_KERNEL_OBJECT_FAULT (arm64 only):
+ * Allocate a kernel stack with guard pages first and last, then
+ * copyinframe() from the base of the allocation (the guard page).
+ * The copy is expected to fault and return non-zero; a zero return
+ * means the fault on the kernel object was not rejected (FAIL).
+ */
+static void
+vm_test_kernel_object_fault(void)
+{
+ kern_return_t kr;
+ vm_offset_t stack;
+ uintptr_t frameb[2];
+ int ret;
+
+ kr = kernel_memory_allocate(kernel_map, &stack,
+ kernel_stack_size + (2 * PAGE_SIZE),
+ 0,
+ (KMA_KSTACK | KMA_KOBJECT |
+ KMA_GUARD_FIRST | KMA_GUARD_LAST),
+ VM_KERN_MEMORY_STACK);
+ if (kr != KERN_SUCCESS) {
+ panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
+ }
+ /* non-zero return == copy faulted as expected */
+ ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
+ if (ret != 0) {
+ printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
+ } else {
+ printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
+ }
+ vm_map_remove(kernel_map,
+ stack,
+ stack + kernel_stack_size + (2 * PAGE_SIZE),
+ VM_MAP_REMOVE_KUNWIRE);
+ stack = 0;
+}
+#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
+#define vm_test_kernel_object_fault()
+#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
+
+#if VM_TEST_DEVICE_PAGER_TRANSPOSE
+/*
+ * VM_TEST_DEVICE_PAGER_TRANSPOSE:
+ * Create an anonymous VM object and a device-pager-backed VM object of
+ * the same size, block access to both, swap their contents with
+ * vm_object_transpose(), then verify that the "named" flag and the
+ * reference counts followed the swap (the anon object takes on the
+ * device object's named/2-ref state and vice versa).
+ */
+static void
+vm_test_device_pager_transpose(void)
+{
+ memory_object_t device_pager;
+ vm_object_t anon_object, device_object;
+ vm_size_t size;
+ vm_map_offset_t device_mapping;
+ kern_return_t kr;
+
+ size = 3 * PAGE_SIZE;
+ anon_object = vm_object_allocate(size);
+ assert(anon_object != VM_OBJECT_NULL);
+ device_pager = device_pager_setup(NULL, 0, size, 0);
+ assert(device_pager != NULL);
+ device_object = memory_object_to_vm_object(device_pager);
+ assert(device_object != VM_OBJECT_NULL);
+#if 0
+ /*
+ * Can't actually map this, since another thread might do a
+ * vm_map_enter() that gets coalesced into this object, which
+ * would cause the test to fail.
+ */
+ vm_map_offset_t anon_mapping = 0;
+ kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
+ VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
+ anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+#endif
+ /* map the device object through its pager */
+ device_mapping = 0;
+ kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
+ VM_FLAGS_ANYWHERE,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_NONE,
+ (void *)device_pager, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ memory_object_deallocate(device_pager);
+
+ /* mark both objects busy and block access before the transpose */
+ vm_object_lock(anon_object);
+ vm_object_activity_begin(anon_object);
+ anon_object->blocked_access = TRUE;
+ vm_object_unlock(anon_object);
+ vm_object_lock(device_object);
+ vm_object_activity_begin(device_object);
+ device_object->blocked_access = TRUE;
+ vm_object_unlock(device_object);
+
+ /* pre-transpose state: anon is unnamed/1 ref, device is named/2 refs */
+ assert(anon_object->ref_count == 1);
+ assert(!anon_object->named);
+ assert(device_object->ref_count == 2);
+ assert(device_object->named);
+
+ kr = vm_object_transpose(device_object, anon_object, size);
+ assert(kr == KERN_SUCCESS);
+
+ /* unblock access and end the activity on both objects */
+ vm_object_lock(anon_object);
+ vm_object_activity_end(anon_object);
+ anon_object->blocked_access = FALSE;
+ vm_object_unlock(anon_object);
+ vm_object_lock(device_object);
+ vm_object_activity_end(device_object);
+ device_object->blocked_access = FALSE;
+ vm_object_unlock(device_object);
+
+ /* post-transpose: the named/ref_count state must have been swapped */
+ assert(anon_object->ref_count == 2);
+ assert(anon_object->named);
+#if 0
+ kr = vm_deallocate(kernel_map, anon_mapping, size);
+ assert(kr == KERN_SUCCESS);
+#endif
+ assert(device_object->ref_count == 1);
+ assert(!device_object->named);
+ kr = vm_deallocate(kernel_map, device_mapping, size);
+ assert(kr == KERN_SUCCESS);
+
+ printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
+}
+#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
+#define vm_test_device_pager_transpose()
+#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
+
+/*
+ * Run whichever of the VM self-tests above are compiled in.  Each call
+ * expands to a no-op macro when its corresponding VM_TEST_* switch is 0,
+ * so this is safe to call unconditionally at startup.
+ */
+void
+vm_tests(void)
+{
+ vm_test_collapse_compressor();
+ vm_test_wire_and_extract();
+ vm_test_page_wire_overflow_panic();
+ vm_test_kernel_object_fault();
+ vm_test_device_pager_transpose();
+}