+ kmem_ready = TRUE;
+ /*
+ * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+ * stack addresses. (With a 4K page and 9 bits of randomness, this
+ * eats at most 2M of VA from the map.)
+ */
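+ /*
+ * A "kmapoff" boot-arg, if present, overrides the random page count
+ * (a value of 0 disables the offset entirely).
+ */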
+ if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt,
+     sizeof (kmapoff_pgcnt)))
+         kmapoff_pgcnt = early_random() & 0x1ff; /* 9 bits */
+
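+ /*
+ * The pages are left allocated: the hole itself is what pushes every
+ * subsequent kernel_map allocation to a shifted address.
+ */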
+ if (kmapoff_pgcnt > 0 &&
+     vm_allocate(kernel_map, &kmapoff_kaddr,
+     kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
+         panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt);