-	pmap_init();
-
-	if (PE_parse_boot_arg("zsize", &zsize))
-		zsize = zsize * 1024 * 1024;
-	else {
-		zsize = mem_size >> 2;	/* Get target zone size as 1/4 of physical memory */
+
+	kernel_startup_initialize_upto(STARTUP_SUB_KMEM);
+
+	/*
+	 * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+	 * stack addresses. (With a 4K page and 9 bits of randomness, this
+	 * eats about 2M of VA from the map)
+	 *
+	 * Note that we always need to slide by at least one page because the VM
+	 * pointer packing schemes using KERNEL_PMAP_HEAP_RANGE_START as a base
+	 * do not admit this address to be part of any zone submap.
+	 */
+	uint32_t kmapoff_pgcnt = (early_random() & 0x1ff) + 1; /* 9 bits */
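+	/*
+	 * KMA_VAONLY reserves address space without committing physical pages,
+	 * and KMA_PERMANENT marks the range as never freed: the slide exists
+	 * purely to push later allocations to a randomized base.
+	 */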
+	if (kernel_memory_allocate(kernel_map, &kmapoff_kaddr,
+	    ptoa(kmapoff_pgcnt), 0, KMA_KOBJECT | KMA_PERMANENT | KMA_VAONLY,
+	    VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) {
+		panic("cannot kernel_memory_allocate %u pages", kmapoff_pgcnt);