+	kmem_ready = TRUE;
+	/*
+	 * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+	 * stack addresses. (With a 4K page and 9 bits of randomness, this
+	 * eats at most 2M of VA from the map.)
+	 */
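+	/*
+	 * (An explicit kmapoff boot-arg overrides the random choice below:
+	 * kmapoff=0 disables the offset entirely, and larger values burn
+	 * correspondingly more VA.)
+	 */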
+	if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt,
+	    sizeof (kmapoff_pgcnt)))
+		kmapoff_pgcnt = early_random() & 0x1ff;	/* 9 bits */
+
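+	/*
+	 * Burning the offset relies on VM_FLAGS_ANYWHERE first-fitting at
+	 * the bottom of kernel_map this early in bootstrap: reserving
+	 * kmapoff_pgcnt pages here (they are never freed) shifts every
+	 * subsequent kernel_map allocation by the random amount.
+	 */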
+	if (kmapoff_pgcnt > 0 &&
+	    vm_allocate_kernel(kernel_map, &kmapoff_kaddr,
+	    kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE,
+	    VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS)
+		panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt);
+
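+	/*
+	 * Embedded configs only: honor the log_executable_mem_entry
+	 * boot-arg, which (as the name suggests) enables logging when
+	 * executable memory entries are created.
+	 */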
+#if CONFIG_EMBEDDED
+	PE_parse_boot_argn("log_executable_mem_entry",
+	    &log_executable_mem_entry,
+	    sizeof (log_executable_mem_entry));
+#endif /* CONFIG_EMBEDDED */
+
+	vm_mem_bootstrap_log("pmap_init");