+ /*
+  * NOTE(review): fragment from the middle of the VM bootstrap routine.
+  * zsize is presumably seeded from sane_size (physical memory) before
+  * this hunk -- confirm against the full function.
+  */
+ if (zsize < ZONE_MAP_MIN)
+ zsize = ZONE_MAP_MIN; /* Clamp to min */
+#if defined(__LP64__)
+ zsize += zsize >> 1; /* LP64: add 50% headroom to the zone map size */
+#endif /* __LP64__ */
+ if (zsize > sane_size >> 1)
+ zsize = sane_size >> 1; /* Clamp to half of RAM max */
+#if !__LP64__
+ if (zsize > ZONE_MAP_MAX)
+ zsize = ZONE_MAP_MAX; /* Clamp to 1.5GB max for K32 */
+#endif /* !__LP64__ */
+
+#if CONFIG_EMBEDDED
+#if defined(__LP64__)
+ {
+ mach_vm_size_t max_zsize;
+
+ /*
+ * because of the limited kernel virtual space for embedded systems,
+ * we need to clamp the size of the zone map being created... replicate
+ * the above calculation for a 1Gbyte, LP64 system and use that as the
+ * maximum size for the zone map
+ */
+ max_zsize = (1024ULL * 1024ULL * 1024ULL) >> 2ULL; /* 1GB >> 2 = 256MB */
+ max_zsize += max_zsize >> 1; /* + 50% headroom => 384MB cap */
+
+ if (zsize > max_zsize)
+ zsize = max_zsize;
+ }
+#endif /* __LP64__ */
+#endif /* CONFIG_EMBEDDED */
+ vm_mem_bootstrap_log("kext_alloc_init");
+ kext_alloc_init();
+
+ vm_mem_bootstrap_log("zone_init");
+ /* Guard against truncation when narrowing to vm_size_t (32-bit kernels) */
+ assert((vm_size_t) zsize == zsize);
+ zone_init((vm_size_t) zsize); /* Allocate address space for zones */
+
+ /* The vm_page_zone must be created prior to kalloc_init; that
+ * routine can trigger zalloc()s (for e.g. mutex statistic structure
+ * initialization). The vm_page_zone must exist to satisfy fictitious
+ * page allocations (which are used for guard pages by the guard
+ * mode zone allocator).
+ */
+ vm_mem_bootstrap_log("vm_page_module_init");
+ vm_page_module_init();
+
+ vm_mem_bootstrap_log("kalloc_init");