X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..b226f5e54a60dc81db17b1260381d7dbfea3cdf1:/osfmk/vm/vm_init.c

diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c
index 2ee659818..82f7ce30c 100644
--- a/osfmk/vm/vm_init.c
+++ b/osfmk/vm/vm_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -64,8 +64,11 @@
  */
 
 #include <mach/machine/vm_types.h>
+#include <mach/vm_map.h>
 #include <kern/zalloc.h>
 #include <kern/kalloc.h>
+#include <kern/kext_alloc.h>
+#include <sys/kdebug.h>
 #include <vm/vm_object.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
@@ -78,8 +81,32 @@
 
 #include <vm/vm_protos.h>
 
-#define ZONE_MAP_MIN	(12 * 1024 * 1024)
-#define ZONE_MAP_MAX	(768 * 1024 * 1024)
+#define ZONE_MAP_MIN	CONFIG_ZONE_MAP_MIN
+
+/* Maximum zone size is 1.5G */
+#define ZONE_MAP_MAX	(1024 * 1024 * 1536)
+
+const vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+const vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS;
+
+boolean_t vm_kernel_ready = FALSE;
+boolean_t kmem_ready = FALSE;
+boolean_t kmem_alloc_ready = FALSE;
+boolean_t zlog_ready = FALSE;
+
+vm_offset_t kmapoff_kaddr;
+unsigned int kmapoff_pgcnt;
+
+#if CONFIG_EMBEDDED
+extern int log_executable_mem_entry;
+#endif /* CONFIG_EMBEDDED */
+
+static inline void
+vm_mem_bootstrap_log(const char *message)
+{
+//	kprintf("vm_mem_bootstrap: %s\n", message);
+	kernel_debug_string_early(message);
+}
 
 /*
  *	vm_mem_bootstrap initializes the virtual memory system.
@@ -90,42 +117,118 @@
 void
 vm_mem_bootstrap(void)
 {
 	vm_offset_t	start, end;
-	vm_size_t	zsize;
+	vm_size_t	zsizearg;
+	mach_vm_size_t	zsize;
 
 	/*
 	 *	Initializes resident memory structures.
 	 *	From here on, all physical memory is accounted for,
 	 *	and we use only virtual addresses.
 	 */
-
+	vm_mem_bootstrap_log("vm_page_bootstrap");
 	vm_page_bootstrap(&start, &end);
 
 	/*
 	 *	Initialize other VM packages
 	 */
 
+	vm_mem_bootstrap_log("zone_bootstrap");
 	zone_bootstrap();
+
+	vm_mem_bootstrap_log("vm_object_bootstrap");
 	vm_object_bootstrap();
+
+	vm_kernel_ready = TRUE;
+
+	vm_mem_bootstrap_log("vm_map_init");
 	vm_map_init();
+
+	vm_mem_bootstrap_log("kmem_init");
 	kmem_init(start, end);
+	kmem_ready = TRUE;
+	/*
+	 * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+	 * stack addresses. (With a 4K page and 9 bits of randomness, this
+	 * eats at most 2M of VA from the map.)
+	 */
+	if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt,
+	    sizeof (kmapoff_pgcnt)))
+		kmapoff_pgcnt = early_random() & 0x1ff;	/* 9 bits */
+
+	if (kmapoff_pgcnt > 0 &&
+	    vm_allocate_kernel(kernel_map, &kmapoff_kaddr,
+	    kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS)
+		panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt);
+
+#if CONFIG_EMBEDDED
+	PE_parse_boot_argn("log_executable_mem_entry",
+	    &log_executable_mem_entry,
+	    sizeof (log_executable_mem_entry));
+#endif /* CONFIG_EMBEDDED */
+
+	vm_mem_bootstrap_log("pmap_init");
 	pmap_init();
-	if (PE_parse_boot_arg("zsize", &zsize))
-		zsize = zsize * 1024 * 1024;
+	kmem_alloc_ready = TRUE;
+
+	if (PE_parse_boot_argn("zsize", &zsizearg, sizeof (zsizearg)))
+		zsize = zsizearg * 1024ULL * 1024ULL;
 	else {
 		zsize = sane_size >> 2;	/* Get target zone size as 1/4 of physical memory */
 	}
 
-	if(zsize < ZONE_MAP_MIN) zsize = ZONE_MAP_MIN;	/* Clamp to min */
-	if(zsize > ZONE_MAP_MAX) zsize = ZONE_MAP_MAX;	/* Clamp to max */
-	zone_init(zsize);	/* Allocate address space for zones */
-
+	if (zsize < ZONE_MAP_MIN)
+		zsize = ZONE_MAP_MIN;	/* Clamp to min */
+
+#if defined(__LP64__)
+	zsize += zsize >> 1;
+#endif	/* __LP64__ */
+	if (zsize > sane_size >> 1)
+		zsize = sane_size >> 1;	/* Clamp to half of RAM max */
+#if !__LP64__
+	if (zsize > ZONE_MAP_MAX)
+		zsize = ZONE_MAP_MAX;	/* Clamp to 1.5GB max for K32 */
+#endif /* !__LP64__ */
+
+	vm_mem_bootstrap_log("kext_alloc_init");
+	kext_alloc_init();
+
+	vm_mem_bootstrap_log("zone_init");
+	assert((vm_size_t) zsize == zsize);
+	zone_init((vm_size_t) zsize);	/* Allocate address space for zones */
+
+	/* The vm_page_zone must be created prior to kalloc_init; that
+	 * routine can trigger zalloc()s (e.g. mutex statistic structure
+	 * initialization). The vm_page_zone must exist to satisfy fictitious
+	 * page allocations (which are used for guard pages by the guard
+	 * mode zone allocator).
+	 */
+	vm_mem_bootstrap_log("vm_page_module_init");
+	vm_page_module_init();
+
+	vm_mem_bootstrap_log("kalloc_init");
 	kalloc_init();
+
+	vm_mem_bootstrap_log("vm_fault_init");
 	vm_fault_init();
-	vm_page_module_init();
+
+	vm_mem_bootstrap_log("memory_manager_default_init");
 	memory_manager_default_init();
+
+	vm_mem_bootstrap_log("memory_object_control_bootstrap");
 	memory_object_control_bootstrap();
+
+	vm_mem_bootstrap_log("device_pager_bootstrap");
 	device_pager_bootstrap();
+
+	vm_paging_map_init();
+
+	vm_mem_bootstrap_log("vm_mem_bootstrap done");
+
+#ifdef CONFIG_ZCACHE
+	zcache_bootstrap();
+#endif
+	vm_rtfault_record_init();
 }
 
 void
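Note on the "kmapoff" block the patch adds to vm_mem_bootstrap(): it eats a random number of pages at the bottom of kernel_map so that subsequent heap, zone, and stack allocations land at fuzzed addresses. Because the page count is masked to 9 bits, the gap costs at most 511 pages of VA, which with 4K pages is just under 2M. Below is a minimal user-space sketch of that arithmetic, not kernel code: rand() and the printf harness are illustrative stand-ins for the kernel's early_random() and PAGE_SIZE_64.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_4K	4096ULL	/* stand-in for the kernel's PAGE_SIZE_64 */

int
main(void)
{
	/* early_random() & 0x1ff keeps 9 bits of entropy: 0..511 pages */
	unsigned int kmapoff_pgcnt = (unsigned int)rand() & 0x1ff;
	unsigned long long va_cost = kmapoff_pgcnt * PAGE_SIZE_4K;

	/* Worst case: 511 * 4096 = 2,093,056 bytes, just under 2M of VA */
	printf("pages=%u va_cost=%llu bytes (max %llu)\n",
	    kmapoff_pgcnt, va_cost, 0x1ffULL * PAGE_SIZE_4K);
	return 0;
}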
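Note on the zone-map sizing: the patch drops the old fixed 768M ZONE_MAP_MAX in favor of a target of 1/4 of physical memory (sane_size), clamped up to the configured minimum, grown by 50% on LP64 kernels, then clamped to half of RAM and, on 32-bit kernels only, to the new 1.5G ZONE_MAP_MAX. A standalone sketch of that clamping order follows; the 12M ZONE_MAP_MIN here is an assumed stand-in, since the patch takes the real value from the CONFIG_ZONE_MAP_MIN build option.

#include <stdint.h>
#include <stdio.h>

#define ZONE_MAP_MIN	(12ULL * 1024 * 1024)	/* assumed stand-in for CONFIG_ZONE_MAP_MIN */
#define ZONE_MAP_MAX	(1024ULL * 1024 * 1536)	/* 1.5G, as in the patch */

static uint64_t
zone_map_target(uint64_t sane_size, int lp64)
{
	uint64_t zsize = sane_size >> 2;	/* target 1/4 of physical memory */

	if (zsize < ZONE_MAP_MIN)
		zsize = ZONE_MAP_MIN;		/* clamp to min */
	if (lp64)
		zsize += zsize >> 1;		/* +50% on LP64 */
	if (zsize > sane_size >> 1)
		zsize = sane_size >> 1;		/* clamp to half of RAM */
	if (!lp64 && zsize > ZONE_MAP_MAX)
		zsize = ZONE_MAP_MAX;		/* 1.5G cap for K32 only */
	return zsize;
}

int
main(void)
{
	/* e.g. 8G of RAM on a 64-bit kernel -> 3G zone map */
	printf("%llu\n", (unsigned long long)zone_map_target(8ULL << 30, 1));
	return 0;
}

For 8G of RAM on LP64 this yields 2G + 1G = 3G, still under the 4G half-of-RAM clamp; the K32 cap never engages on 64-bit kernels, matching the #if !__LP64__ guard in the patch.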