X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..143464d58d2bd6378e74eec636961ceb0d32fb91:/osfmk/vm/vm_init.c

diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c
index 2ee659818..027e6c416 100644
--- a/osfmk/vm/vm_init.c
+++ b/osfmk/vm/vm_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -64,8 +64,10 @@
  */
 
 #include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -78,8 +80,21 @@
 
 #include
 
-#define ZONE_MAP_MIN (12 * 1024 * 1024)
-#define ZONE_MAP_MAX (768 * 1024 * 1024)
+#define ZONE_MAP_MIN CONFIG_ZONE_MAP_MIN
+
+/* Maximum zone size is 1.5G */
+#define ZONE_MAP_MAX (1024 * 1024 * 1536)
+
+const vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+const vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS;
+
+boolean_t vm_kernel_ready = FALSE;
+boolean_t kmem_ready = FALSE;
+boolean_t kmem_alloc_ready = FALSE;
+boolean_t zlog_ready = FALSE;
+
+vm_offset_t kmapoff_kaddr;
+unsigned int kmapoff_pgcnt;
 
 /*
  * vm_mem_bootstrap initializes the virtual memory system.
@@ -90,42 +105,108 @@ void
 vm_mem_bootstrap(void)
 {
 	vm_offset_t	start, end;
-	vm_size_t	zsize;
+	vm_size_t	zsizearg;
+	mach_vm_size_t	zsize;
 
 	/*
 	 *	Initializes resident memory structures.
	 *	From here on, all physical memory is accounted for,
 	 *	and we use only virtual addresses.
 	 */
+#define vm_mem_bootstrap_kprintf(x) /* kprintf(x) */
 
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling vm_page_bootstrap\n"));
 	vm_page_bootstrap(&start, &end);
 
 	/*
 	 *	Initialize other VM packages
 	 */
 
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling zone_bootstrap\n"));
 	zone_bootstrap();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling vm_object_bootstrap\n"));
 	vm_object_bootstrap();
+
+	vm_kernel_ready = TRUE;
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling vm_map_init\n"));
 	vm_map_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling kmem_init\n"));
 	kmem_init(start, end);
+	kmem_ready = TRUE;
+	/*
+	 * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+	 * stack addresses. (With a 4K page and 9 bits of randomness, this
+	 * eats at most 2M of VA from the map.)
+	 */
+	if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt,
+	    sizeof (kmapoff_pgcnt)))
+		kmapoff_pgcnt = early_random() & 0x1ff;	/* 9 bits */
+
+	if (kmapoff_pgcnt > 0 &&
+	    vm_allocate(kernel_map, &kmapoff_kaddr,
+	    kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
+		panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt);
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling pmap_init\n"));
 	pmap_init();
 
-	if (PE_parse_boot_arg("zsize", &zsize))
-		zsize = zsize * 1024 * 1024;
+	kmem_alloc_ready = TRUE;
+
+	if (PE_parse_boot_argn("zsize", &zsizearg, sizeof (zsizearg)))
+		zsize = zsizearg * 1024ULL * 1024ULL;
 	else {
 		zsize = sane_size >> 2;	/* Get target zone size as 1/4 of physical memory */
 	}
 
-	if(zsize < ZONE_MAP_MIN) zsize = ZONE_MAP_MIN;	/* Clamp to min */
-	if(zsize > ZONE_MAP_MAX) zsize = ZONE_MAP_MAX;	/* Clamp to max */
-	zone_init(zsize);	/* Allocate address space for zones */
-
+	if (zsize < ZONE_MAP_MIN)
+		zsize = ZONE_MAP_MIN;	/* Clamp to min */
+#if defined(__LP64__)
+	zsize += zsize >> 1;
+#endif	/* __LP64__ */
+	if (zsize > sane_size >> 1)
+		zsize = sane_size >> 1;	/* Clamp to half of RAM max */
+#if !__LP64__
+	if (zsize > ZONE_MAP_MAX)
+		zsize = ZONE_MAP_MAX;	/* Clamp to 1.5GB max for K32 */
+#endif /* !__LP64__ */
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling kext_alloc_init\n"));
+	kext_alloc_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling zone_init\n"));
+	assert((vm_size_t) zsize == zsize);
+	zone_init((vm_size_t) zsize);	/* Allocate address space for zones */
+
+	/* The vm_page_zone must be created prior to kalloc_init; that
+	 * routine can trigger zalloc()s (for e.g. mutex statistic structure
+	 * initialization). The vm_page_zone must exist to satisfy fictitious
+	 * page allocations (which are used for guard pages by the guard
+	 * mode zone allocator).
+	 */
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling vm_page_module_init\n"));
+	vm_page_module_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling kalloc_init\n"));
 	kalloc_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling vm_fault_init\n"));
 	vm_fault_init();
-	vm_page_module_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling memory_manager_default_init\n"));
 	memory_manager_default_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling memory_object_control_bootstrap\n"));
 	memory_object_control_bootstrap();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling device_pager_bootstrap\n"));
 	device_pager_bootstrap();
+
+	vm_paging_map_init();
+
+	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: done\n"));
 }
 
 void
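
Note on the vm_mem_bootstrap_kprintf lines added throughout the function above: they are a trace macro that is compiled out by default (its body is only a comment), and the double parentheses at each call site let a one-parameter macro forward an entire printf-style argument list. The stand-alone sketch below is illustrative only, not part of the diff; it uses printf in place of the kernel's kprintf, which is an assumption of the sketch.

#include <stdio.h>

/* Disabled form, as in the diff: the comment body makes the macro expand to nothing. */
#define vm_mem_bootstrap_kprintf(x) /* printf x */

/* To re-enable every trace call at once, swap the definition above for:
 *   #define vm_mem_bootstrap_kprintf(x) printf x
 */

int main(void)
{
	/* Expands to a bare ';' when disabled, or to printf("...", "...") when enabled. */
	vm_mem_bootstrap_kprintf(("vm_mem_bootstrap: calling %s\n", "vm_page_bootstrap"));
	return 0;
}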
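
Note on the kmapoff hunk: vm_mem_bootstrap() now discards a random number of pages (0-511, overridable with the kmapoff boot-arg) from the start of kernel_map, so that later heap, zone and stack allocations land at less predictable addresses. The user-space sketch below only illustrates the arithmetic in that comment; rand() stands in for the kernel's early_random() and PAGE_SIZE_4K for the 4K page size, both assumptions of the sketch rather than anything from the diff.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE_4K 4096u	/* assumed 4K page, matching the comment in the diff */

int main(void)
{
	srand((unsigned)time(NULL));	/* rand() stands in for early_random() */

	/* Same 9-bit mask as the diff: kmapoff_pgcnt = early_random() & 0x1ff; */
	unsigned int kmapoff_pgcnt = (unsigned int)rand() & 0x1ff;

	printf("pages skipped : %u\n", kmapoff_pgcnt);
	printf("VA consumed   : %u bytes\n", kmapoff_pgcnt * PAGE_SIZE_4K);
	printf("worst case    : %u bytes (0x1ff pages, just under 2 MB)\n",
	       0x1ffu * PAGE_SIZE_4K);
	return 0;
}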
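
Note on the zone-map sizing hunk: it replaces the old fixed 12 MB-768 MB clamp. Unless a zsize boot-arg overrides it, the target is a quarter of physical memory (sane_size), clamped below by ZONE_MAP_MIN, grown by 50% on LP64 kernels, and clamped above by half of RAM and, on 32-bit kernels, by the new 1.5 GB ZONE_MAP_MAX. The sketch below is illustrative only and replays just that default path for an example 8 GB machine; treating CONFIG_ZONE_MAP_MIN as 12 MB is an assumption, since its real value comes from the kernel configuration.

#include <stdio.h>
#include <stdint.h>

#define ZONE_MAP_MIN (12ULL * 1024 * 1024)	/* assumed value of CONFIG_ZONE_MAP_MIN */
#define ZONE_MAP_MAX (1024ULL * 1024 * 1536)	/* 1.5 GB, as in the diff */

/* Replay the default (no zsize boot-arg) sizing path from the diff. */
static uint64_t zone_map_size(uint64_t sane_size, int lp64)
{
	uint64_t zsize = sane_size >> 2;	/* target: 1/4 of physical memory */

	if (zsize < ZONE_MAP_MIN)
		zsize = ZONE_MAP_MIN;		/* clamp to min */
	if (lp64)
		zsize += zsize >> 1;		/* LP64: grow by 50% */
	if (zsize > (sane_size >> 1))
		zsize = sane_size >> 1;		/* clamp to half of RAM */
	if (!lp64 && zsize > ZONE_MAP_MAX)
		zsize = ZONE_MAP_MAX;		/* K32: clamp to 1.5 GB */
	return zsize;
}

int main(void)
{
	uint64_t ram = 8ULL * 1024 * 1024 * 1024;	/* example: 8 GB of RAM */

	printf("K64 zone map: %llu MB\n",
	       (unsigned long long)(zone_map_size(ram, 1) >> 20));
	printf("K32 zone map: %llu MB\n",
	       (unsigned long long)(zone_map_size(ram, 0) >> 20));
	return 0;
}

On that example machine the sketch prints 3072 MB for a 64-bit kernel and 1536 MB for a 32-bit one, showing how the LP64 growth and the K32 ceiling pull the result in opposite directions.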