/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
 * NON-INFRINGEMENT. Please see the License for the specific language
 * governing rights and limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
#include <mach/machine/vm_types.h>
#include <mach/vm_map.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kext_alloc.h>
#include <sys/kdebug.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

/* Minimum zone map size comes from the build configuration. */
#define ZONE_MAP_MIN	CONFIG_ZONE_MAP_MIN

/* Maximum zone size is 1.5G */
#define ZONE_MAP_MAX	(1024 * 1024 * 1536)

const vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
const vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS;

/*
 * Bootstrap progress flags, raised in order as successive VM
 * subsystems become usable during vm_mem_bootstrap().
 */
boolean_t vm_kernel_ready = FALSE;
boolean_t kmem_ready = FALSE;
boolean_t kmem_alloc_ready = FALSE;
boolean_t zlog_ready = FALSE;

/* Random VA chunk eaten from kernel_map to fuzz later allocations. */
vm_offset_t kmapoff_kaddr;
unsigned int kmapoff_pgcnt;

#if CONFIG_EMBEDDED
extern int log_executable_mem_entry;
#endif /* CONFIG_EMBEDDED */
/*
 * Emit a bootstrap progress marker into the early kernel-debug buffer.
 * The kprintf variant is kept, commented out, for bring-up debugging.
 */
static inline void
vm_mem_bootstrap_log(const char *message)
{
//	kprintf("vm_mem_bootstrap: %s\n", message);
	kernel_debug_string_early(message);
}
/*
* vm_mem_bootstrap initializes the virtual memory system.
vm_mem_bootstrap(void)
{
vm_offset_t start, end;
- vm_size_t zsize;
+ vm_size_t zsizearg;
+ mach_vm_size_t zsize;
/*
* Initializes resident memory structures.
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
-
+ vm_mem_bootstrap_log("vm_page_bootstrap");
vm_page_bootstrap(&start, &end);
/*
* Initialize other VM packages
*/
+ vm_mem_bootstrap_log("zone_bootstrap");
zone_bootstrap();
+
+ vm_mem_bootstrap_log("vm_object_bootstrap");
vm_object_bootstrap();
+
+ vm_kernel_ready = TRUE;
+
+ vm_mem_bootstrap_log("vm_map_init");
vm_map_init();
+
+ vm_mem_bootstrap_log("kmem_init");
kmem_init(start, end);
+ kmem_ready = TRUE;
+ /*
+ * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
+ * stack addresses. (With a 4K page and 9 bits of randomness, this
+ * eats at most 2M of VA from the map.)
+ */
+ if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt,
+ sizeof (kmapoff_pgcnt)))
+ kmapoff_pgcnt = early_random() & 0x1ff; /* 9 bits */
+
+ if (kmapoff_pgcnt > 0 &&
+ vm_allocate_kernel(kernel_map, &kmapoff_kaddr,
+ kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS)
+ panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt);
+
+#if CONFIG_EMBEDDED
+ PE_parse_boot_argn("log_executable_mem_entry",
+ &log_executable_mem_entry,
+ sizeof (log_executable_mem_entry));
+#endif /* CONFIG_EMBEDDED */
+
+ vm_mem_bootstrap_log("pmap_init");
pmap_init();
- if (PE_parse_boot_arg("zsize", &zsize))
- zsize = zsize * 1024 * 1024;
+ kmem_alloc_ready = TRUE;
+
+ if (PE_parse_boot_argn("zsize", &zsizearg, sizeof (zsizearg)))
+ zsize = zsizearg * 1024ULL * 1024ULL;
else {
zsize = sane_size >> 2; /* Get target zone size as 1/4 of physical memory */
}
- if(zsize < ZONE_MAP_MIN) zsize = ZONE_MAP_MIN; /* Clamp to min */
- if(zsize > ZONE_MAP_MAX) zsize = ZONE_MAP_MAX; /* Clamp to max */
- zone_init(zsize); /* Allocate address space for zones */
-
+ if (zsize < ZONE_MAP_MIN)
+ zsize = ZONE_MAP_MIN; /* Clamp to min */
+
+#if defined(__LP64__)
+ zsize += zsize >> 1;
+#endif /* __LP64__ */
+ if (zsize > sane_size >> 1)
+ zsize = sane_size >> 1; /* Clamp to half of RAM max */
+#if !__LP64__
+ if (zsize > ZONE_MAP_MAX)
+ zsize = ZONE_MAP_MAX; /* Clamp to 1.5GB max for K32 */
+#endif /* !__LP64__ */
+
+ vm_mem_bootstrap_log("kext_alloc_init");
+ kext_alloc_init();
+
+ vm_mem_bootstrap_log("zone_init");
+ assert((vm_size_t) zsize == zsize);
+ zone_init((vm_size_t) zsize); /* Allocate address space for zones */
+
+ /* The vm_page_zone must be created prior to kalloc_init; that
+ * routine can trigger zalloc()s (for e.g. mutex statistic structure
+ * initialization). The vm_page_zone must exist to saisfy fictitious
+ * page allocations (which are used for guard pages by the guard
+ * mode zone allocator).
+ */
+ vm_mem_bootstrap_log("vm_page_module_init");
+ vm_page_module_init();
+
+ vm_mem_bootstrap_log("kalloc_init");
kalloc_init();
+
+ vm_mem_bootstrap_log("vm_fault_init");
vm_fault_init();
- vm_page_module_init();
+
+ vm_mem_bootstrap_log("memory_manager_default_init");
memory_manager_default_init();
+
+ vm_mem_bootstrap_log("memory_object_control_bootstrap");
memory_object_control_bootstrap();
+
+ vm_mem_bootstrap_log("device_pager_bootstrap");
device_pager_bootstrap();
+
+ vm_paging_map_init();
+
+ vm_mem_bootstrap_log("vm_mem_bootstrap done");
+
+#ifdef CONFIG_ZCACHE
+ zcache_bootstrap();
+#endif
+ vm_rtfault_record_init();
}
void