- vm_object_t object = VM_OBJECT_NULL;
- vm_map_entry_t entry;
- vm_object_offset_t offset;
- vm_offset_t addr;
- vm_offset_t i;
- kern_return_t kr;
-
- size = round_page(size);
- if ((flags & KMA_KOBJECT) == 0) {
- /*
- * Allocate a new object. We must do this before locking
- * the map, or risk deadlock with the default pager:
- * device_read_alloc uses kmem_alloc,
- * which tries to allocate an object,
- * which uses kmem_alloc_wired to get memory,
- * which blocks for pages.
- * then the default pager needs to read a block
- * to process a memory_object_data_write,
- * and device_read_alloc calls kmem_alloc
- * and deadlocks on the map lock.
- */
- object = vm_object_allocate(size);
- kr = vm_map_find_space(map, &addr, size, mask, &entry);
+ vm_object_t object;
+ vm_object_offset_t offset;
+ vm_object_offset_t pg_offset;
+ vm_map_entry_t entry = NULL;
+ vm_map_offset_t map_addr, fill_start;
+ vm_map_offset_t map_mask;
+ vm_map_size_t map_size, fill_size;
+ kern_return_t kr, pe_result;
+ vm_page_t mem;
+ vm_page_t guard_page_list = NULL;
+ vm_page_t wired_page_list = NULL;
+ int guard_page_count = 0;
+ int wired_page_count = 0;
+ int page_grab_count = 0;
+ int i;
+ int vm_alloc_flags;
+ vm_map_kernel_flags_t vmk_flags;
+ vm_prot_t kma_prot;
+
+ if (! vm_kernel_ready) {
+ panic("kernel_memory_allocate: VM is not ready");
+ }
+
+ map_size = vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(map));
+ map_mask = (vm_map_offset_t) mask;
+
+ vm_alloc_flags = 0; //VM_MAKE_TAG(tag);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+
+ /* Check for zero allocation size (either directly or via overflow) */
+ if (map_size == 0) {
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Limit the size of a single extent of wired memory
+ * to try to limit the damage to the system if
+ * too many pages get wired down.
+ * The limit is 2GB for machines with up to 128GB of
+ * physical memory (sane_size/64 == 2GB at 128GB) and
+ * scales with installed memory above that.
+ */
+ if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
+ map_size > MAX(1ULL<<31, sane_size/64)) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * Guard pages:
+ *
+ * Guard pages are implemented as fictitious pages. By placing guard pages
+ * on either end of a stack, they can help detect cases where a thread walks
+ * off either end of its stack. They are allocated and set up here; attempts
+ * to access those pages are trapped in vm_fault_page().
+ *
+ * The map_size we were passed may include extra space for
+ * guard pages. If those were requested, then back it out of fill_size
+ * since vm_map_find_space() takes just the actual size not including
+ * guard pages. Similarly, fill_start indicates where the actual pages
+ * will begin in the range.
+ */
+
+ fill_start = 0;
+ fill_size = map_size;
+
+ if (flags & KMA_GUARD_FIRST) {
+ vmk_flags.vmkf_guard_before = TRUE;
+ fill_start += PAGE_SIZE_64;
+ fill_size -= PAGE_SIZE_64;
+ if (map_size < fill_start + fill_size) {
+ /* no space for a guard page */
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+ guard_page_count++;
+ }
+ if (flags & KMA_GUARD_LAST) {
+ vmk_flags.vmkf_guard_after = TRUE;
+ fill_size -= PAGE_SIZE_64;
+ if (map_size <= fill_start + fill_size) {
+ /* no space for a guard page */
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+ guard_page_count++;
+ }
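+ /*
+ * The fill area (the range minus any guard pages) is what
+ * actually gets backed by wired pages below, unless
+ * KMA_VAONLY or KMA_PAGEABLE was requested.
+ */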
+ wired_page_count = (int) (fill_size / PAGE_SIZE_64);
+ assert(wired_page_count * PAGE_SIZE_64 == fill_size);
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
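+ /*
+ * Grab the fictitious guard pages up front, chaining them
+ * through vmp_snext; if none are available, either fail
+ * (KMA_NOPAGEWAIT) or create more via vm_page_more_fictitious().
+ */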
+ for (i = 0; i < guard_page_count; i++) {
+ for (;;) {
+ mem = vm_page_grab_guard();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ vm_page_more_fictitious();
+ }
+ mem->vmp_snext = guard_page_list;
+ guard_page_list = mem;
+ }
+
+ if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
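+ /*
+ * Grab the backing pages up front as well; KMA_LOMEM asks
+ * vm_page_grablo() for physically low pages, otherwise
+ * vm_page_grab() is used.
+ */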
+ for (i = 0; i < wired_page_count; i++) {
+ uint64_t unavailable;
+
+ for (;;) {
+ if (flags & KMA_LOMEM)
+ mem = vm_page_grablo();
+ else
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
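+ /*
+ * If the already-wired pages plus the free-page target
+ * leave less than map_size of max_mem, the request is
+ * unlikely to ever be satisfiable, so fail instead of
+ * blocking in VM_PAGE_WAIT().
+ */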
+ unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
+
+ if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ VM_PAGE_WAIT();
+ }
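+ /* Zero the page if KMA_ZERO was requested and chain it onto wired_page_list via vmp_snext. */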
+ page_grab_count++;
+ if (KMA_ZERO & flags) vm_page_zero_fill(mem);
+ mem->vmp_snext = wired_page_list;
+ wired_page_list = mem;
+ }