+
+out:
+ if (guard_page_list)
+ vm_page_free_list(guard_page_list, FALSE);
+
+ if (wired_page_list)
+ vm_page_free_list(wired_page_list, FALSE);
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+ return kr;
+}
+
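+/*
+ * Typical usage (a sketch, assuming `addr' was reserved earlier with
+ * kernel_memory_allocate(..., KMA_KOBJECT | KMA_VAONLY, ...) and now
+ * needs physical backing):
+ *
+ *	kr = kernel_memory_populate(kernel_map, addr, ptoa(npages),
+ *	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_STACK);
+ */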
+kern_return_t
+kernel_memory_populate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags,
+ vm_tag_t tag)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ kern_return_t kr, pe_result;
+ vm_page_t mem;
+ vm_page_t page_list = NULL;
+ int page_count = 0;
+ int page_grab_count = 0;
+ int i;
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0);
+#endif
+
+ page_count = (int) (size / PAGE_SIZE_64);
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
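+ /*
+  * Compressor path: grab all of the pages up front (sleeping in
+  * VM_PAGE_WAIT() when the free list is empty), enter each one in
+  * the kernel pmap as it is grabbed, then insert the whole batch
+  * into the compressor object in a second pass below.
+  */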
+ if (flags & KMA_COMPRESSOR) {
+
+ pg_offset = page_count * PAGE_SIZE_64;
+
+ do {
+ for (;;) {
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ VM_PAGE_WAIT();
+ }
+ page_grab_count++;
+ if (flags & KMA_ZERO)
+ vm_page_zero_fill(mem);
+ mem->vmp_snext = page_list;
+ page_list = mem;
+
+ pg_offset -= PAGE_SIZE_64;
+
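+ /*
+  * Map the page into the kernel pmap as soon as it is grabbed;
+  * the compressor object insertion is deferred to a second pass
+  * so the object lock is taken only once for the whole range.
+  */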
+ kr = pmap_enter_options(kernel_pmap,
+ addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
+ PMAP_OPTIONS_INTERNAL, NULL);
+ assert(kr == KERN_SUCCESS);
+
+ } while (pg_offset);
+
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+
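+ /*
+  * Second pass: move each page from the local list into the
+  * compressor object and mark it as owned by the compressor.
+  * These pages are mapped but never wired.
+  */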
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = page_list;
+ page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
+
+ vm_page_insert(mem, object, offset + pg_offset);
+ assert(mem->vmp_busy);
+
+ mem->vmp_busy = FALSE;
+ mem->vmp_pmapped = TRUE;
+ mem->vmp_wpmapped = TRUE;
+ mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+ }
+ vm_object_unlock(object);
+
+#if KASAN
+ if (map == compressor_map) {
+ kasan_notify_address_nopoison(addr, size);
+ } else {
+ kasan_notify_address(addr, size);
+ }
+#endif
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+ return KERN_SUCCESS;
+ }
+
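+ /*
+  * Non-compressor path: grab page_count pages before taking any
+  * locks, honoring KMA_LOMEM and KMA_NOPAGEWAIT, and chain them
+  * on page_list for the insertion loop below.
+  */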
+ for (i = 0; i < page_count; i++) {
+ for (;;) {
+ if (flags & KMA_LOMEM)
+ mem = vm_page_grablo();
+ else
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ if ((flags & KMA_LOMEM) &&
+ (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ VM_PAGE_WAIT();
+ }
+ page_grab_count++;
+ if (flags & KMA_ZERO)
+ vm_page_zero_fill(mem);
+ mem->vmp_snext = page_list;
+ page_list = mem;
+ }
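+ /*
+  * Pick the backing object; only the kernel object is supported
+  * here, since the general vm_map lookup path is unimplemented.
+  */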
+ if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+
+ vm_object_lock(object);
+ } else {
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * take reference on object;
+ * unlock map;
+ */
+ panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
+
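+ /*
+  * Insert each grabbed page into the object, wire it, and enter
+  * it in the kernel pmap.
+  */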
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ if (page_list == NULL)
+ panic("kernel_memory_populate: page_list == NULL");
+
+ mem = page_list;
+ page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
+
+ assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+ mem->vmp_q_state = VM_PAGE_IS_WIRED;
+ mem->vmp_wire_count++;
+ if (__improbable(mem->vmp_wire_count == 0)) {
+ panic("kernel_memory_populate(%p): wire_count overflow", mem);
+ }
+
+ vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+
+ mem->vmp_busy = FALSE;
+ mem->vmp_pmapped = TRUE;
+ mem->vmp_wpmapped = TRUE;
+
+ PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
+
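+ /*
+  * PMAP_ENTER_OPTIONS was invoked with PMAP_OPTIONS_NOWAIT, so it
+  * can fail with KERN_RESOURCE_SHORTAGE rather than block (e.g. if
+  * the pmap needs to expand); retry with the blocking PMAP_ENTER
+  * after dropping the object lock.
+  */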
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ vm_object_unlock(object);
+
+ PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ pe_result);
+
+ vm_object_lock(object);
+ }
+
+ assert(pe_result == KERN_SUCCESS);
+
+ if (flags & KMA_NOENCRYPT) {
+ bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+ }
+ }
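+ /*
+  * Fold the newly wired pages into the global wired-page count;
+  * vm_page_wire_count is protected by the page queues lock.
+  */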
+ vm_page_lockspin_queues();
+ vm_page_wire_count += page_count;
+ vm_page_unlock_queues();
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+ if (kernel_object == object)
+ vm_tag_update_size(tag, size);
+
+ vm_object_unlock(object);
+
+#if KASAN
+ if (map == compressor_map) {
+ kasan_notify_address_nopoison(addr, size);
+ } else {
+ kasan_notify_address(addr, size);
+ }
+#endif
+ return KERN_SUCCESS;
+
+out:
+ if (page_list)
+ vm_page_free_list(page_list, FALSE);
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
+#endif
+
+ return kr;
+}
+
+
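+/*
+ * kernel_memory_depopulate:  the inverse of kernel_memory_populate --
+ * unmap the range, pull the pages out of the owning object (kernel or
+ * compressor), and release them back to the free list.
+ */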
+void
+kernel_memory_depopulate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ vm_page_t mem;
+ vm_page_t local_freeq = NULL;
+
+ assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+ } else if (flags & KMA_KOBJECT) {
+ offset = addr;
+ object = kernel_object;
+ vm_object_lock(object);
+ } else {
+ offset = 0;
+ object = NULL;
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * unlock map;
+ */
+ panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
+ }
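+ /*
+  * Remove the kernel mappings first; note that offset == addr in
+  * both of the reachable branches above.
+  */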
+ pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
+
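+ /*
+  * Detach each page from the object and collect it on a local
+  * free queue; the whole batch is freed after the object lock
+  * is dropped.
+  */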
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+
+ mem = vm_page_lookup(object, offset + pg_offset);
+
+ assert(mem);
+
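+ /*
+  * For anything but a compressor-owned page, make sure all
+  * remaining physical mappings are gone before the page is freed.
+  */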
+ if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+
+ mem->vmp_busy = TRUE;
+
+ assert(mem->vmp_tabled);
+ vm_page_remove(mem, TRUE);
+ assert(mem->vmp_busy);
+
+ assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
+ assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (mem->vmp_q_state == VM_PAGE_NOT_ON_Q));
+
+ mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
+ mem->vmp_snext = local_freeq;
+ local_freeq = mem;
+ }
+ vm_object_unlock(object);
+
+ if (local_freeq)
+ vm_page_free_list(local_freeq, TRUE);
+}