+ if (vm_pageout_deadlock_wait == 0)
+ vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+
+ if (vm_pageout_deadlock_relief == 0)
+ vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+
+ if (vm_pageout_inactive_relief == 0)
+ vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
+
+ if (vm_pageout_burst_active_throttle == 0)
+ vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
+
+ if (vm_pageout_burst_inactive_throttle == 0)
+ vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+
+ /*
+ * Set kernel task to low backing store privileged
+ * status
+ */
+ task_lock(kernel_task);
+ kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
+ task_unlock(kernel_task);
+
+ vm_page_free_count_init = vm_page_free_count;
+
+ /*
+ * even if we've already called vm_page_free_reserve
+ * call it again here to insure that the targets are
+ * accurately calculated (it uses vm_page_free_count_init)
+ * calling it with an arg of 0 will not change the reserve
+ * but will re-calculate free_min and free_target
+ */
+ if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
+ vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
+ } else
+ vm_page_free_reserve(0);
+
+
+ queue_init(&vm_pageout_queue_external.pgo_pending);
+ vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+ vm_pageout_queue_external.pgo_laundry = 0;
+ vm_pageout_queue_external.pgo_idle = FALSE;
+ vm_pageout_queue_external.pgo_busy = FALSE;
+ vm_pageout_queue_external.pgo_throttled = FALSE;
+ vm_pageout_queue_external.pgo_draining = FALSE;
+ vm_pageout_queue_external.pgo_lowpriority = FALSE;
+ vm_pageout_queue_external.pgo_tid = -1;
+ vm_pageout_queue_external.pgo_inited = FALSE;
+
+ queue_init(&vm_pageout_queue_internal.pgo_pending);
+ vm_pageout_queue_internal.pgo_maxlaundry = 0;
+ vm_pageout_queue_internal.pgo_laundry = 0;
+ vm_pageout_queue_internal.pgo_idle = FALSE;
+ vm_pageout_queue_internal.pgo_busy = FALSE;
+ vm_pageout_queue_internal.pgo_throttled = FALSE;
+ vm_pageout_queue_internal.pgo_draining = FALSE;
+ vm_pageout_queue_internal.pgo_lowpriority = FALSE;
+ vm_pageout_queue_internal.pgo_tid = -1;
+ vm_pageout_queue_internal.pgo_inited = FALSE;
+
+ /* internal pageout thread started when default pager registered first time */
+ /* external pageout and garbage collection threads started here */
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
+ BASEPRI_PREEMPT - 1,
+ &vm_pageout_external_iothread);
+ if (result != KERN_SUCCESS)
+ panic("vm_pageout_iothread_external: create failed");
+
+ thread_deallocate(vm_pageout_external_iothread);
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+ if (result != KERN_SUCCESS)
+ panic("vm_pageout_garbage_collect: create failed");
+
+ thread_deallocate(thread);
+
+#if VM_PRESSURE_EVENTS
+ result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+
+ if (result != KERN_SUCCESS)
+ panic("vm_pressure_thread: create failed");
+
+ thread_deallocate(thread);
+#endif
+
+ vm_object_reaper_init();
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
+ vm_compressor_pager_init();
+
+#if VM_PRESSURE_EVENTS
+ vm_pressure_events_enabled = TRUE;
+#endif /* VM_PRESSURE_EVENTS */
+
+#if CONFIG_PHANTOM_CACHE
+ vm_phantom_cache_init();
+#endif
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ pmap_protect(kernel_pmap,
+ vm_page_fake_buckets_start,
+ vm_page_fake_buckets_end,
+ VM_PROT_READ);
+// *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+#if VM_OBJECT_TRACKING
+ vm_object_tracking_init();
+#endif /* VM_OBJECT_TRACKING */
+
+
+#if FBDP_TEST_COLLAPSE_COMPRESSOR
+ vm_object_size_t backing_size, top_size;
+ vm_object_t backing_object, top_object;
+ vm_map_offset_t backing_offset, top_offset;
+ unsigned char *backing_address, *top_address;
+ kern_return_t kr;
+
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR:\n");
+
+ /* create backing object */
+ backing_size = 15 * PAGE_SIZE;
+ backing_object = vm_object_allocate(backing_size);
+ assert(backing_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
+ backing_object);
+ /* map backing object */
+ backing_offset = 0;
+ kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
+ VM_FLAGS_ANYWHERE, backing_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ backing_address = (unsigned char *) backing_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped backing object %p at 0x%llx\n",
+ backing_object, (uint64_t) backing_offset);
+ /* populate with pages to be compressed in backing object */
+ backing_address[0x1*PAGE_SIZE] = 0xB1;
+ backing_address[0x4*PAGE_SIZE] = 0xB4;
+ backing_address[0x7*PAGE_SIZE] = 0xB7;
+ backing_address[0xa*PAGE_SIZE] = 0xBA;
+ backing_address[0xd*PAGE_SIZE] = 0xBD;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "backing_object %p\n", backing_object);
+ /* compress backing object */
+ vm_object_pageout(backing_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
+ backing_object);
+ /* wait for all the pages to be gone */
+ while (*(volatile int *)&backing_object->resident_page_count != 0)
+ IODelay(10);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
+ backing_object);
+ /* populate with pages to be resident in backing object */
+ backing_address[0x0*PAGE_SIZE] = 0xB0;
+ backing_address[0x3*PAGE_SIZE] = 0xB3;
+ backing_address[0x6*PAGE_SIZE] = 0xB6;
+ backing_address[0x9*PAGE_SIZE] = 0xB9;
+ backing_address[0xc*PAGE_SIZE] = 0xBC;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "backing_object %p\n", backing_object);
+ /* leave the other pages absent */
+ /* mess with the paging_offset of the backing_object */
+ assert(backing_object->paging_offset == 0);
+ backing_object->paging_offset = 0x3000;
+
+ /* create top object */
+ top_size = 9 * PAGE_SIZE;
+ top_object = vm_object_allocate(top_size);
+ assert(top_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
+ top_object);
+ /* map top object */
+ top_offset = 0;
+ kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
+ VM_FLAGS_ANYWHERE, top_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ top_address = (unsigned char *) top_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped top object %p at 0x%llx\n",
+ top_object, (uint64_t) top_offset);
+ /* populate with pages to be compressed in top object */
+ top_address[0x3*PAGE_SIZE] = 0xA3;
+ top_address[0x4*PAGE_SIZE] = 0xA4;
+ top_address[0x5*PAGE_SIZE] = 0xA5;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "top_object %p\n", top_object);
+ /* compress top object */
+ vm_object_pageout(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
+ top_object);
+ /* wait for all the pages to be gone */
+ while (top_object->resident_page_count != 0);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
+ top_object);
+ /* populate with pages to be resident in top object */
+ top_address[0x0*PAGE_SIZE] = 0xA0;
+ top_address[0x1*PAGE_SIZE] = 0xA1;
+ top_address[0x2*PAGE_SIZE] = 0xA2;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "top_object %p\n", top_object);
+ /* leave the other pages absent */
+
+ /* link the 2 objects */
+ vm_object_reference(backing_object);
+ top_object->shadow = backing_object;
+ top_object->vo_shadow_offset = 0x3000;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
+ top_object, backing_object);
+
+ /* unmap backing object */
+ vm_map_remove(kernel_map,
+ backing_offset,
+ backing_offset + backing_size,
+ 0);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "unmapped backing_object %p [0x%llx:0x%llx]\n",
+ backing_object,
+ (uint64_t) backing_offset,
+ (uint64_t) (backing_offset + backing_size));
+
+ /* collapse */
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
+ vm_object_lock(top_object);
+ vm_object_collapse(top_object, 0, FALSE);
+ vm_object_unlock(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
+
+ /* did it work? */
+ if (top_object->shadow != VM_OBJECT_NULL) {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ if (vm_object_collapse_compressor_allowed) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ }
+ } else {
+ /* check the contents of the mapping */
+ unsigned char expect[9] =
+ { 0xA0, 0xA1, 0xA2, /* resident in top */
+ 0xA3, 0xA4, 0xA5, /* compressed in top */
+ 0xB9, /* resident in backing + shadow_offset */
+ 0xBD, /* compressed in backing + shadow_offset + paging_offset */
+ 0x00 }; /* absent in both */
+ unsigned char actual[9];
+ unsigned int i, errors;
+
+ errors = 0;
+ for (i = 0; i < sizeof (actual); i++) {
+ actual[i] = (unsigned char) top_address[i*PAGE_SIZE];
+ if (actual[i] != expect[i]) {
+ errors++;
+ }
+ }
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "actual [%x %x %x %x %x %x %x %x %x] "
+ "expect [%x %x %x %x %x %x %x %x %x] "
+ "%d errors\n",
+ actual[0], actual[1], actual[2], actual[3],
+ actual[4], actual[5], actual[6], actual[7],
+ actual[8],
+ expect[0], expect[1], expect[2], expect[3],
+ expect[4], expect[5], expect[6], expect[7],
+ expect[8],
+ errors);
+ if (errors) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ } else {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: PASS\n");
+ }
+ }
+#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
+
+#if FBDP_TEST_WIRE_AND_EXTRACT
+ ledger_t ledger;
+ vm_map_t user_map, wire_map;
+ mach_vm_address_t user_addr, wire_addr;
+ mach_vm_size_t user_size, wire_size;
+ mach_vm_offset_t cur_offset;
+ vm_prot_t cur_prot, max_prot;
+ ppnum_t user_ppnum, wire_ppnum;
+ kern_return_t kr;
+
+ ledger = ledger_instantiate(task_ledger_template,
+ LEDGER_CREATE_ACTIVE_ENTRIES);
+ user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT),
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ wire_map = vm_map_create(NULL,
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ user_addr = 0;
+ user_size = 0x10000;
+ kr = mach_vm_allocate(user_map,
+ &user_addr,
+ user_size,
+ VM_FLAGS_ANYWHERE);
+ assert(kr == KERN_SUCCESS);
+ wire_addr = 0;
+ wire_size = user_size;
+ kr = mach_vm_remap(wire_map,
+ &wire_addr,
+ wire_size,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ user_map,
+ user_addr,
+ FALSE,
+ &cur_prot,
+ &max_prot,
+ VM_INHERIT_NONE);
+ assert(kr == KERN_SUCCESS);
+ for (cur_offset = 0;
+ cur_offset < wire_size;
+ cur_offset += PAGE_SIZE) {
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK)),
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ user_ppnum = vm_map_get_phys_page(user_map,
+ user_addr + cur_offset);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+ }
+ cur_offset -= PAGE_SIZE;
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT,
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: PASS\n");
+#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
+
+ vm_pageout_continue();
+
+ /*
+ * Unreached code!
+ *
+ * The vm_pageout_continue() call above never returns, so the code below is never
+ * executed. We take advantage of this to declare several DTrace VM related probe
+ * points that our kernel doesn't have an analog for. These are probe points that
+ * exist in Solaris and are in the DTrace documentation, so people may have written
+ * scripts that use them. Declaring the probe points here means their scripts will
+ * compile and execute which we want for portability of the scripts, but since this
+ * section of code is never reached, the probe points will simply never fire. Yes,
+ * this is basically a hack. The problem is the DTrace probe points were chosen with
+ * Solaris specific VM events in mind, not portability to different VM implementations.
+ */
+
+ DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
+ /*NOTREACHED*/
+}
+
+
+
+/*
+ * Number of compressor i/o threads to create when a compressed pager is
+ * active.  Clamped by vm_pageout_internal_start() to the range
+ * [1, min(max_cpus - 1, MAX_COMPRESSOR_THREAD_COUNT)], and forced to 1
+ * when vm_compressor_immediate_preferred is set.
+ */
+int vm_compressor_thread_count = 2;
+
+/*
+ * vm_pageout_internal_start:
+ *
+ * Start the internal pageout thread(s).  When a compressed pager is
+ * active, vm_compressor_thread_count threads are created, each bound to
+ * its own ciq[] request-queue entry; otherwise a single internal
+ * iothread is started and the compressor thread count is zeroed.
+ *
+ * Returns: the kern_return_t of the last kernel_thread_start_priority()
+ * call (KERN_SUCCESS when every thread was created).
+ */
+kern_return_t
+vm_pageout_internal_start(void)
+{
+	kern_return_t	result;
+	int		i;
+	host_basic_info_data_t hinfo;
+	int		thread_count;
+
+
+	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+#define BSD_HOST 1
+		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+		assert(hinfo.max_cpus > 0);
+
+		/*
+		 * Clamp the thread count: leave at least one CPU for other
+		 * work, never go below 1, never exceed the compile-time max.
+		 * The order of these checks matters (the ceiling is applied
+		 * after the max_cpus adjustment).
+		 */
+		if (vm_compressor_thread_count >= hinfo.max_cpus)
+			vm_compressor_thread_count = hinfo.max_cpus - 1;
+		if (vm_compressor_thread_count <= 0)
+			vm_compressor_thread_count = 1;
+		else if (vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT)
+			vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
+
+		if (vm_compressor_immediate_preferred == TRUE) {
+			/* "immediate" compression runs in-line: one thread and
+			 * a single shared scratch buffer are sufficient */
+			vm_pageout_immediate_chead = NULL;
+			vm_pageout_immediate_scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
+
+			vm_compressor_thread_count = 1;
+		}
+		thread_count = vm_compressor_thread_count;
+
+		/* scale the internal queue's laundry limit with the thread count */
+		vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
+	} else {
+		/* default pager path: no compressor threads, one internal iothread */
+		vm_compressor_thread_count = 0;
+		thread_count = 1;
+		vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+	}
+
+	/* set up one compressor request-queue entry per compressor thread */
+	for (i = 0; i < vm_compressor_thread_count; i++) {
+		ciq[i].id = i;
+		ciq[i].q = &vm_pageout_queue_internal;
+		ciq[i].current_chead = NULL;
+		ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
+	}
+	/*
+	 * NOTE(review): in the non-compressor path thread_count is 1 while
+	 * vm_compressor_thread_count is 0, so ciq[0] is passed to the thread
+	 * uninitialized -- presumably vm_pageout_iothread_internal ignores
+	 * its argument in that configuration; verify against its definition.
+	 */
+	for (i = 0; i < thread_count; i++) {
+		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
+
+		/* drop the convenience reference returned for each new thread */
+		if (result == KERN_SUCCESS)
+			thread_deallocate(vm_pageout_internal_iothread);
+		else
+			break;
+	}
+	return result;
+}
+
+#if CONFIG_IOSCHED
+/*
+ * To support I/O Expedite for compressed files we mark the upls with special flags.
+ * The way decmpfs works is that we create a big upl which marks all the pages needed to
+ * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
+ * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
+ * being held in the big original UPL. We mark each of these smaller UPLs with the flag
+ * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
+ * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
+ * by the req upl lock (the reverse link doesn't need synchronization, since we never inspect this link
+ * unless the real I/O upl is being destroyed).
+ */
+
+
+/*
+ * upl_set_decmp_info:
+ *
+ * Link a "real I/O" upl to the decmpfs request upl (src_upl) it is
+ * servicing, so the request upl can expedite the outstanding I/O.
+ * Takes a reference on src_upl; upl_destroy() drops it when the real
+ * I/O upl dies.  The forward link (src_upl->decmp_io_upl) is protected
+ * by the src_upl lock.
+ */
+static void
+upl_set_decmp_info(upl_t upl, upl_t src_upl)
+{
+        assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+
+        upl_lock(src_upl);
+        if (src_upl->decmp_io_upl) {
+                /*
+                 * If there is already an alive real I/O UPL, ignore this new UPL.
+                 * This case should rarely happen and even if it does, it just means
+                 * that we might issue a spurious expedite which the driver is expected
+                 * to handle.
+                 */
+                upl_unlock(src_upl);
+                return;
+        }
+        src_upl->decmp_io_upl = (void *)upl;
+        /* reference on the request upl; released via upl_deallocate() in upl_destroy() */
+        src_upl->ref_count++;
+
+        upl->flags |= UPL_DECMP_REAL_IO;
+        upl->decmp_io_upl = (void *)src_upl;
+        upl_unlock(src_upl);
+}
+#endif /* CONFIG_IOSCHED */
+
+/*
+ * On UPL_DEBUG kernels, force creator/io tracking for every upl
+ * (checked in upl_create() and vm_object_upl_request()).
+ */
+#if UPL_DEBUG
+int	upl_debug_enabled = 1;
+#else
+int	upl_debug_enabled = 0;
+#endif
+
+/*
+ * upl_create:
+ *
+ * Allocate and initialize a upl structure.
+ *
+ *	type:	UPL_CREATE_INTERNAL appends a upl_page_info array to the
+ *		allocation; UPL_CREATE_LITE appends a "lite" page bitmap;
+ *		UPL_CREATE_IO_TRACKING / UPL_CREATE_EXPEDITE_SUP enable
+ *		the CONFIG_IOSCHED tracking fields.
+ *	flags:	initial upl->flags bits, OR'd with the flags derived
+ *		from 'type'.
+ *	size:	requested size in bytes; rounded up to a page multiple.
+ *
+ * Returns the new upl with ref_count == 1.
+ */
+static upl_t
+upl_create(int type, int flags, upl_size_t size)
+{
+	upl_t	upl;
+	vm_size_t	page_field_size = 0;
+	int	upl_flags = 0;
+	vm_size_t	upl_size  = sizeof(struct upl);
+
+	size = round_page_32(size);
+
+	if (type & UPL_CREATE_LITE) {
+		/* lite list: one bit per page, rounded up to a 4-byte multiple */
+		page_field_size = (atop(size) + 7) >> 3;
+		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+
+		upl_flags |= UPL_LITE;
+	}
+	if (type & UPL_CREATE_INTERNAL) {
+		/* internal upl: page info array lives right after the struct */
+		upl_size += sizeof(struct upl_page_info) * atop(size);
+
+		upl_flags |= UPL_INTERNAL;
+	}
+	/* NOTE(review): kalloc return is not checked before use -- presumably
+	 * this size class cannot fail or failure panics in kalloc; verify. */
+	upl = (upl_t)kalloc(upl_size + page_field_size);
+
+	if (page_field_size)
+	        bzero((char *)upl + upl_size, page_field_size);
+
+	upl->flags = upl_flags | flags;
+	upl->src_object = NULL;
+	upl->kaddr = (vm_offset_t)0;
+	upl->size = 0;
+	upl->map_object = NULL;
+	upl->ref_count = 1;
+	upl->ext_ref_count = 0;
+	upl->highest_page = 0;
+	upl_lock_init(upl);
+	upl->vector_upl = NULL;
+	upl->associated_upl = NULL;
+#if CONFIG_IOSCHED
+	if (type & UPL_CREATE_IO_TRACKING) {
+		/* record the creating thread's I/O policy for prioritization */
+		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
+	}
+
+	upl->upl_reprio_info = 0;
+	upl->decmp_io_upl = 0;
+	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
+		/* Only support expedite on internal UPLs */
+		thread_t curthread = current_thread();
+		upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
+		bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
+		upl->flags |= UPL_EXPEDITE_SUPPORTED;
+		/* if this thread is doing decmpfs I/O, link us to the request upl */
+		if (curthread->decmp_upl != NULL)
+			upl_set_decmp_info(upl, curthread->decmp_upl);
+	}
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
+	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
+		upl->upl_creator = current_thread();
+		upl->uplq.next = 0;
+		upl->uplq.prev = 0;
+		upl->flags |= UPL_TRACKED_BY_OBJECT;
+	}
+#endif
+
+#if UPL_DEBUG
+	upl->ubc_alias1 = 0;
+	upl->ubc_alias2 = 0;
+
+	upl->upl_state = 0;
+	upl->upl_commit_index = 0;
+	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
+
+	/* capture the creation backtrace for debugging */
+	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+#endif /* UPL_DEBUG */
+
+	return(upl);
+}
+
+/*
+ * upl_destroy:
+ *
+ * Tear down and free a upl whose ref_count has reached zero (called
+ * from upl_deallocate()).  Unlinks it from any decmpfs request upl,
+ * removes it from its object's upl queue, drops the shadow-object
+ * reference taken at creation, and frees the storage allocated by
+ * upl_create().
+ */
+static void
+upl_destroy(upl_t upl)
+{
+	int	page_field_size;  /* bit field in word size buf */
+        int	size;
+
+	if (upl->ext_ref_count) {
+		panic("upl(%p) ext_ref_count", upl);
+	}
+
+#if CONFIG_IOSCHED
+	/*
+	 * If this was a decmpfs "real I/O" upl, break the link from the
+	 * request upl and drop the reference taken in upl_set_decmp_info().
+	 */
+	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
+		upl_t src_upl;
+		src_upl = upl->decmp_io_upl;
+		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+		upl_lock(src_upl);
+		src_upl->decmp_io_upl = NULL;
+		upl_unlock(src_upl);
+		upl_deallocate(src_upl);
+	}
+#endif /* CONFIG_IOSCHED */
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+	/* remove this upl from the queue of its (shadowed) object */
+	if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
+		vm_object_t	object;
+
+		if (upl->flags & UPL_SHADOWED) {
+			object = upl->map_object->shadow;
+		} else {
+			object = upl->map_object;
+		}
+
+		vm_object_lock(object);
+		queue_remove(&object->uplq, upl, upl_t, uplq);
+		vm_object_activity_end(object);
+		vm_object_collapse(object, 0, TRUE);
+		vm_object_unlock(object);
+	}
+#endif
+	/*
+	 * drop a reference on the map_object whether or
+	 * not a pageout object is inserted
+	 */
+	if (upl->flags & UPL_SHADOWED)
+		vm_object_deallocate(upl->map_object);
+
+	/* device-memory upls are treated as a single page here */
+	if (upl->flags & UPL_DEVICE_MEMORY)
+		size = PAGE_SIZE;
+	else
+		size = upl->size;
+	page_field_size = 0;
+
+	if (upl->flags & UPL_LITE) {
+		/* recompute the lite bitmap size exactly as upl_create() did */
+		page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+	}
+	upl_lock_destroy(upl);
+	/* poison the pointer to help catch use after destroy */
+	upl->vector_upl = (vector_upl_t) 0xfeedbeef;
+
+#if CONFIG_IOSCHED
+	if (upl->flags & UPL_EXPEDITE_SUPPORTED)
+		kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size/PAGE_SIZE));
+#endif
+
+	/* free size must match the layout chosen in upl_create() */
+	if (upl->flags & UPL_INTERNAL) {
+		kfree(upl,
+		      sizeof(struct upl) +
+		      (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
+		      + page_field_size);
+	} else {
+		kfree(upl, sizeof(struct upl) + page_field_size);
+	}
+}
+
+/*
+ * upl_deallocate:
+ *
+ * Drop one reference on the upl; when the last reference goes away,
+ * release any vector-upl state and destroy the upl itself.
+ */
+void
+upl_deallocate(upl_t upl)
+{
+	upl_lock(upl);
+
+	if (--upl->ref_count != 0) {
+		/* still referenced elsewhere; nothing more to do */
+		upl_unlock(upl);
+		return;
+	}
+
+	if (vector_upl_is_valid(upl))
+		vector_upl_deallocate(upl);
+
+	upl_unlock(upl);
+	upl_destroy(upl);
+}
+
+#if CONFIG_IOSCHED
+/*
+ * upl_mark_decmp:
+ *
+ * Flag a tracked upl as a decmpfs request upl and remember it on the
+ * creating thread so subsequent real-I/O upls can be linked to it.
+ */
+void
+upl_mark_decmp(upl_t upl)
+{
+	if (!(upl->flags & UPL_TRACKED_BY_OBJECT))
+		return;
+
+	upl->flags |= UPL_DECMP_REQ;
+	upl->upl_creator->decmp_upl = (void *)upl;
+}
+
+/*
+ * upl_unmark_decmp:
+ *
+ * Forget a decmpfs request upl on its creating thread (no-op for a
+ * NULL upl or one never marked with UPL_DECMP_REQ).
+ */
+void
+upl_unmark_decmp(upl_t upl)
+{
+	if (upl == NULL)
+		return;
+
+	if (upl->flags & UPL_DECMP_REQ)
+		upl->upl_creator->decmp_upl = NULL;
+}
+
+#endif /* CONFIG_IOSCHED */
+
+#define VM_PAGE_Q_BACKING_UP(q)		\
+        ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
+
+boolean_t must_throttle_writes(void);
+
+/*
+ * Decide whether new writes should be throttled: TRUE only when the
+ * external pageout queue is backing up (laundry at >= 80% of its max)
+ * AND external pageable pages exceed 60% of non-compressed memory.
+ */
+boolean_t
+must_throttle_writes()
+{
+	if (!VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external))
+		return (FALSE);
+
+	if (vm_page_pageable_external_count <= (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10)
+		return (FALSE);
+
+	return (TRUE);
+}
+
+
+#if DEVELOPMENT || DEBUG
+/*
+ * Statistics about UPL enforcement of copy-on-write obligations.
+ */
+unsigned long upl_cow = 0;		/* COW resolutions done in vm_object_upl_request() */
+unsigned long upl_cow_again = 0;
+unsigned long upl_cow_pages = 0;	/* pages covered by those resolutions */
+unsigned long upl_cow_again_pages = 0;
+
+unsigned long iopl_cow = 0;
+unsigned long iopl_cow_pages = 0;
+#endif
+
+/*
+ * Routine: vm_object_upl_request
+ * Purpose:
+ * Cause the population of a portion of a vm_object.
+ * Depending on the nature of the request, the pages
+ *		returned may contain valid data or be uninitialized.
+ * A page list structure, listing the physical pages
+ * will be returned upon request.
+ * This function is called by the file system or any other
+ * supplier of backing store to a pager.
+ * IMPORTANT NOTE: The caller must still respect the relationship
+ * between the vm_object and its backing memory object. The
+ * caller MUST NOT substitute changes in the backing file
+ * without first doing a memory_object_lock_request on the
+ *		target range unless it is known that the pages are not
+ * shared with another entity at the pager level.
+ * Copy_in_to:
+ * if a page list structure is present
+ * return the mapped physical pages, where a
+ * page is not present, return a non-initialized
+ * one. If the no_sync bit is turned on, don't
+ * call the pager unlock to synchronize with other
+ * possible copies of the page. Leave pages busy
+ * in the original object, if a page list structure
+ * was specified. When a commit of the page list
+ * pages is done, the dirty bit will be set for each one.
+ * Copy_out_from:
+ * If a page list structure is present, return
+ * all mapped pages. Where a page does not exist
+ * map a zero filled one. Leave pages busy in
+ * the original object. If a page list structure
+ * is not specified, this call is a no-op.
+ *
+ * Note: access of default pager objects has a rather interesting
+ * twist. The caller of this routine, presumably the file system
+ * page cache handling code, will never actually make a request
+ * against a default pager backed object. Only the default
+ * pager will make requests on backing store related vm_objects
+ * In this way the default pager can maintain the relationship
+ * between backing store files (abstract memory objects) and
+ * the vm_objects (cache objects) they support.
+ *
+ */
+
+__private_extern__ kern_return_t
+vm_object_upl_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ upl_size_t size,
+ upl_t *upl_ptr,
+ upl_page_info_array_t user_page_list,
+ unsigned int *page_list_count,
+ upl_control_flags_t cntrl_flags)
+{
+ vm_page_t dst_page = VM_PAGE_NULL;
+ vm_object_offset_t dst_offset;
+ upl_size_t xfer_size;
+ unsigned int size_in_pages;
+ boolean_t dirty;
+ boolean_t hw_dirty;
+ upl_t upl = NULL;
+ unsigned int entry;
+#if MACH_CLUSTER_STATS
+ boolean_t encountered_lrp = FALSE;
+#endif
+ vm_page_t alias_page = NULL;
+ int refmod_state = 0;
+ wpl_array_t lite_list = NULL;
+ vm_object_t last_copy_object;
+ struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+ struct vm_page_delayed_work *dwp;
+ int dw_count;
+ int dw_limit;
+ int io_tracking_flag = 0;
+
+ if (cntrl_flags & ~UPL_VALID_FLAGS) {
+ /*
+ * For forward compatibility's sake,
+ * reject any unknown flag.
+ */
+ return KERN_INVALID_VALUE;
+ }
+ if ( (!object->internal) && (object->paging_offset != 0) )
+ panic("vm_object_upl_request: external object with non-zero paging offset\n");
+ if (object->phys_contiguous)
+ panic("vm_object_upl_request: contiguous object specified\n");
+
+
+ if (size > MAX_UPL_SIZE_BYTES)
+ size = MAX_UPL_SIZE_BYTES;
+
+ if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
+ *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (object->io_tracking || upl_debug_enabled)
+ io_tracking_flag |= UPL_CREATE_IO_TRACKING;
+#endif
+#if CONFIG_IOSCHED
+ if (object->io_tracking)
+ io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+#endif
+
+ if (cntrl_flags & UPL_SET_INTERNAL) {
+ if (cntrl_flags & UPL_SET_LITE) {
+
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
+
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t)
+ (((uintptr_t)user_page_list) +
+ ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+ if (size == 0) {
+ user_page_list = NULL;
+ lite_list = NULL;
+ }
+ } else {
+ upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
+
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ user_page_list = NULL;
+ }
+ }
+ } else {
+ if (cntrl_flags & UPL_SET_LITE) {
+
+ upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
+
+ lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ lite_list = NULL;
+ }
+ } else {
+ upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
+ }
+ }
+ *upl_ptr = upl;
+
+ if (user_page_list)
+ user_page_list[0].device = FALSE;
+
+ if (cntrl_flags & UPL_SET_LITE) {
+ upl->map_object = object;
+ } else {
+ upl->map_object = vm_object_allocate(size);
+ /*
+ * No neeed to lock the new object: nobody else knows
+ * about it yet, so it's all ours so far.
+ */
+ upl->map_object->shadow = object;
+ upl->map_object->pageout = TRUE;
+ upl->map_object->can_persist = FALSE;
+ upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ upl->map_object->vo_shadow_offset = offset;
+ upl->map_object->wimg_bits = object->wimg_bits;
+
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
+
+ upl->flags |= UPL_SHADOWED;
+ }
+ /*
+ * ENCRYPTED SWAP:
+ * Just mark the UPL as "encrypted" here.
+ * We'll actually encrypt the pages later,
+ * in upl_encrypt(), when the caller has
+ * selected which pages need to go to swap.
+ */
+ if (cntrl_flags & UPL_ENCRYPT)
+ upl->flags |= UPL_ENCRYPTED;
+
+ if (cntrl_flags & UPL_FOR_PAGEOUT)
+ upl->flags |= UPL_PAGEOUT;
+
+ vm_object_lock(object);
+ vm_object_activity_begin(object);
+
+ /*
+ * we can lock in the paging_offset once paging_in_progress is set
+ */
+ upl->size = size;
+ upl->offset = offset + object->paging_offset;
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (object->io_tracking || upl_debug_enabled) {
+ vm_object_activity_begin(object);
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+ }
+#endif
+ if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
+ /*
+ * Honor copy-on-write obligations
+ *
+ * The caller is gathering these pages and
+ * might modify their contents. We need to
+ * make sure that the copy object has its own
+ * private copies of these pages before we let
+ * the caller modify them.
+ */
+ vm_object_update(object,
+ offset,
+ size,
+ NULL,
+ NULL,
+ FALSE, /* should_return */
+ MEMORY_OBJECT_COPY_SYNC,
+ VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
+ upl_cow++;
+ upl_cow_pages += size >> PAGE_SHIFT;
+#endif
+ }
+ /*
+ * remember which copy object we synchronized with
+ */
+ last_copy_object = object->copy;
+ entry = 0;
+
+ xfer_size = size;
+ dst_offset = offset;
+ size_in_pages = size / PAGE_SIZE;
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+ dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+
+ if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
+ object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT))
+ object->scan_collisions = 0;
+
+ if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
+ boolean_t isSSD = FALSE;
+
+ vnode_pager_get_isSSD(object->pager, &isSSD);
+ vm_object_unlock(object);
+
+ OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+
+ if (isSSD == TRUE)
+ delay(1000 * size_in_pages);
+ else
+ delay(5000 * size_in_pages);
+ OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+
+ vm_object_lock(object);
+ }
+
+ while (xfer_size) {
+
+ dwp->dw_mask = 0;
+
+ if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
+ vm_object_unlock(object);
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
+ vm_object_lock(object);
+ }
+ if (cntrl_flags & UPL_COPYOUT_FROM) {
+ upl->flags |= UPL_PAGE_SYNC_DONE;
+
+ if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
+ dst_page->fictitious ||
+ dst_page->absent ||
+ dst_page->error ||
+ dst_page->cleaning ||
+ (VM_PAGE_WIRED(dst_page))) {
+
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ /*
+ * grab this up front...
+ * a high percentange of the time we're going to
+ * need the hardware modification state a bit later
+ * anyway... so we can eliminate an extra call into
+ * the pmap layer by grabbing it here and recording it
+ */
+ if (dst_page->pmapped)
+ refmod_state = pmap_get_refmod(dst_page->phys_page);
+ else
+ refmod_state = 0;
+
+ if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
+ /*
+ * page is on inactive list and referenced...
+ * reactivate it now... this gets it out of the
+ * way of vm_pageout_scan which would have to
+ * reactivate it upon tripping over it
+ */
+ dwp->dw_mask |= DW_vm_page_activate;
+ }
+ if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
+ /*
+ * we're only asking for DIRTY pages to be returned
+ */
+ if (dst_page->laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
+ /*
+ * if we were the page stolen by vm_pageout_scan to be
+ * cleaned (as opposed to a buddy being clustered in
+ * or this request is not being driven by a PAGEOUT cluster
+ * then we only need to check for the page being dirty or
+ * precious to decide whether to return it
+ */
+ if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
+ goto check_busy;
+ goto dont_return;
+ }
+ /*
+ * this is a request for a PAGEOUT cluster and this page
+ * is merely along for the ride as a 'buddy'... not only
+ * does it have to be dirty to be returned, but it also
+ * can't have been referenced recently...
+ */
+ if ( (hibernate_cleaning_in_progress == TRUE ||
+ (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) || dst_page->throttled)) &&
+ ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
+ goto check_busy;
+ }
+dont_return:
+ /*
+ * if we reach here, we're not to return
+ * the page... go on to the next one
+ */
+ if (dst_page->laundry == TRUE) {
+ /*
+ * if we get here, the page is not 'cleaning' (filtered out above).
+ * since it has been referenced, remove it from the laundry
+ * so we don't pay the cost of an I/O to clean a page
+ * we're just going to take back
+ */
+ vm_page_lockspin_queues();
+
+ vm_pageout_steal_laundry(dst_page, TRUE);
+ vm_page_activate(dst_page);
+
+ vm_page_unlock_queues();
+ }
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+check_busy:
+ if (dst_page->busy) {
+ if (cntrl_flags & UPL_NOBLOCK) {
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ /*
+ * someone else is playing with the
+ * page. We will have to wait.
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+
+ continue;
+ }
+ /*
+ * ENCRYPTED SWAP:
+ * The caller is gathering this page and might
+ * access its contents later on. Decrypt the
+ * page before adding it to the UPL, so that
+ * the caller never sees encrypted data.
+ */
+ if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
+ int was_busy;
+
+ /*
+ * save the current state of busy
+ * mark page as busy while decrypt
+ * is in progress since it will drop
+ * the object lock...
+ */
+ was_busy = dst_page->busy;
+ dst_page->busy = TRUE;
+
+ vm_page_decrypt(dst_page, 0);
+ vm_page_decrypt_for_upl_counter++;
+ /*
+ * restore to original busy state
+ */
+ dst_page->busy = was_busy;
+ }
+ if (dst_page->pageout_queue == TRUE) {
+
+ vm_page_lockspin_queues();
+
+ if (dst_page->pageout_queue == TRUE) {
+ /*
+ * we've buddied up a page for a clustered pageout
+ * that has already been moved to the pageout
+ * queue by pageout_scan... we need to remove
+ * it from the queue and drop the laundry count
+ * on that queue
+ */
+ vm_pageout_throttle_up(dst_page);
+ }
+ vm_page_unlock_queues();
+ }
+#if MACH_CLUSTER_STATS
+ /*
+ * pageout statistics gathering. count
+ * all the pages we will page out that
+ * were not counted in the initial
+ * vm_pageout_scan work
+ */
+ if (dst_page->pageout)
+ encountered_lrp = TRUE;
+ if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious))) {
+ if (encountered_lrp)
+ CLUSTER_STAT(pages_at_higher_offsets++;)
+ else
+ CLUSTER_STAT(pages_at_lower_offsets++;)
+ }
+#endif
+ hw_dirty = refmod_state & VM_MEM_MODIFIED;
+ dirty = hw_dirty ? TRUE : dst_page->dirty;
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+
+ assert (!pmap_is_noencrypt(dst_page->phys_page));
+
+ if (cntrl_flags & UPL_SET_LITE) {
+ unsigned int pg_num;
+
+ pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+ assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+ if (hw_dirty)
+ pmap_clear_modify(dst_page->phys_page);
+
+ /*
+ * Mark original page as cleaning
+ * in place.
+ */
+ dst_page->cleaning = TRUE;
+ dst_page->precious = FALSE;
+ } else {
+ /*
+ * use pageclean setup, it is more
+ * convenient even for the pageout
+ * cases here
+ */
+ vm_object_lock(upl->map_object);
+ vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+ vm_object_unlock(upl->map_object);
+
+ alias_page->absent = FALSE;
+ alias_page = NULL;
+ }
+#if MACH_PAGEMAP
+ /*
+ * Record that this page has been
+ * written out
+ */
+ vm_external_state_set(object->existence_map, dst_page->offset);
+#endif /*MACH_PAGEMAP*/
+ if (dirty) {
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ } else {
+ dst_page->dirty = FALSE;
+ }
+
+ if (!dirty)
+ dst_page->precious = TRUE;
+
+ if ( (cntrl_flags & UPL_ENCRYPT) ) {
+ /*
+ * ENCRYPTED SWAP:
+ * We want to deny access to the target page
+ * because its contents are about to be
+ * encrypted and the user would be very
+ * confused to see encrypted data instead
+ * of their data.
+ * We also set "encrypted_cleaning" to allow
+ * vm_pageout_scan() to demote that page
+ * from "adjacent/clean-in-place" to
+ * "target/clean-and-free" if it bumps into
+ * this page during its scanning while we're
+ * still processing this cluster.
+ */
+ dst_page->busy = TRUE;
+ dst_page->encrypted_cleaning = TRUE;
+ }
+ if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
+ if ( !VM_PAGE_WIRED(dst_page))
+ dst_page->pageout = TRUE;
+ }
+ } else {
+ if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
+ /*
+ * Honor copy-on-write obligations
+ *
+ * The copy object has changed since we
+ * last synchronized for copy-on-write.
+ * Another copy object might have been
+ * inserted while we released the object's
+ * lock. Since someone could have seen the
+ * original contents of the remaining pages
+ * through that new object, we have to
+ * synchronize with it again for the remaining
+ * pages only. The previous pages are "busy"
+ * so they can not be seen through the new
+ * mapping. The new mapping will see our
+ * upcoming changes for those previous pages,
+ * but that's OK since they couldn't see what
+ * was there before. It's just a race anyway
+ * and there's no guarantee of consistency or
+ * atomicity. We just don't want new mappings
+ * to see both the *before* and *after* pages.
+ */
+ if (object->copy != VM_OBJECT_NULL) {
+ vm_object_update(
+ object,
+ dst_offset,/* current offset */
+ xfer_size, /* remaining size */
+ NULL,
+ NULL,
+ FALSE, /* should_return */
+ MEMORY_OBJECT_COPY_SYNC,
+ VM_PROT_NO_CHANGE);
+
+#if DEVELOPMENT || DEBUG
+ upl_cow_again++;
+ upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
+#endif
+ }
+ /*
+ * remember the copy object we synced with
+ */
+ last_copy_object = object->copy;
+ }
+ dst_page = vm_page_lookup(object, dst_offset);
+
+ if (dst_page != VM_PAGE_NULL) {
+
+ if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+ /*
+ * skip over pages already present in the cache
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ if (dst_page->fictitious) {
+ panic("need corner case for fictitious page");
+ }
+
+ if (dst_page->busy || dst_page->cleaning) {
+ /*
+ * someone else is playing with the
+ * page. We will have to wait.
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+
+ continue;
+ }
+ if (dst_page->laundry) {
+ dst_page->pageout = FALSE;
+
+ vm_pageout_steal_laundry(dst_page, FALSE);
+ }
+ } else {
+ if (object->private) {
+ /*
+ * This is a nasty wrinkle for users
+ * of upl who encounter device or
+ * private memory however, it is
+ * unavoidable, only a fault can
+ * resolve the actual backing
+ * physical page by asking the
+ * backing device.
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ if (object->scan_collisions) {
+ /*
+ * the pageout_scan thread is trying to steal
+ * pages from this object, but has run into our
+ * lock... grab 2 pages from the head of the object...
+ * the first is freed on behalf of pageout_scan, the
+ * 2nd is for our own use... we use vm_object_page_grab
+ * in both cases to avoid taking pages from the free
+ * list since we are under memory pressure and our
+ * lock on this object is getting in the way of
+ * relieving it
+ */
+ dst_page = vm_object_page_grab(object);
+
+ if (dst_page != VM_PAGE_NULL)
+ vm_page_release(dst_page);
+
+ dst_page = vm_object_page_grab(object);
+ }
+ if (dst_page == VM_PAGE_NULL) {
+ /*
+ * need to allocate a page
+ */
+ dst_page = vm_page_grab();
+ }
+ if (dst_page == VM_PAGE_NULL) {
+ if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
+ /*
+ * we don't want to stall waiting for pages to come onto the free list
+ * while we're already holding absent pages in this UPL
+ * the caller will deal with the empty slots
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ /*
+ * no pages available... wait
+ * then try again for the same
+ * offset...
+ */
+ vm_object_unlock(object);
+
+ OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
+ VM_PAGE_WAIT();
+ OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
+
+ vm_object_lock(object);
+
+ continue;
+ }
+ vm_page_insert(dst_page, object, dst_offset);
+
+ dst_page->absent = TRUE;
+ dst_page->busy = FALSE;
+
+ if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
+ /*
+ * if UPL_RET_ONLY_ABSENT was specified,
+ * than we're definitely setting up a
+ * upl for a clustered read/pagein
+ * operation... mark the pages as clustered
+ * so upl_commit_range can put them on the
+ * speculative list
+ */
+ dst_page->clustered = TRUE;
+
+ if ( !(cntrl_flags & UPL_FILE_IO))
+ VM_STAT_INCR(pageins);
+ }
+ }
+ /*
+ * ENCRYPTED SWAP:
+ */
+ if (cntrl_flags & UPL_ENCRYPT) {
+ /*
+ * The page is going to be encrypted when we
+ * get it from the pager, so mark it so.
+ */
+ dst_page->encrypted = TRUE;
+ } else {
+ /*
+ * Otherwise, the page will not contain
+ * encrypted data.
+ */
+ dst_page->encrypted = FALSE;
+ }
+ dst_page->overwriting = TRUE;
+
+ if (dst_page->pmapped) {
+ if ( !(cntrl_flags & UPL_FILE_IO))
+ /*
+ * eliminate all mappings from the
+ * original object and its prodigy
+ */
+ refmod_state = pmap_disconnect(dst_page->phys_page);
+ else
+ refmod_state = pmap_get_refmod(dst_page->phys_page);
+ } else
+ refmod_state = 0;
+
+ hw_dirty = refmod_state & VM_MEM_MODIFIED;
+ dirty = hw_dirty ? TRUE : dst_page->dirty;
+
+ if (cntrl_flags & UPL_SET_LITE) {
+ unsigned int pg_num;
+
+ pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+ assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+ if (hw_dirty)
+ pmap_clear_modify(dst_page->phys_page);
+
+ /*
+ * Mark original page as cleaning
+ * in place.
+ */
+ dst_page->cleaning = TRUE;
+ dst_page->precious = FALSE;
+ } else {
+ /*
+ * use pageclean setup, it is more
+ * convenient even for the pageout
+ * cases here
+ */
+ vm_object_lock(upl->map_object);
+ vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+ vm_object_unlock(upl->map_object);
+
+ alias_page->absent = FALSE;
+ alias_page = NULL;
+ }
+
+ if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
+ upl->flags &= ~UPL_CLEAR_DIRTY;
+ upl->flags |= UPL_SET_DIRTY;
+ dirty = TRUE;
+ upl->flags |= UPL_SET_DIRTY;
+ } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
+ /*
+ * clean in place for read implies
+ * that a write will be done on all
+ * the pages that are dirty before
+ * a upl commit is done. The caller
+ * is obligated to preserve the
+ * contents of all pages marked dirty
+ */
+ upl->flags |= UPL_CLEAR_DIRTY;
+ }
+ dst_page->dirty = dirty;
+
+ if (!dirty)
+ dst_page->precious = TRUE;
+
+ if ( !VM_PAGE_WIRED(dst_page)) {
+ /*
+ * deny access to the target page while
+ * it is being worked on
+ */
+ dst_page->busy = TRUE;
+ } else
+ dwp->dw_mask |= DW_vm_page_wire;
+
+ /*
+ * We might be about to satisfy a fault which has been
+ * requested. So no need for the "restart" bit.
+ */
+ dst_page->restart = FALSE;
+ if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
+ /*
+ * expect the page to be used
+ */
+ dwp->dw_mask |= DW_set_reference;
+ }
+ if (cntrl_flags & UPL_PRECIOUS) {
+ if (dst_page->object->internal) {
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ dst_page->precious = FALSE;
+ } else {
+ dst_page->precious = TRUE;
+ }
+ } else {
+ dst_page->precious = FALSE;
+ }
+ }
+ if (dst_page->busy)
+ upl->flags |= UPL_HAS_BUSY;
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+ assert (!pmap_is_noencrypt(dst_page->phys_page));
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = dst_page->phys_page;
+ user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].precious = dst_page->precious;
+ user_page_list[entry].device = FALSE;
+ user_page_list[entry].needed = FALSE;
+ if (dst_page->clustered == TRUE)
+ user_page_list[entry].speculative = dst_page->speculative;
+ else
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = dst_page->cs_validated;
+ user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+ user_page_list[entry].cs_nx = dst_page->cs_nx;
+ user_page_list[entry].mark = FALSE;
+ }
+ /*
+ * if UPL_RET_ONLY_ABSENT is set, then
+ * we are working with a fresh page and we've
+ * just set the clustered flag on it to
+ * indicate that it was drug in as part of a
+ * speculative cluster... so leave it alone
+ */
+ if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+ /*
+ * someone is explicitly grabbing this page...
+ * update clustered and speculative state
+ *
+ */
+ if (dst_page->clustered)
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ }
+try_next_page:
+ if (dwp->dw_mask) {
+ if (dwp->dw_mask & DW_vm_page_activate)
+ VM_STAT_INCR(reactivations);
+
+ VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
+
+ if (dw_count >= dw_limit) {
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }
+ }
+ entry++;
+ dst_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
+ }
+ if (dw_count)
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
+
+ if (alias_page != NULL) {
+ VM_PAGE_FREE(alias_page);
+ }
+
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
+ *page_list_count = 0;
+ else if (*page_list_count > entry)
+ *page_list_count = entry;
+ }
+#if UPL_DEBUG
+ upl->upl_state = 1;
+#endif
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_object_super_upl_request
+ * Purpose:
+ * Cause the population of a portion of a vm_object
+ * in much the same way as memory_object_upl_request.
+ * Depending on the nature of the request, the pages
+ * returned may be contain valid data or be uninitialized.
+ * However, the region may be expanded up to the super
+ * cluster size provided.
+ *
+ * "offset" is memory-object relative on entry and is converted
+ * to an object-relative offset before the expansion logic runs.
+ * "super_cluster" is used as an alignment mask below, so it is
+ * presumably always a power of 2 -- TODO(review): confirm with
+ * callers.
+ * Vector UPLs (UPL_VECTOR) and offsets that precede the object's
+ * paging_offset are rejected with KERN_FAILURE; otherwise the
+ * (possibly enlarged) range is forwarded to vm_object_upl_request().
+ */
+
+__private_extern__ kern_return_t
+vm_object_super_upl_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ upl_size_t size,
+ upl_size_t super_cluster,
+ upl_t *upl,
+ upl_page_info_t *user_page_list,
+ unsigned int *page_list_count,
+ upl_control_flags_t cntrl_flags)
+{
+ if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
+ return KERN_FAILURE;
+
+ assert(object->paging_in_progress);
+ offset = offset - object->paging_offset;
+
+ if (super_cluster > size) {
+
+ vm_object_offset_t base_offset;
+ upl_size_t super_size;
+ vm_object_size_t super_size_64;
+
+ /*
+ * round the start down to a super_cluster boundary and
+ * widen the window to one super cluster -- or two, if
+ * the original request straddles the boundary...
+ */
+ base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
+ super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
+ /*
+ * ...then clamp the window so it doesn't run past the
+ * end of the object
+ */
+ super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
+ super_size = (upl_size_t) super_size_64;
+ assert(super_size == super_size_64);
+
+ if (offset > (base_offset + super_size)) {
+ panic("vm_object_super_upl_request: Missed target pageout"
+ " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
+ offset, base_offset, super_size, super_cluster,
+ size, object->paging_offset);
+ }
+ /*
+ * apparently there is a case where the vm requests a
+ * page to be written out who's offset is beyond the
+ * object size
+ */
+ if ((offset + size) > (base_offset + super_size)) {
+ super_size_64 = (offset + size) - base_offset;
+ super_size = (upl_size_t) super_size_64;
+ assert(super_size == super_size_64);
+ }
+
+ offset = base_offset;
+ size = super_size;
+ }
+ return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
+}
+
+
+/*
+ * Routine: vm_map_create_upl
+ * Purpose:
+ * Build a UPL for the range [offset, offset + *upl_size) of
+ * "map": recurse through submaps, create a VM object for an
+ * unbacked entry, resolve copy-on-write (via a
+ * vm_map_lookup_locked() fault when the entry "needs_copy"),
+ * optionally sync shadow/copy data, promote the backing object
+ * to "true_share"/COPY_DELAY, and finally call
+ * vm_object_iopl_request() to populate and wire the pages.
+ * *upl_size may be trimmed to the map entry's extent, and
+ * *flags is rewritten with UPL_DEV_MEMORY / UPL_PHYS_CONTIG
+ * hints describing the backing object.
+ */
+kern_return_t
+vm_map_create_upl(
+ vm_map_t map,
+ vm_map_address_t offset,
+ upl_size_t *upl_size,
+ upl_t *upl,
+ upl_page_info_array_t page_list,
+ unsigned int *count,
+ upl_control_flags_t *flags)
+{
+ vm_map_entry_t entry;
+ upl_control_flags_t caller_flags;
+ int force_data_sync;
+ int sync_cow_data;
+ vm_object_t local_object;
+ vm_map_offset_t local_offset;
+ vm_map_offset_t local_start;
+ kern_return_t ret;
+
+ caller_flags = *flags;
+
+ if (caller_flags & ~UPL_VALID_FLAGS) {
+ /*
+ * For forward compatibility's sake,
+ * reject any unknown flag.
+ */
+ return KERN_INVALID_VALUE;
+ }
+ force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
+ sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
+
+ if (upl == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+/*
+ * restart point: we come back here whenever the map lock had to be
+ * dropped and the entry must be looked up again
+ */
+REDISCOVER_ENTRY:
+ vm_map_lock_read(map);
+
+ if (!vm_map_lookup_entry(map, offset, &entry)) {
+ vm_map_unlock_read(map);
+ return KERN_FAILURE;
+ }
+
+ /*
+ * trim the request so it doesn't extend past this map entry
+ */
+ if ((entry->vme_end - offset) < *upl_size) {
+ *upl_size = (upl_size_t) (entry->vme_end - offset);
+ assert(*upl_size == entry->vme_end - offset);
+ }
+
+ if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+ *flags = 0;
+
+ if (!entry->is_sub_map &&
+ VME_OBJECT(entry) != VM_OBJECT_NULL) {
+ if (VME_OBJECT(entry)->private)
+ *flags = UPL_DEV_MEMORY;
+
+ if (VME_OBJECT(entry)->phys_contiguous)
+ *flags |= UPL_PHYS_CONTIG;
+ }
+ vm_map_unlock_read(map);
+ return KERN_SUCCESS;
+ }
+
+ if (entry->is_sub_map) {
+ vm_map_t submap;
+
+ submap = VME_SUBMAP(entry);
+ local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
+
+ vm_map_reference(submap);
+ vm_map_unlock_read(map);
+
+ /*
+ * recurse into the submap with the offset rebased to
+ * the submap's address space
+ */
+ ret = vm_map_create_upl(submap,
+ local_offset + (offset - local_start),
+ upl_size, upl, page_list, count, flags);
+ vm_map_deallocate(submap);
+
+ return ret;
+ }
+
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
+ !VME_OBJECT(entry)->phys_contiguous) {
+ if (*upl_size > MAX_UPL_SIZE_BYTES)
+ *upl_size = MAX_UPL_SIZE_BYTES;
+ }
+
+ /*
+ * Create an object if necessary.
+ */
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
+
+ if (vm_map_lock_read_to_write(map))
+ goto REDISCOVER_ENTRY;
+
+ VME_OBJECT_SET(entry,
+ vm_object_allocate((vm_size_t)
+ (entry->vme_end -
+ entry->vme_start)));
+ VME_OFFSET_SET(entry, 0);
+
+ vm_map_lock_write_to_read(map);
+ }
+
+ if (!(caller_flags & UPL_COPYOUT_FROM) &&
+ !(entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock_read(map);
+ return KERN_PROTECTION_FAILURE;
+ }
+
+ local_object = VME_OBJECT(entry);
+ assert(local_object != VM_OBJECT_NULL);
+
+ if (*upl_size != 0 &&
+ local_object->vo_size > *upl_size && /* partial UPL */
+ entry->wired_count == 0 && /* No COW for entries that are wired */
+ (map->pmap != kernel_pmap) && /* alias checks */
+ (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
+ ||
+ (!entry->needs_copy && /* case 2 */
+ local_object->internal &&
+ (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
+ local_object->ref_count > 1))) {
+ vm_prot_t prot;
+
+ /*
+ * Case 1:
+ * Set up the targeted range for copy-on-write to avoid
+ * applying true_share/copy_delay to the entire object.
+ *
+ * Case 2:
+ * This map entry covers only part of an internal
+ * object. There could be other map entries covering
+ * other areas of this object and some of these map
+ * entries could be marked as "needs_copy", which
+ * assumes that the object is COPY_SYMMETRIC.
+ * To avoid marking this object as COPY_DELAY and
+ * "true_share", let's shadow it and mark the new
+ * (smaller) object as "true_share" and COPY_DELAY.
+ */
+
+ if (vm_map_lock_read_to_write(map)) {
+ goto REDISCOVER_ENTRY;
+ }
+ vm_map_lock_assert_exclusive(map);
+ assert(VME_OBJECT(entry) == local_object);
+
+ /*
+ * clip the entry to the page-rounded UPL range so only
+ * that range gets the COW / true_share treatment
+ */
+ vm_map_clip_start(map,
+ entry,
+ vm_map_trunc_page(offset,
+ VM_MAP_PAGE_MASK(map)));
+ vm_map_clip_end(map,
+ entry,
+ vm_map_round_page(offset + *upl_size,
+ VM_MAP_PAGE_MASK(map)));
+ if ((entry->vme_end - offset) < *upl_size) {
+ *upl_size = (upl_size_t) (entry->vme_end - offset);
+ assert(*upl_size == entry->vme_end - offset);
+ }
+
+ /*
+ * write-protect the existing mappings so a later write
+ * triggers the copy-on-write fault
+ */
+ prot = entry->protection & ~VM_PROT_WRITE;
+ if (override_nx(map, VME_ALIAS(entry)) && prot)
+ prot |= VM_PROT_EXECUTE;
+ vm_object_pmap_protect(local_object,
+ VME_OFFSET(entry),
+ entry->vme_end - entry->vme_start,
+ ((entry->is_shared ||
+ map->mapped_in_other_pmaps)
+ ? PMAP_NULL
+ : map->pmap),
+ entry->vme_start,
+ prot);
+
+ assert(entry->wired_count == 0);
+
+ /*
+ * Lock the VM object and re-check its status: if it's mapped
+ * in another address space, we could still be racing with
+ * another thread holding that other VM map exclusively.
+ */
+ vm_object_lock(local_object);
+ if (local_object->true_share) {
+ /* object is already in proper state: no COW needed */
+ assert(local_object->copy_strategy !=
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ } else {
+ /* not true_share: ask for copy-on-write below */
+ assert(local_object->copy_strategy ==
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ entry->needs_copy = TRUE;
+ }
+ vm_object_unlock(local_object);
+
+ vm_map_lock_write_to_read(map);
+ }
+
+ if (entry->needs_copy) {
+ /*
+ * Honor copy-on-write for COPY_SYMMETRIC
+ * strategy.
+ */
+ vm_map_t local_map;
+ vm_object_t object;
+ vm_object_offset_t new_offset;
+ vm_prot_t prot;
+ boolean_t wired;
+ vm_map_version_t version;
+ vm_map_t real_map;
+ vm_prot_t fault_type;
+
+ local_map = map;
+
+ if (caller_flags & UPL_COPYOUT_FROM) {
+ fault_type = VM_PROT_READ | VM_PROT_COPY;
+ vm_counters.create_upl_extra_cow++;
+ vm_counters.create_upl_extra_cow_pages +=
+ (entry->vme_end - entry->vme_start) / PAGE_SIZE;
+ } else {
+ fault_type = VM_PROT_WRITE;
+ }
+ if (vm_map_lookup_locked(&local_map,
+ offset, fault_type,
+ OBJECT_LOCK_EXCLUSIVE,
+ &version, &object,
+ &new_offset, &prot, &wired,
+ NULL,
+ &real_map) != KERN_SUCCESS) {
+ if (fault_type == VM_PROT_WRITE) {
+ vm_counters.create_upl_lookup_failure_write++;
+ } else {
+ vm_counters.create_upl_lookup_failure_copy++;
+ }
+ vm_map_unlock_read(local_map);
+ return KERN_FAILURE;
+ }
+ /*
+ * the lookup above performed the copy-on-write fault...
+ * drop all the locks it left us holding and re-evaluate
+ * the (possibly changed) entry
+ */
+ if (real_map != map)
+ vm_map_unlock(real_map);
+ vm_map_unlock_read(local_map);
+
+ vm_object_unlock(object);
+
+ goto REDISCOVER_ENTRY;
+ }
+
+ if (sync_cow_data &&
+ (VME_OBJECT(entry)->shadow ||
+ VME_OBJECT(entry)->copy)) {
+ local_object = VME_OBJECT(entry);
+ local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
+
+ vm_object_reference(local_object);
+ vm_map_unlock_read(map);
+
+ if (local_object->shadow && local_object->copy) {
+ vm_object_lock_request(local_object->shadow,
+ ((vm_object_offset_t)
+ ((offset - local_start) +
+ local_offset) +
+ local_object->vo_shadow_offset),
+ *upl_size, FALSE,
+ MEMORY_OBJECT_DATA_SYNC,
+ VM_PROT_NO_CHANGE);
+ }
+ /* one-shot: don't sync again on the next pass */
+ sync_cow_data = FALSE;
+ vm_object_deallocate(local_object);
+
+ goto REDISCOVER_ENTRY;
+ }
+ if (force_data_sync) {
+ local_object = VME_OBJECT(entry);
+ local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
+
+ vm_object_reference(local_object);
+ vm_map_unlock_read(map);
+
+ vm_object_lock_request(local_object,
+ ((vm_object_offset_t)
+ ((offset - local_start) +
+ local_offset)),
+ (vm_object_size_t)*upl_size,
+ FALSE,
+ MEMORY_OBJECT_DATA_SYNC,
+ VM_PROT_NO_CHANGE);
+
+ /* one-shot: don't sync again on the next pass */
+ force_data_sync = FALSE;
+ vm_object_deallocate(local_object);
+
+ goto REDISCOVER_ENTRY;
+ }
+ /*
+ * report attributes of the backing object to the caller
+ */
+ if (VME_OBJECT(entry)->private)
+ *flags = UPL_DEV_MEMORY;
+ else
+ *flags = 0;
+
+ if (VME_OBJECT(entry)->phys_contiguous)
+ *flags |= UPL_PHYS_CONTIG;
+
+ local_object = VME_OBJECT(entry);
+ local_offset = VME_OFFSET(entry);
+ local_start = entry->vme_start;
+
+ vm_object_lock(local_object);
+
+ /*
+ * Ensure that this object is "true_share" and "copy_delay" now,
+ * while we're still holding the VM map lock. After we unlock the map,
+ * anything could happen to that mapping, including some copy-on-write
+ * activity. We need to make sure that the IOPL will point at the
+ * same memory as the mapping.
+ */
+ if (local_object->true_share) {
+ assert(local_object->copy_strategy !=
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ } else if (local_object != kernel_object &&
+ local_object != compressor_object &&
+ !local_object->phys_contiguous) {
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+ if (!local_object->true_share &&
+ vm_object_tracking_inited) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int num = 0;
+ num = OSBacktrace(bt,
+ VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ local_object,
+ VM_OBJECT_TRACKING_OP_TRUESHARE,
+ bt,
+ num);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+ local_object->true_share = TRUE;
+ if (local_object->copy_strategy ==
+ MEMORY_OBJECT_COPY_SYMMETRIC) {
+ local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+ }
+
+ vm_object_reference_locked(local_object);
+ vm_object_unlock(local_object);
+
+ vm_map_unlock_read(map);
+
+ ret = vm_object_iopl_request(local_object,
+ ((vm_object_offset_t)
+ ((offset - local_start) + local_offset)),
+ *upl_size,
+ upl,
+ page_list,
+ count,
+ caller_flags);
+ vm_object_deallocate(local_object);
+
+ return ret;
+}
+
+/*
+ * Internal routine to enter a UPL into a VM map.
+ *
+ * JMM - This should just be doable through the standard
+ * vm_map_enter() API.
+ *
+ * A vector UPL is mapped by allocating a submap (kmem_suballoc)
+ * and entering each sub-UPL at its recorded iostate offset; a
+ * plain UPL is entered at an anywhere-chosen address. On success
+ * the chosen address is returned through *dst_addr and the UPL
+ * gains a reference plus the UPL_PAGE_LIST_MAPPED flag.
+ * Returns KERN_FAILURE if the UPL (or any sub-UPL) is already
+ * mapped.
+ */
+kern_return_t
+vm_map_enter_upl(
+ vm_map_t map,
+ upl_t upl,
+ vm_map_offset_t *dst_addr)
+{
+ vm_map_size_t size;
+ vm_object_offset_t offset;
+ vm_map_offset_t addr;
+ vm_page_t m;
+ kern_return_t kr;
+ int isVectorUPL = 0, curr_upl=0;
+ upl_t vector_upl = NULL;
+ vm_offset_t vector_upl_dst_addr = 0;
+ vm_map_t vector_upl_submap = NULL;
+ upl_offset_t subupl_offset = 0;
+ upl_size_t subupl_size = 0;
+
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ int mapped=0,valid_upls=0;
+ vector_upl = upl;
+
+ upl_lock(vector_upl);
+ /*
+ * a vector UPL must be mapped as a unit: count how many
+ * sub-UPLs exist and how many are already mapped
+ */
+ for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
+ if(upl == NULL)
+ continue;
+ valid_upls++;
+ if (UPL_PAGE_LIST_MAPPED & upl->flags)
+ mapped++;
+ }
+
+ if(mapped) {
+ if(mapped != valid_upls)
+ panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
+ else {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ }
+
+ /*
+ * carve out a contiguous submap to hold all of the
+ * sub-UPL mappings
+ */
+ kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
+ if( kr != KERN_SUCCESS )
+ panic("Vector UPL submap allocation failed\n");
+ map = vector_upl_submap;
+ vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
+ curr_upl=0;
+ }
+ else
+ upl_lock(upl);
+
+process_upl_to_enter:
+ if(isVectorUPL){
+ if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+ /* all sub-UPLs entered... report the submap base */
+ *dst_addr = vector_upl_dst_addr;
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+ if(upl == NULL)
+ goto process_upl_to_enter;
+
+ vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
+ *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
+ } else {
+ /*
+ * check to see if already mapped
+ */
+ if (UPL_PAGE_LIST_MAPPED & upl->flags) {
+ upl_unlock(upl);
+ return KERN_FAILURE;
+ }
+ }
+ /*
+ * for a non-shadowed UPL that either has busy pages or is not
+ * device memory / IO-wired, build a shadow "map_object" and
+ * populate it with wired private alias pages that share the
+ * physical pages of the original object
+ */
+ if ((!(upl->flags & UPL_SHADOWED)) &&
+ ((upl->flags & UPL_HAS_BUSY) ||
+ !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
+
+ vm_object_t object;
+ vm_page_t alias_page;
+ vm_object_offset_t new_offset;
+ unsigned int pg_num;
+ wpl_array_t lite_list;
+
+ if (upl->flags & UPL_INTERNAL) {
+ /* internal UPLs keep the lite list after the page-info array */
+ lite_list = (wpl_array_t)
+ ((((uintptr_t)upl) + sizeof(struct upl))
+ + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+ } else {
+ lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
+ }
+ object = upl->map_object;
+ upl->map_object = vm_object_allocate(upl->size);
+
+ vm_object_lock(upl->map_object);
+
+ upl->map_object->shadow = object;
+ upl->map_object->pageout = TRUE;
+ upl->map_object->can_persist = FALSE;
+ upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
+ upl->map_object->wimg_bits = object->wimg_bits;
+ offset = upl->map_object->vo_shadow_offset;
+ new_offset = 0;
+ size = upl->size;
+
+ upl->flags |= UPL_SHADOWED;
+
+ while (size) {
+ pg_num = (unsigned int) (new_offset / PAGE_SIZE);
+ assert(pg_num == new_offset / PAGE_SIZE);
+
+ /* only pages marked in the lite list belong to this UPL */
+ if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
+
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL) {
+ panic("vm_upl_map: page missing\n");
+ }
+
+ /*
+ * Convert the fictitious page to a private
+ * shadow of the real page.
+ */
+ assert(alias_page->fictitious);
+ alias_page->fictitious = FALSE;
+ alias_page->private = TRUE;
+ alias_page->pageout = TRUE;
+ /*
+ * since m is a page in the upl it must
+ * already be wired or BUSY, so it's
+ * safe to assign the underlying physical
+ * page to the alias
+ */
+ alias_page->phys_page = m->phys_page;
+
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
+ vm_page_unlock_queues();
+
+ /*
+ * ENCRYPTED SWAP:
+ * The virtual page ("m") has to be wired in some way
+ * here or its physical page ("m->phys_page") could
+ * be recycled at any time.
+ * Assuming this is enforced by the caller, we can't
+ * get an encrypted page here. Since the encryption
+ * key depends on the VM page's "pager" object and
+ * the "paging_offset", we couldn't handle 2 pageable
+ * VM pages (with different pagers and paging_offsets)
+ * sharing the same physical page: we could end up
+ * encrypting with one key (via one VM page) and
+ * decrypting with another key (via the alias VM page).
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+
+ vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
+
+ assert(!alias_page->wanted);
+ alias_page->busy = FALSE;
+ alias_page->absent = FALSE;
+ }
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE_64;
+ new_offset += PAGE_SIZE_64;
+ }
+ vm_object_unlock(upl->map_object);
+ }
+ if (upl->flags & UPL_SHADOWED)
+ offset = 0;
+ else
+ offset = upl->offset - upl->map_object->paging_offset;
+
+ size = upl->size;
+
+ vm_object_reference(upl->map_object);
+
+ if(!isVectorUPL) {
+ *dst_addr = 0;
+ /*
+ * NEED A UPL_MAP ALIAS
+ */
+ kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+ upl->map_object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ upl_unlock(upl);
+ return(kr);
+ }
+ }
+ else {
+ /* sub-UPL goes at its fixed slot inside the vector submap */
+ kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+ VM_FLAGS_FIXED | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+ upl->map_object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ if(kr)
+ panic("vm_map_enter failed for a Vector UPL\n");
+ }
+ vm_object_lock(upl->map_object);
+
+ /*
+ * pre-enter each resident page into the pmap so the mapping
+ * is usable without taking faults
+ */
+ for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
+ m = vm_page_lookup(upl->map_object, offset);
+
+ if (m) {
+ m->pmapped = TRUE;
+
+ /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
+ * but only in kernel space. If this was on a user map,
+ * we'd have to set the wpmapped bit. */
+ /* m->wpmapped = TRUE; */
+ assert(map->pmap == kernel_pmap);
+
+ PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE);
+ }
+ offset += PAGE_SIZE_64;
+ }
+ vm_object_unlock(upl->map_object);
+
+ /*
+ * hold a reference for the mapping
+ */
+ upl->ref_count++;
+ upl->flags |= UPL_PAGE_LIST_MAPPED;
+ upl->kaddr = (vm_offset_t) *dst_addr;
+ assert(upl->kaddr == *dst_addr);
+
+ if(isVectorUPL)
+ goto process_upl_to_enter;
+
+ upl_unlock(upl);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Internal routine to remove a UPL mapping from a VM map.
+ *
+ * XXX - This should just be doable through a standard
+ * vm_map_remove() operation. Otherwise, implicit clean-up
+ * of the target map won't be able to correctly remove
+ * these (and release the reference on the UPL). Having
+ * to do this means we can't map these into user-space
+ * maps yet.
+ *
+ * For a vector UPL the per-sub-UPL bookkeeping is cleared and
+ * then the whole submap created by vm_map_enter_upl() is torn
+ * down in one vm_map_remove(); individual sub-UPL mappings are
+ * not removed separately. Returns KERN_FAILURE if the UPL (or
+ * any sub-UPL) is not currently mapped.
+ */
+kern_return_t
+vm_map_remove_upl(
+ vm_map_t map,
+ upl_t upl)
+{
+ vm_address_t addr;
+ upl_size_t size;
+ int isVectorUPL = 0, curr_upl = 0;
+ upl_t vector_upl = NULL;
+
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ int unmapped=0, valid_upls=0;
+ vector_upl = upl;
+ upl_lock(vector_upl);
+ /*
+ * a vector UPL is unmapped as a unit: verify that every
+ * sub-UPL is currently mapped before proceeding
+ */
+ for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
+ if(upl == NULL)
+ continue;
+ valid_upls++;
+ if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
+ unmapped++;
+ }
+
+ if(unmapped) {
+ if(unmapped != valid_upls)
+ panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
+ else {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ }
+ curr_upl=0;
+ }
+ else
+ upl_lock(upl);
+
+process_upl_to_remove:
+ if(isVectorUPL) {
+ if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+ /*
+ * all sub-UPLs processed... now tear down the
+ * submap that held their mappings
+ */
+ vm_map_t v_upl_submap;
+ vm_offset_t v_upl_submap_dst_addr;
+ vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
+
+ vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
+ vm_map_deallocate(v_upl_submap);
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+ if(upl == NULL)
+ goto process_upl_to_remove;
+ }
+
+ if (upl->flags & UPL_PAGE_LIST_MAPPED) {
+ addr = upl->kaddr;
+ size = upl->size;
+
+ /*
+ * drop the reference taken by vm_map_enter_upl() and
+ * clear the mapped state before removing the VA range
+ */
+ assert(upl->ref_count > 1);
+ upl->ref_count--; /* removing mapping ref */
+
+ upl->flags &= ~UPL_PAGE_LIST_MAPPED;
+ upl->kaddr = (vm_offset_t) 0;
+
+ if(!isVectorUPL) {
+ upl_unlock(upl);
+
+ vm_map_remove(
+ map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_NO_FLAGS);
+
+ return KERN_SUCCESS;
+ }
+ else {
+ /*
+ * If it's a Vectored UPL, we'll be removing the entire
+ * submap anyways, so no need to remove individual UPL
+ * element mappings from within the submap
+ */
+ goto process_upl_to_remove;
+ }
+ }
+ upl_unlock(upl);
+
+ return KERN_FAILURE;
+}
+
+/*
+ * upl_commit_range:
+ *
+ * Commit the pages in [offset, offset+size) of the given UPL back to
+ * their VM object: clear the cleaning/busy state, transfer dirty/
+ * code-signing state from the supplied page_list (if any), and requeue
+ * or free the pages as dictated by 'flags' (UPL_COMMIT_*).
+ * For a Vector UPL the range is walked sub-UPL by sub-UPL via the
+ * process_upl_to_commit loop.  *empty is set when the (sub-)UPL has no
+ * remaining occupied pages, so the caller knows it may be deallocated.
+ */
+kern_return_t
+upl_commit_range(
+	upl_t			upl,
+	upl_offset_t		offset,
+	upl_size_t		size,
+	int			flags,
+	upl_page_info_t		*page_list,
+	mach_msg_type_number_t	count,
+	boolean_t		*empty)
+{
+	upl_size_t		xfer_size, subupl_size = size;
+	vm_object_t		shadow_object;
+	vm_object_t		object;
+	vm_object_offset_t	target_offset;
+	upl_offset_t		subupl_offset = offset;
+	int			entry;
+	wpl_array_t		lite_list;
+	int			occupied;
+	int			clear_refmod = 0;
+	int			pgpgout_count = 0;
+	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+	struct vm_page_delayed_work	*dwp;
+	int			dw_count;
+	int			dw_limit;
+	int			isVectorUPL = 0;
+	upl_t			vector_upl = NULL;
+	boolean_t		should_be_throttled = FALSE;
+
+	vm_page_t		nxt_page = VM_PAGE_NULL;
+	int			fast_path_possible = 0;
+	int			fast_path_full_commit = 0;
+	int			throttle_page = 0;
+	int			unwired_count = 0;
+	int			local_queue_count = 0;
+	queue_head_t		local_queue;
+
+	*empty = FALSE;
+
+	if (upl == UPL_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (count == 0)
+		page_list = NULL;
+
+	if((isVectorUPL = vector_upl_is_valid(upl))) {
+		vector_upl = upl;
+		upl_lock(vector_upl);
+	}
+	else
+		upl_lock(upl);
+
+process_upl_to_commit:
+
+	if(isVectorUPL) {
+		/*
+		 * Peel off the next sub-UPL covering the remaining
+		 * [subupl_offset, subupl_offset+subupl_size) range.
+		 */
+		size = subupl_size;
+		offset = subupl_offset;
+		if(size == 0) {
+			upl_unlock(vector_upl);
+			return KERN_SUCCESS;
+		}
+		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+		if(upl == NULL) {
+			upl_unlock(vector_upl);
+			return KERN_FAILURE;
+		}
+		page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
+		subupl_size -= size;
+		subupl_offset += size;
+	}
+
+#if UPL_DEBUG
+	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+
+		upl->upl_commit_index++;
+	}
+#endif
+	if (upl->flags & UPL_DEVICE_MEMORY)
+		xfer_size = 0;
+	else if ((offset + size) <= upl->size)
+	        xfer_size = size;
+	else {
+		/* requested range falls outside the UPL */
+		if(!isVectorUPL)
+			upl_unlock(upl);
+		else {
+			upl_unlock(vector_upl);
+		}
+		return KERN_FAILURE;
+	}
+	if (upl->flags & UPL_SET_DIRTY)
+		flags |= UPL_COMMIT_SET_DIRTY;
+	if (upl->flags & UPL_CLEAR_DIRTY)
+	        flags |= UPL_COMMIT_CLEAR_DIRTY;
+
+	/*
+	 * The lite bitmap follows the upl structure; for an INTERNAL upl
+	 * it sits behind the embedded upl_page_info array.
+	 */
+	if (upl->flags & UPL_INTERNAL)
+		lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
+					   + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+	else
+		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+
+	object = upl->map_object;
+
+	if (upl->flags & UPL_SHADOWED) {
+		vm_object_lock(object);
+		shadow_object = object->shadow;
+	} else {
+		shadow_object = object;
+	}
+	entry = offset/PAGE_SIZE;
+	target_offset = (vm_object_offset_t)offset;
+
+	assert(!(target_offset & PAGE_MASK));
+	assert(!(xfer_size & PAGE_MASK));
+
+	if (upl->flags & UPL_KERNEL_OBJECT)
+		vm_object_lock_shared(shadow_object);
+	else
+		vm_object_lock(shadow_object);
+
+	if (upl->flags & UPL_ACCESS_BLOCKED) {
+		assert(shadow_object->blocked_access);
+		shadow_object->blocked_access = FALSE;
+		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
+	}
+
+	if (shadow_object->code_signed) {
+		/*
+		 * CODE SIGNING:
+		 * If the object is code-signed, do not let this UPL tell
+		 * us if the pages are valid or not.  Let the pages be
+		 * validated by VM the normal way (when they get mapped or
+		 * copied).
+		 */
+		flags &= ~UPL_COMMIT_CS_VALIDATED;
+	}
+	if (! page_list) {
+		/*
+		 * No page list to get the code-signing info from !?
+		 */
+		flags &= ~UPL_COMMIT_CS_VALIDATED;
+	}
+	if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
+		should_be_throttled = TRUE;
+
+	dwp = &dw_array[0];
+	dw_count = 0;
+	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+
+	/*
+	 * Fast path: for a plain IO-wire commit of a non-purgeable object
+	 * we can walk the object's resident page list directly and batch
+	 * the queue manipulation at the end instead of going through the
+	 * delayed-work machinery page by page.
+	 */
+	if ((upl->flags & UPL_IO_WIRE) &&
+	    !(flags & UPL_COMMIT_FREE_ABSENT) &&
+	    !isVectorUPL &&
+	    shadow_object->purgable != VM_PURGABLE_VOLATILE &&
+	    shadow_object->purgable != VM_PURGABLE_EMPTY) {
+
+		if (!queue_empty(&shadow_object->memq)) {
+			queue_init(&local_queue);
+			if (size == shadow_object->vo_size) {
+				nxt_page = (vm_page_t)queue_first(&shadow_object->memq);
+				fast_path_full_commit = 1;
+			}
+			fast_path_possible = 1;
+
+			if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal &&
+			    (shadow_object->purgable == VM_PURGABLE_DENY ||
+			     shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
+			     shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
+				throttle_page = 1;
+			}
+		}
+	}
+
+	while (xfer_size) {
+		vm_page_t	t, m;
+
+		dwp->dw_mask = 0;
+		clear_refmod = 0;
+
+		m = VM_PAGE_NULL;
+
+		if (upl->flags & UPL_LITE) {
+			unsigned int	pg_num;
+
+			if (nxt_page != VM_PAGE_NULL) {
+				/* full-commit fast path: take pages straight off memq */
+				m = nxt_page;
+				nxt_page = (vm_page_t)queue_next(&nxt_page->listq);
+				target_offset = m->offset;
+			}
+			pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+			assert(pg_num == target_offset/PAGE_SIZE);
+
+			if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+
+				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
+					m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
+			} else
+				m = NULL;
+		}
+		if (upl->flags & UPL_SHADOWED) {
+			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+
+				t->pageout = FALSE;
+
+				VM_PAGE_FREE(t);
+
+				if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
+					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
+			}
+		}
+		if (m == VM_PAGE_NULL)
+			goto commit_next_page;
+
+		if (m->compressor) {
+			assert(m->busy);
+
+			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+			goto commit_next_page;
+		}
+
+		if (flags & UPL_COMMIT_CS_VALIDATED) {
+			/*
+			 * CODE SIGNING:
+			 * Set the code signing bits according to
+			 * what the UPL says they should be.
+			 */
+			m->cs_validated = page_list[entry].cs_validated;
+			m->cs_tainted = page_list[entry].cs_tainted;
+			m->cs_nx = page_list[entry].cs_nx;
+		}
+		if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
+			m->written_by_kernel = TRUE;
+
+		if (upl->flags & UPL_IO_WIRE) {
+
+			if (page_list)
+				page_list[entry].phys_addr = 0;
+
+			if (flags & UPL_COMMIT_SET_DIRTY) {
+				SET_PAGE_DIRTY(m, FALSE);
+			} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+				m->dirty = FALSE;
+
+				if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+				    m->cs_validated && !m->cs_tainted) {
+					/*
+					 * CODE SIGNING:
+					 * This page is no longer dirty
+					 * but could have been modified,
+					 * so it will need to be
+					 * re-validated.
+					 */
+					if (m->slid) {
+						panic("upl_commit_range(%p): page %p was slid\n",
+						      upl, m);
+					}
+					assert(!m->slid);
+					m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+					vm_cs_validated_resets++;
+#endif
+					pmap_disconnect(m->phys_page);
+				}
+				clear_refmod |= VM_MEM_MODIFIED;
+			}
+			if (upl->flags & UPL_ACCESS_BLOCKED) {
+				/*
+				 * We blocked access to the pages in this UPL.
+				 * Clear the "busy" bit and wake up any waiter
+				 * for this page.
+				 */
+				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+			}
+			if (fast_path_possible) {
+				assert(m->object->purgable != VM_PURGABLE_EMPTY);
+				assert(m->object->purgable != VM_PURGABLE_VOLATILE);
+				if (m->absent) {
+					assert(m->wire_count == 0);
+					assert(m->busy);
+
+					m->absent = FALSE;
+					dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+				} else {
+					if (m->wire_count == 0)
+						panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
+
+					/*
+					 * XXX FBDP need to update some other
+					 * counters here (purgeable_wired_count)
+					 * (ledgers), ...
+					 */
+					assert(m->wire_count);
+					m->wire_count--;
+
+					if (m->wire_count == 0)
+						unwired_count++;
+				}
+				if (m->wire_count == 0) {
+					/*
+					 * defer the queue insertion: collect the page
+					 * on local_queue and splice the whole batch
+					 * onto the target LRU queue after the loop.
+					 */
+					queue_enter(&local_queue, m, vm_page_t, pageq);
+					local_queue_count++;
+
+					if (throttle_page) {
+						m->throttled = TRUE;
+					} else {
+						if (flags & UPL_COMMIT_INACTIVATE)
+							m->inactive = TRUE;
+						else
+							m->active = TRUE;
+					}
+				}
+			} else {
+				if (flags & UPL_COMMIT_INACTIVATE) {
+					dwp->dw_mask |= DW_vm_page_deactivate_internal;
+					clear_refmod |= VM_MEM_REFERENCED;
+				}
+				if (m->absent) {
+					if (flags & UPL_COMMIT_FREE_ABSENT)
+						dwp->dw_mask |= DW_vm_page_free;
+					else {
+						m->absent = FALSE;
+						dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+
+						if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
+							dwp->dw_mask |= DW_vm_page_activate;
+					}
+				} else
+					dwp->dw_mask |= DW_vm_page_unwire;
+			}
+			goto commit_next_page;
+		}
+		assert(!m->compressor);
+
+		if (page_list)
+			page_list[entry].phys_addr = 0;
+
+		/*
+		 * make sure to clear the hardware
+		 * modify or reference bits before
+		 * releasing the BUSY bit on this page
+		 * otherwise we risk losing a legitimate
+		 * change of state
+		 */
+		if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+			m->dirty = FALSE;
+
+			clear_refmod |= VM_MEM_MODIFIED;
+		}
+		if (m->laundry)
+			dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+		if (VM_PAGE_WIRED(m))
+			m->pageout = FALSE;
+
+		if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+		    m->cs_validated && !m->cs_tainted) {
+			/*
+			 * CODE SIGNING:
+			 * This page is no longer dirty
+			 * but could have been modified,
+			 * so it will need to be
+			 * re-validated.
+			 */
+			if (m->slid) {
+				panic("upl_commit_range(%p): page %p was slid\n",
+				      upl, m);
+			}
+			assert(!m->slid);
+			m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+			vm_cs_validated_resets++;
+#endif
+			pmap_disconnect(m->phys_page);
+		}
+		if (m->overwriting) {
+			/*
+			 * the (COPY_OUT_FROM == FALSE) request_page_list case
+			 */
+			if (m->busy) {
+#if CONFIG_PHANTOM_CACHE
+				if (m->absent && !m->object->internal)
+					dwp->dw_mask |= DW_vm_phantom_cache_update;
+#endif
+				m->absent = FALSE;
+
+				dwp->dw_mask |= DW_clear_busy;
+			} else {
+				/*
+				 * alternate (COPY_OUT_FROM == FALSE) page_list case
+				 * Occurs when the original page was wired
+				 * at the time of the list request
+				 */
+				assert(VM_PAGE_WIRED(m));
+
+				dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
+			}
+			m->overwriting = FALSE;
+		}
+		if (m->encrypted_cleaning == TRUE) {
+			m->encrypted_cleaning = FALSE;
+
+			dwp->dw_mask |= DW_clear_busy | DW_PAGE_WAKEUP;
+		}
+		m->cleaning = FALSE;
+
+		if (m->pageout) {
+			/*
+			 * With the clean queue enabled, UPL_PAGEOUT should
+			 * no longer set the pageout bit. Its pages now go
+			 * to the clean queue.
+			 */
+			assert(!(flags & UPL_PAGEOUT));
+
+			m->pageout = FALSE;
+#if MACH_CLUSTER_STATS
+			if (m->wanted) vm_pageout_target_collisions++;
+#endif
+			if ((flags & UPL_COMMIT_SET_DIRTY) ||
+			    (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))) {
+				/*
+				 * page was re-dirtied after we started
+				 * the pageout... reactivate it since 
+				 * we don't know whether the on-disk
+				 * copy matches what is now in memory
+				 */
+				SET_PAGE_DIRTY(m, FALSE);
+
+				dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
+
+				if (upl->flags & UPL_PAGEOUT) {
+					CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
+					VM_STAT_INCR(reactivations);
+					DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
+				}
+			} else {
+				/*
+				 * page has been successfully cleaned
+				 * go ahead and free it for other use
+				 */
+				if (m->object->internal) {
+					DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
+				} else {
+					DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
+				}
+				m->dirty = FALSE;
+				m->busy = TRUE;
+
+				dwp->dw_mask |= DW_vm_page_free;
+			}
+			goto commit_next_page;
+		}
+#if MACH_CLUSTER_STATS
+		if (m->wpmapped)
+			m->dirty = pmap_is_modified(m->phys_page);
+
+		if (m->dirty)   vm_pageout_cluster_dirtied++;
+		else            vm_pageout_cluster_cleaned++;
+		if (m->wanted)  vm_pageout_cluster_collisions++;
+#endif
+		/*
+		 * It is a part of the semantic of COPYOUT_FROM
+		 * UPLs that a commit implies cache sync
+		 * between the vm page and the backing store
+		 * this can be used to strip the precious bit
+		 * as well as clean
+		 */
+		if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
+			m->precious = FALSE;
+
+		if (flags & UPL_COMMIT_SET_DIRTY) {
+			SET_PAGE_DIRTY(m, FALSE);
+		} else {
+			m->dirty = FALSE;
+		}
+
+		/* with the clean queue on, move *all* cleaned pages to the clean queue */
+		if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
+			pgpgout_count++;
+
+			VM_STAT_INCR(pageouts);
+			DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
+
+			dwp->dw_mask |= DW_enqueue_cleaned;
+			vm_pageout_enqueued_cleaned_from_inactive_dirty++;
+		} else if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+			/*
+			 * page coming back in from being 'frozen'...
+			 * it was dirty before it was frozen, so keep it so
+			 * the vm_page_activate will notice that it really belongs
+			 * on the throttle queue and put it there
+			 */
+			SET_PAGE_DIRTY(m, FALSE);
+			dwp->dw_mask |= DW_vm_page_activate;
+
+		} else {
+			if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
+				dwp->dw_mask |= DW_vm_page_deactivate_internal;
+				clear_refmod |= VM_MEM_REFERENCED;
+			} else if (!m->active && !m->inactive && !m->speculative) {
+
+				if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
+					dwp->dw_mask |= DW_vm_page_speculate;
+				else if (m->reference)
+					dwp->dw_mask |= DW_vm_page_activate;
+				else {
+					dwp->dw_mask |= DW_vm_page_deactivate_internal;
+					clear_refmod |= VM_MEM_REFERENCED;
+				}
+			}
+		}
+		if (upl->flags & UPL_ACCESS_BLOCKED) {
+			/*
+			 * We blocked access to the pages in this UPL.
+			 * Clear the "busy" bit on this page before we
+			 * wake up any waiter.
+			 */
+			dwp->dw_mask |= DW_clear_busy;
+		}
+		/*
+		 * Wakeup any thread waiting for the page to be un-cleaning.
+		 */
+		dwp->dw_mask |= DW_PAGE_WAKEUP;
+
+commit_next_page:
+		if (clear_refmod)
+			pmap_clear_refmod(m->phys_page, clear_refmod);
+
+		target_offset += PAGE_SIZE_64;
+		xfer_size -= PAGE_SIZE;
+		entry++;
+
+		if (dwp->dw_mask) {
+			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+				if (dw_count >= dw_limit) {
+					/* flush the batch of deferred page operations */
+					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
+					dwp = &dw_array[0];
+					dw_count = 0;
+				}
+			} else {
+				/* busy-clear / wakeup only: cheap enough to do inline */
+				if (dwp->dw_mask & DW_clear_busy)
+					m->busy = FALSE;
+
+				if (dwp->dw_mask & DW_PAGE_WAKEUP)
+					PAGE_WAKEUP(m);
+			}
+		}
+	}
+	if (dw_count)
+		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
+	if (fast_path_possible) {
+
+		assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
+		assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
+
+		if (local_queue_count || unwired_count) {
+
+			if (local_queue_count) {
+				vm_page_t	first_local, last_local;
+				vm_page_t	first_target;
+				queue_head_t	*target_queue;
+
+				if (throttle_page)
+					target_queue = &vm_page_queue_throttled;
+				else {
+					if (flags & UPL_COMMIT_INACTIVATE) {
+						if (shadow_object->internal)
+							target_queue = &vm_page_queue_anonymous;
+						else
+							target_queue = &vm_page_queue_inactive;
+					} else
+						target_queue = &vm_page_queue_active;
+				}
+				/*
+				 * Transfer the entire local queue to a regular LRU page queues.
+				 */
+				first_local = (vm_page_t) queue_first(&local_queue);
+				last_local = (vm_page_t) queue_last(&local_queue);
+
+				vm_page_lockspin_queues();
+
+				first_target = (vm_page_t) queue_first(target_queue);
+
+				if (queue_empty(target_queue))
+					queue_last(target_queue) = (queue_entry_t) last_local;
+				else
+					queue_prev(&first_target->pageq) = (queue_entry_t) last_local;
+
+				queue_first(target_queue) = (queue_entry_t) first_local;
+				queue_prev(&first_local->pageq) = (queue_entry_t) target_queue;
+				queue_next(&last_local->pageq) = (queue_entry_t) first_target;
+
+				/*
+				 * Adjust the global page counts.
+				 */
+				if (throttle_page) {
+					vm_page_throttled_count += local_queue_count;
+				} else {
+					if (flags & UPL_COMMIT_INACTIVATE) {
+						if (shadow_object->internal)
+							vm_page_anonymous_count += local_queue_count;
+						vm_page_inactive_count += local_queue_count;
+
+						token_new_pagecount += local_queue_count;
+					} else
+						vm_page_active_count += local_queue_count;
+
+					if (shadow_object->internal)
+						vm_page_pageable_internal_count += local_queue_count;
+					else
+						vm_page_pageable_external_count += local_queue_count;
+				}
+			} else {
+				vm_page_lockspin_queues();
+			}
+			if (unwired_count) {
+				vm_page_wire_count -= unwired_count;
+				VM_CHECK_MEMORYSTATUS;
+			}
+			vm_page_unlock_queues();
+
+			shadow_object->wired_page_count -= unwired_count;
+
+			if (!shadow_object->wired_page_count) {
+			    VM_OBJECT_UNWIRED(shadow_object);
+			}
+		}
+	}
+	occupied = 1;
+
+	if (upl->flags & UPL_DEVICE_MEMORY)  {
+		occupied = 0;
+	} else if (upl->flags & UPL_LITE) {
+		int	pg_num;
+		int	i;
+
+		occupied = 0;
+
+		if (!fast_path_full_commit) {
+			pg_num = upl->size/PAGE_SIZE;
+			pg_num = (pg_num + 31) >> 5;
+
+			for (i = 0; i < pg_num; i++) {
+				if (lite_list[i] != 0) {
+					occupied = 1;
+					break;
+				}
+			}
+		}
+	} else {
+		if (queue_empty(&upl->map_object->memq))
+			occupied = 0;
+	}
+	if (occupied == 0) {
+		/*
+		 * If this UPL element belongs to a Vector UPL and is
+		 * empty, then this is the right function to deallocate
+		 * it. So go ahead set the *empty variable. The flag
+		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
+		 * should be considered relevant for the Vector UPL and not
+		 * the internal UPLs.
+		 */
+		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
+			*empty = TRUE;
+
+		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+		        /*
+			 * this is not a paging object
+			 * so we need to drop the paging reference
+			 * that was taken when we created the UPL
+			 * against this object
+			 */
+			vm_object_activity_end(shadow_object);
+			vm_object_collapse(shadow_object, 0, TRUE);
+		} else {
+		        /*
+			 * we donated the paging reference to
+			 * the map object... vm_pageout_object_terminate
+			 * will drop this reference
+			 */
+		}
+	}
+	vm_object_unlock(shadow_object);
+	if (object != shadow_object)
+	        vm_object_unlock(object);
+	
+	if(!isVectorUPL)
+		upl_unlock(upl);
+	else {
+		/* 
+		 * If we completed our operations on an UPL that is
+		 * part of a Vectored UPL and if empty is TRUE, then
+		 * we should go ahead and deallocate this UPL element. 
+		 * Then we check if this was the last of the UPL elements
+		 * within that Vectored UPL. If so, set empty to TRUE
+		 * so that in ubc_upl_commit_range or ubc_upl_commit, we
+		 * can go ahead and deallocate the Vector UPL too.
+		 */
+		if(*empty==TRUE) {
+			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
+			upl_deallocate(upl);
+		}
+		goto process_upl_to_commit;
+	}
+
+	if (pgpgout_count) {
+		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * upl_abort_range:
+ *
+ * Abort the pages in [offset, offset+size) of the given UPL: undo the
+ * cleaning/busy state set up when the UPL was created and dispose of
+ * each page according to the UPL_ABORT_* bits in 'error' (restart,
+ * mark unavailable, mark error, dump/free, re-reference, ...).
+ * An IO-wired UPL without UPL_ABORT_DUMP_PAGES is simply committed.
+ * Vector UPLs are walked sub-UPL by sub-UPL via process_upl_to_abort.
+ * *empty is set when the (sub-)UPL has no occupied pages left.
+ */
+kern_return_t
+upl_abort_range(
+	upl_t			upl, 
+	upl_offset_t		offset,
+	upl_size_t		size,
+	int			error,
+	boolean_t		*empty) 
+{
+	upl_page_info_t		*user_page_list = NULL;
+	upl_size_t		xfer_size, subupl_size = size;
+	vm_object_t		shadow_object;
+	vm_object_t		object;
+	vm_object_offset_t	target_offset;
+	upl_offset_t		subupl_offset = offset;
+	int			entry;
+	wpl_array_t	 	lite_list;
+	int			occupied;
+	struct	vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+	struct	vm_page_delayed_work	*dwp;
+	int			dw_count;
+	int			dw_limit;
+	int			isVectorUPL = 0;
+	upl_t			vector_upl = NULL;
+
+	*empty = FALSE;
+
+	if (upl == UPL_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
+		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
+
+	if((isVectorUPL = vector_upl_is_valid(upl))) {
+		vector_upl = upl;
+		upl_lock(vector_upl);
+	}
+	else
+		upl_lock(upl);
+
+process_upl_to_abort:
+	if(isVectorUPL) {
+		/* peel off the next sub-UPL covering the remaining range */
+		size = subupl_size;
+		offset = subupl_offset;
+		if(size == 0) {
+			upl_unlock(vector_upl);
+			return KERN_SUCCESS;
+		}
+		upl =  vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+		if(upl == NULL) {
+			upl_unlock(vector_upl);
+			return KERN_FAILURE;
+		}
+		subupl_size -= size;
+		subupl_offset += size;
+	}
+
+	*empty = FALSE;
+
+#if UPL_DEBUG
+	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
+
+		upl->upl_commit_index++;
+	}
+#endif
+	if (upl->flags & UPL_DEVICE_MEMORY)
+		xfer_size = 0;
+	else if ((offset + size) <= upl->size)
+	        xfer_size = size;
+	else {
+		/* requested range falls outside the UPL */
+		if(!isVectorUPL)
+			upl_unlock(upl);
+		else {
+			upl_unlock(vector_upl);
+		}
+
+		return KERN_FAILURE;
+	}
+	if (upl->flags & UPL_INTERNAL) {
+		lite_list = (wpl_array_t) 
+			((((uintptr_t)upl) + sizeof(struct upl))
+			+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+
+		user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+	} else {
+		lite_list = (wpl_array_t) 
+			(((uintptr_t)upl) + sizeof(struct upl));
+	}
+	object = upl->map_object;
+
+	if (upl->flags & UPL_SHADOWED) {
+	        vm_object_lock(object);
+		shadow_object = object->shadow;
+	} else
+		shadow_object = object;
+
+	entry = offset/PAGE_SIZE;
+	target_offset = (vm_object_offset_t)offset;
+
+	assert(!(target_offset & PAGE_MASK));
+	assert(!(xfer_size & PAGE_MASK));
+
+	if (upl->flags & UPL_KERNEL_OBJECT)
+		vm_object_lock_shared(shadow_object);
+	else
+		vm_object_lock(shadow_object);
+
+	if (upl->flags & UPL_ACCESS_BLOCKED) {
+		assert(shadow_object->blocked_access);
+		shadow_object->blocked_access = FALSE;
+		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
+	}
+
+	dwp = &dw_array[0];
+	dw_count = 0;
+	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+
+	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
+		panic("upl_abort_range: kernel_object being DUMPED");
+
+	while (xfer_size) {
+		vm_page_t	t, m;
+		unsigned int	pg_num;
+		boolean_t	needed;
+
+		pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+		assert(pg_num == target_offset/PAGE_SIZE);
+
+		needed = FALSE;
+
+		if (user_page_list)
+			needed = user_page_list[pg_num].needed;
+
+		dwp->dw_mask = 0;
+		m = VM_PAGE_NULL;
+
+		if (upl->flags & UPL_LITE) {
+
+			if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
+
+				if ( !(upl->flags & UPL_KERNEL_OBJECT))
+					m = vm_page_lookup(shadow_object, target_offset +
+							   (upl->offset - shadow_object->paging_offset));
+			}
+		}
+		if (upl->flags & UPL_SHADOWED) {
+		        if ((t = vm_page_lookup(object, target_offset))	!= VM_PAGE_NULL) {
+			        t->pageout = FALSE;
+
+				VM_PAGE_FREE(t);
+
+				if (m == VM_PAGE_NULL)
+					m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
+			}
+		}
+		if ((upl->flags & UPL_KERNEL_OBJECT))
+			goto abort_next_page;
+
+		if (m != VM_PAGE_NULL) {
+
+			assert(!m->compressor);
+
+			if (m->absent) {
+			        boolean_t must_free = TRUE;
+
+				/*
+				 * COPYOUT = FALSE case
+				 * check for error conditions which must
+				 * be passed back to the pages customer
+				 */
+				if (error & UPL_ABORT_RESTART) {
+					m->restart = TRUE;
+					m->absent = FALSE;
+					m->unusual = TRUE;
+					must_free = FALSE;
+				} else if (error & UPL_ABORT_UNAVAILABLE) {
+					m->restart = FALSE;
+					m->unusual = TRUE;
+					must_free = FALSE;
+				} else if (error & UPL_ABORT_ERROR) {
+					m->restart = FALSE;
+					m->absent = FALSE;
+					m->error = TRUE;
+					m->unusual = TRUE;
+					must_free = FALSE;
+				}
+				if (m->clustered && needed == FALSE) {
+					/*
+					 * This page was a part of a speculative
+					 * read-ahead initiated by the kernel
+					 * itself.  No one is expecting this
+					 * page and no one will clean up its
+					 * error state if it ever becomes valid
+					 * in the future.
+					 * We have to free it here.
+					 */
+					must_free = TRUE;
+				}
+
+				/*
+				 * ENCRYPTED SWAP:
+				 * If the page was already encrypted,
+				 * we don't really need to decrypt it
+				 * now.  It will get decrypted later,
+				 * on demand, as soon as someone needs
+				 * to access its contents.
+				 */
+
+				m->cleaning = FALSE;
+				m->encrypted_cleaning = FALSE;
+
+				if (m->overwriting && !m->busy) {
+					/*
+					 * this shouldn't happen since
+					 * this is an 'absent' page, but
+					 * it doesn't hurt to check for
+					 * the 'alternate' method of 
+					 * stabilizing the page...
+					 * we will mark 'busy' to be cleared
+					 * in the following code which will
+					 * take care of the primary stabilization
+					 * method (i.e. setting 'busy' to TRUE)
+					 */
+					dwp->dw_mask |= DW_vm_page_unwire;
+				}
+				m->overwriting = FALSE;
+
+				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+
+				if (must_free == TRUE)
+					dwp->dw_mask |= DW_vm_page_free;
+				else
+					dwp->dw_mask |= DW_vm_page_activate;
+			} else {
+			        /*                          
+				 * Handle the trusted pager throttle.
+				 */                     
+			        if (m->laundry)
+					dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+				if (upl->flags & UPL_ACCESS_BLOCKED) {
+					/*
+					 * We blocked access to the pages in this UPL.
+					 * Clear the "busy" bit and wake up any waiter
+					 * for this page.
+					 */
+					dwp->dw_mask |= DW_clear_busy;
+				}
+				if (m->overwriting) {
+					if (m->busy)
+						dwp->dw_mask |= DW_clear_busy;
+					else {
+						/*
+						 * deal with the 'alternate' method
+						 * of stabilizing the page...
+						 * we will either free the page
+						 * or mark 'busy' to be cleared
+						 * in the following code which will
+						 * take care of the primary stabilization
+						 * method (i.e. setting 'busy' to TRUE)
+						 */
+						dwp->dw_mask |= DW_vm_page_unwire;
+					}
+					m->overwriting = FALSE;
+				}
+				if (m->encrypted_cleaning == TRUE) {
+					m->encrypted_cleaning = FALSE;
+
+					dwp->dw_mask |= DW_clear_busy;
+				}
+				m->pageout = FALSE;
+				m->cleaning = FALSE;
+#if	MACH_PAGEMAP
+				vm_external_state_clr(m->object->existence_map, m->offset);
+#endif	/* MACH_PAGEMAP */
+				if (error & UPL_ABORT_DUMP_PAGES) {
+					pmap_disconnect(m->phys_page);
+
+					dwp->dw_mask |= DW_vm_page_free;
+				} else {
+					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
+						if (error & UPL_ABORT_REFERENCE) {
+							/*
+							 * we've been told to explictly
+							 * reference this page... for 
+							 * file I/O, this is done by
+							 * implementing an LRU on the inactive q
+							 */
+							dwp->dw_mask |= DW_vm_page_lru;
+
+						} else if (!m->active && !m->inactive && !m->speculative)
+							dwp->dw_mask |= DW_vm_page_deactivate_internal;
+					}
+					dwp->dw_mask |= DW_PAGE_WAKEUP;
+				}
+			}
+		}
+abort_next_page:
+		target_offset += PAGE_SIZE_64;
+		xfer_size -= PAGE_SIZE;
+		entry++;
+
+		if (dwp->dw_mask) {
+			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+				if (dw_count >= dw_limit) {
+					/* flush the batch of deferred page operations */
+					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+				
+					dwp = &dw_array[0];
+					dw_count = 0;
+				}
+			} else {
+				/* busy-clear / wakeup only: cheap enough to do inline */
+				if (dwp->dw_mask & DW_clear_busy)
+					m->busy = FALSE;
+
+				if (dwp->dw_mask & DW_PAGE_WAKEUP)
+					PAGE_WAKEUP(m);
+			}
+		}
+	}
+	if (dw_count)
+		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
+	occupied = 1;
+
+	if (upl->flags & UPL_DEVICE_MEMORY)  {
+		occupied = 0;
+	} else if (upl->flags & UPL_LITE) {
+		int	pg_num;
+		int	i;
+
+		pg_num = upl->size/PAGE_SIZE;
+		pg_num = (pg_num + 31) >> 5;
+		occupied = 0;
+
+		for (i = 0; i < pg_num; i++) {
+			if (lite_list[i] != 0) {
+				occupied = 1;
+				break;
+			}
+		}
+	} else {
+		if (queue_empty(&upl->map_object->memq))
+			occupied = 0;
+	}
+	if (occupied == 0) {
+		/*
+		 * If this UPL element belongs to a Vector UPL and is
+		 * empty, then this is the right function to deallocate
+		 * it. So go ahead set the *empty variable. The flag
+		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
+		 * should be considered relevant for the Vector UPL and
+		 * not the internal UPLs.
+		 */
+		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
+			*empty = TRUE;
+
+		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+		        /*
+			 * this is not a paging object
+			 * so we need to drop the paging reference
+			 * that was taken when we created the UPL
+			 * against this object
+			 */
+			vm_object_activity_end(shadow_object);
+			vm_object_collapse(shadow_object, 0, TRUE);
+		} else {
+		        /*
+			 * we donated the paging reference to
+			 * the map object... vm_pageout_object_terminate
+			 * will drop this reference
+			 */
+		}
+	}
+	vm_object_unlock(shadow_object);
+	if (object != shadow_object)
+	        vm_object_unlock(object);
+	
+	if(!isVectorUPL)
+		upl_unlock(upl);
+	else {
+		/* 
+		* If we completed our operations on an UPL that is
+	 	* part of a Vectored UPL and if empty is TRUE, then
+	 	* we should go ahead and deallocate this UPL element. 
+	 	* Then we check if this was the last of the UPL elements
+	 	* within that Vectored UPL. If so, set empty to TRUE
+	 	* so that in ubc_upl_abort_range or ubc_upl_abort, we
+	 	* can go ahead and deallocate the Vector UPL too.
+	 	*/
+		if(*empty == TRUE) {
+			*empty = vector_upl_set_subupl(vector_upl, upl,0);
+			upl_deallocate(upl);
+		}
+		goto process_upl_to_abort;
+	}
+
+	return KERN_SUCCESS;
+}
+
+
+/*
+ * upl_abort:
+ *
+ * Abort an entire UPL, i.e. the full range [0, upl->size).
+ * Thin convenience wrapper around upl_abort_range(); the
+ * "did the UPL drain" result is discarded.
+ */
+kern_return_t
+upl_abort(
+	upl_t	upl,
+	int	error)
+{
+	boolean_t	was_emptied;
+
+	return upl_abort_range(upl, 0, upl->size, error, &was_emptied);
+}
+
+
+/* an option on commit should be wire */
+/*
+ * upl_commit:
+ *
+ * Commit an entire UPL, i.e. the full range [0, upl->size),
+ * with no commit flags.  Thin convenience wrapper around
+ * upl_commit_range(); the "did the UPL drain" result is discarded.
+ */
+kern_return_t
+upl_commit(
+	upl_t			upl,
+	upl_page_info_t		*page_list,
+	mach_msg_type_number_t	count)
+{
+	boolean_t	was_emptied;
+
+	return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &was_emptied);
+}
+
+
+/*
+ * iopl_valid_data:
+ *
+ * Walk the pages of a plain (non-vector, non-internal) IO-wire UPL and
+ * promote any busy+absent page to valid: mark it present and dirty,
+ * take a wire reference, and wake any waiters.  Wire counts on the
+ * object and the global vm_page_wire_count are adjusted in bulk at
+ * the end.
+ */
+void
+iopl_valid_data(
+	upl_t	upl)
+{
+	vm_object_t	object;
+	vm_offset_t	offset;
+	vm_page_t	m, nxt_page = VM_PAGE_NULL;
+	upl_size_t	size;
+	int		wired_count = 0;
+
+	if (upl == NULL)
+		panic("iopl_valid_data: NULL upl");
+	if (vector_upl_is_valid(upl))
+		panic("iopl_valid_data: vector upl");
+	if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE)
+		panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
+
+	object = upl->map_object;
+
+	if (object == kernel_object || object == compressor_object)
+		panic("iopl_valid_data: object == kernel or compressor");
+
+	if (object->purgable == VM_PURGABLE_VOLATILE)
+		panic("iopl_valid_data: object == VM_PURGABLE_VOLATILE");
+
+	size = upl->size;
+
+	vm_object_lock(object);
+
+	/*
+	 * Fully-resident object covering exactly this UPL: walk memq
+	 * directly.  Otherwise look each page up by offset.
+	 * NOTE(review): 'offset' is only initialized on the lookup path;
+	 * the memq path presumably guarantees nxt_page stays non-NULL for
+	 * the whole walk (resident_page_count == size/PAGE_SIZE) — confirm.
+	 */
+	if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE))
+		nxt_page = (vm_page_t)queue_first(&object->memq);
+	else
+		offset = 0 + upl->offset - object->paging_offset;
+
+	while (size) {
+
+		if (nxt_page != VM_PAGE_NULL) {
+			m = nxt_page;
+			nxt_page = (vm_page_t)queue_next(&nxt_page->listq);
+		} else {
+			m = vm_page_lookup(object, offset);
+			offset += PAGE_SIZE;
+
+			if (m == VM_PAGE_NULL)
+				panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
+		}
+		if (m->busy) {
+			if (!m->absent)
+				panic("iopl_valid_data: busy page w/o absent");
+
+			if (m->pageq.next || m->pageq.prev)
+				panic("iopl_valid_data: busy+absent page on page queue");
+
+			m->absent = FALSE;
+			m->dirty = TRUE;
+			m->wire_count++;
+			wired_count++;
+			
+			PAGE_WAKEUP_DONE(m);
+		}
+		size -= PAGE_SIZE;
+	}
+	if (wired_count) {
+
+		if (!object->wired_page_count) {
+		    VM_OBJECT_WIRED(object);
+		}
+		object->wired_page_count += wired_count;
+
+		vm_page_lockspin_queues();
+		vm_page_wire_count += wired_count;
+		vm_page_unlock_queues();
+	}
+	vm_object_unlock(object);
+}
+
+/*
+ * vm_object_set_pmap_cache_attr:
+ *
+ * Push the object's WIMG (cache-attribute) bits down to the pmap layer
+ * for a batch of pages described by user_page_list.  When the object
+ * uses the default caching policy there is nothing to propagate and
+ * this routine is a no-op.
+ */
+void
+vm_object_set_pmap_cache_attr(
+ vm_object_t object,
+ upl_page_info_array_t user_page_list,
+ unsigned int num_pages,
+ boolean_t batch_pmap_op)
+{
+ unsigned int wimg_attr;
+
+ assert(user_page_list);
+
+ wimg_attr = object->wimg_bits & VM_WIMG_MASK;
+
+ if (wimg_attr == VM_WIMG_USE_DEFAULT)
+ return;
+
+ PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, wimg_attr, num_pages, batch_pmap_op);
+}
+
+
+/*
+ * Forward declarations for the IOPL fast paths used by
+ * vm_object_iopl_request (fully-resident and empty-object cases).
+ */
+boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t);
+kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_object_offset_t *, int);
+
+
+
+/*
+ * vm_object_iopl_wire_full:
+ *
+ * Fast path used by vm_object_iopl_request when the object is fully
+ * resident: walk the object's page queue directly, wiring each page
+ * and recording it in the UPL lite list / user page list, with no
+ * per-page vm_page_lookup.
+ *
+ * Returns TRUE when every resident page was wired.  Returns FALSE as
+ * soon as a page is found in a state the fast path can't handle
+ * (busy, fictitious, absent, error, cleaning, restart, encrypted, or
+ * on a pageout laundry); the caller then falls back to the slow path,
+ * which uses the lite list to skip pages already processed here.
+ *
+ * Expects the object locked exclusively; holds the page-queues lock
+ * for the walk, yielding it periodically to bound hold time.
+ */
+boolean_t
+vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+ wpl_array_t lite_list, upl_control_flags_t cntrl_flags)
+{
+ vm_page_t dst_page;
+ vm_tag_t tag;
+ unsigned int entry;
+ int page_count;
+ int delayed_unlock = 0;
+ boolean_t retval = TRUE;
+
+ vm_object_lock_assert_exclusive(object);
+ assert(object->purgable != VM_PURGABLE_VOLATILE);
+ assert(object->purgable != VM_PURGABLE_EMPTY);
+ assert(object->pager == NULL);
+ assert(object->copy == NULL);
+ assert(object->shadow == NULL);
+
+ tag = UPL_MEMORY_TAG(cntrl_flags);
+ page_count = object->resident_page_count;
+ dst_page = (vm_page_t)queue_first(&object->memq);
+
+ vm_page_lock_queues();
+
+ while (page_count--) {
+
+ /*
+ * any page in a transient state forces us out to the
+ * slow path, which knows how to wait for / handle it
+ */
+ if (dst_page->busy ||
+ dst_page->fictitious ||
+ dst_page->absent ||
+ dst_page->error ||
+ dst_page->cleaning ||
+ dst_page->restart ||
+ dst_page->encrypted ||
+ dst_page->laundry) {
+ retval = FALSE;
+ goto done;
+ }
+ /*
+ * force-coherency requests can't use pages the kernel has
+ * written to; let the slow path deal with those
+ */
+ if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
+ retval = FALSE;
+ goto done;
+ }
+ dst_page->reference = TRUE;
+
+ vm_page_wire(dst_page, tag, FALSE);
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+ /* caller may modify the page, so mark it dirty up front */
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ }
+ entry = (unsigned int)(dst_page->offset / PAGE_SIZE);
+ /* entry is unsigned, so only the upper bound needs checking */
+ assert(entry < object->resident_page_count);
+ lite_list[entry>>5] |= 1 << (entry & 31);
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = dst_page->phys_page;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].precious = dst_page->precious;
+ user_page_list[entry].device = FALSE;
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = FALSE;
+ user_page_list[entry].cs_tainted = FALSE;
+ user_page_list[entry].cs_nx = FALSE;
+ user_page_list[entry].needed = FALSE;
+ user_page_list[entry].mark = FALSE;
+ }
+ /*
+ * don't hold the page-queues lock across the whole walk;
+ * yield it periodically so other threads can make progress
+ */
+ if (delayed_unlock++ > 256) {
+ delayed_unlock = 0;
+ lck_mtx_yield(&vm_page_queue_lock);
+
+ VM_CHECK_MEMORYSTATUS;
+ }
+ dst_page = (vm_page_t)queue_next(&dst_page->listq);
+ }
+done:
+ vm_page_unlock_queues();
+
+ VM_CHECK_MEMORYSTATUS;
+
+ return (retval);
+}
+
+
+/*
+ * vm_object_iopl_wire_empty:
+ *
+ * Fast path used by vm_object_iopl_request when the object has no
+ * resident pages: grab page_count fresh pages from the free list,
+ * insert them into the object starting at *dst_offset, wire them, and
+ * record them in the UPL lite list / user page list.
+ *
+ * Returns KERN_SUCCESS, or MACH_SEND_INTERRUPTED if an interruptible
+ * wait for free pages was aborted; in either case *dst_offset has
+ * been advanced past the last page actually inserted, so the caller
+ * can unwind on failure.
+ *
+ * Expects the object locked exclusively.
+ */
+kern_return_t
+vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+ wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_object_offset_t *dst_offset, int page_count)
+{
+ vm_page_t dst_page;
+ vm_tag_t tag;
+ boolean_t no_zero_fill = FALSE;
+ int interruptible;
+ int pages_wired = 0;
+ int pages_inserted = 0;
+ int entry = 0;
+ /* purgeable-ledger charges accumulated by vm_page_insert_internal,
+ applied in one shot after the loop */
+ uint64_t delayed_ledger_update = 0;
+ kern_return_t ret = KERN_SUCCESS;
+
+ vm_object_lock_assert_exclusive(object);
+ assert(object->purgable != VM_PURGABLE_VOLATILE);
+ assert(object->purgable != VM_PURGABLE_EMPTY);
+ assert(object->pager == NULL);
+ assert(object->copy == NULL);
+ assert(object->shadow == NULL);
+
+ if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
+ interruptible = THREAD_ABORTSAFE;
+ else
+ interruptible = THREAD_UNINT;
+
+ if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
+ no_zero_fill = TRUE;
+
+ tag = UPL_MEMORY_TAG(cntrl_flags);
+
+ while (page_count--) {
+
+ /* block (interruptibly, if requested) until a free page is available */
+ while ( (dst_page = vm_page_grab()) == VM_PAGE_NULL) {
+
+ OSAddAtomic(page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
+ if (vm_page_wait(interruptible) == FALSE) {
+ /*
+ * interrupted case
+ */
+ OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
+
+ ret = MACH_SEND_INTERRUPTED;
+ goto done;
+ }
+ OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
+ }
+ /*
+ * caller asked to skip zero-fill, so leave the page marked
+ * absent (and busy) until its contents are supplied later --
+ * presumably via iopl_valid_data once the fill completes
+ */
+ if (no_zero_fill == FALSE)
+ vm_page_zero_fill(dst_page);
+ else
+ dst_page->absent = TRUE;
+
+ dst_page->reference = TRUE;
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+ /* caller may modify the page, so mark it dirty up front */
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ }
+ /* only pages with valid (zero-filled) contents are wired here;
+ absent pages stay busy and unwired until made valid */
+ if (dst_page->absent == FALSE) {
+ dst_page->wire_count++;
+ pages_wired++;
+ PAGE_WAKEUP_DONE(dst_page);
+ }
+ pages_inserted++;
+
+ vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
+
+ lite_list[entry>>5] |= 1 << (entry & 31);
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = dst_page->phys_page;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].pageout = FALSE;
+ user_page_list[entry].precious = FALSE;
+ user_page_list[entry].device = FALSE;
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = FALSE;
+ user_page_list[entry].cs_tainted = FALSE;
+ user_page_list[entry].cs_nx = FALSE;
+ user_page_list[entry].needed = FALSE;
+ user_page_list[entry].mark = FALSE;
+ }
+ entry++;
+ *dst_offset += PAGE_SIZE_64;
+ }
+done:
+ /* fold the newly wired pages into the global wire count */
+ if (pages_wired) {
+ vm_page_lockspin_queues();
+ vm_page_wire_count += pages_wired;
+ vm_page_unlock_queues();
+ }
+ /* account the insertions against the internal/external page counters */
+ if (pages_inserted) {
+ if (object->internal) {
+ OSAddAtomic(pages_inserted, &vm_page_internal_count);
+ } else {
+ OSAddAtomic(pages_inserted, &vm_page_external_count);
+ }
+ }
+ /* apply the ledger charges deferred by vm_page_insert_internal */
+ if (delayed_ledger_update) {
+ task_t owner;
+
+ owner = object->vo_purgeable_owner;
+ assert(owner);
+
+ /* more non-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ delayed_ledger_update);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ delayed_ledger_update);
+ }
+ return (ret);
+}
+
+
+unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
+
+
+kern_return_t
+vm_object_iopl_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ upl_size_t size,
+ upl_t *upl_ptr,
+ upl_page_info_array_t user_page_list,
+ unsigned int *page_list_count,
+ upl_control_flags_t cntrl_flags)
+{
+ vm_page_t dst_page;
+ vm_object_offset_t dst_offset;
+ upl_size_t xfer_size;
+ upl_t upl = NULL;
+ unsigned int entry;
+ wpl_array_t lite_list = NULL;
+ int no_zero_fill = FALSE;
+ unsigned int size_in_pages;
+ u_int32_t psize;
+ kern_return_t ret;
+ vm_prot_t prot;
+ struct vm_object_fault_info fault_info;
+ struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+ struct vm_page_delayed_work *dwp;
+ int dw_count;
+ int dw_limit;
+ int dw_index;
+ boolean_t caller_lookup;
+ int io_tracking_flag = 0;
+ int interruptible;
+
+ boolean_t set_cache_attr_needed = FALSE;
+ boolean_t free_wired_pages = FALSE;
+ boolean_t fast_path_empty_req = FALSE;
+ boolean_t fast_path_full_req = FALSE;
+
+ if (cntrl_flags & ~UPL_VALID_FLAGS) {
+ /*
+ * For forward compatibility's sake,
+ * reject any unknown flag.
+ */
+ return KERN_INVALID_VALUE;
+ }
+ if (vm_lopage_needed == FALSE)
+ cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
+
+ if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
+ if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
+ return KERN_INVALID_VALUE;
+
+ if (object->phys_contiguous) {
+ if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
+ return KERN_INVALID_ADDRESS;
+
+ if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
+ return KERN_INVALID_ADDRESS;
+ }
+ }
+
+ if (cntrl_flags & UPL_ENCRYPT) {
+ /*
+ * ENCRYPTED SWAP:
+ * The paging path doesn't use this interface,
+ * so we don't support the UPL_ENCRYPT flag
+ * here. We won't encrypt the pages.
+ */
+ assert(! (cntrl_flags & UPL_ENCRYPT));
+ }
+ if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
+ no_zero_fill = TRUE;
+
+ if (cntrl_flags & UPL_COPYOUT_FROM)
+ prot = VM_PROT_READ;
+ else
+ prot = VM_PROT_READ | VM_PROT_WRITE;
+
+ if ((!object->internal) && (object->paging_offset != 0))
+ panic("vm_object_iopl_request: external object with non-zero paging offset\n");
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((object->io_tracking && object != kernel_object) || upl_debug_enabled)
+ io_tracking_flag |= UPL_CREATE_IO_TRACKING;
+#endif
+
+#if CONFIG_IOSCHED
+ if (object->io_tracking) {
+ /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
+ if (object != kernel_object)
+ io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+ }
+#endif
+
+ if (object->phys_contiguous)
+ psize = PAGE_SIZE;
+ else
+ psize = size;
+
+ if (cntrl_flags & UPL_SET_INTERNAL) {
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
+
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
+ ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
+ if (size == 0) {
+ user_page_list = NULL;
+ lite_list = NULL;
+ }
+ } else {
+ upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
+
+ lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ lite_list = NULL;
+ }
+ }
+ if (user_page_list)
+ user_page_list[0].device = FALSE;
+ *upl_ptr = upl;
+
+ upl->map_object = object;
+ upl->size = size;
+
+ size_in_pages = size / PAGE_SIZE;
+
+ if (object == kernel_object &&
+ !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
+ upl->flags |= UPL_KERNEL_OBJECT;
+#if UPL_DEBUG
+ vm_object_lock(object);
+#else
+ vm_object_lock_shared(object);
+#endif
+ } else {
+ vm_object_lock(object);
+ vm_object_activity_begin(object);
+ }
+ /*
+ * paging in progress also protects the paging_offset
+ */
+ upl->offset = offset + object->paging_offset;
+
+ if (cntrl_flags & UPL_BLOCK_ACCESS) {
+ /*
+ * The user requested that access to the pages in this UPL
+ * be blocked until the UPL is commited or aborted.
+ */
+ upl->flags |= UPL_ACCESS_BLOCKED;
+ }
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (upl->flags & UPL_TRACKED_BY_OBJECT) {
+ vm_object_activity_begin(object);
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+ }
+#endif
+
+ if (object->phys_contiguous) {
+
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
+ assert(!object->blocked_access);
+ object->blocked_access = TRUE;
+ }
+
+ vm_object_unlock(object);
+
+ /*
+ * don't need any shadow mappings for this one
+ * since it is already I/O memory
+ */
+ upl->flags |= UPL_DEVICE_MEMORY;
+
+ upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);
+
+ if (user_page_list) {
+ user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
+ user_page_list[0].device = TRUE;
+ }
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
+ *page_list_count = 0;
+ else
+ *page_list_count = 1;
+ }
+ return KERN_SUCCESS;
+ }
+ if (object != kernel_object && object != compressor_object) {
+ /*
+ * Protect user space from future COW operations
+ */
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+ if (!object->true_share &&
+ vm_object_tracking_inited) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int num = 0;
+
+ num = OSBacktrace(bt,
+ VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_TRUESHARE,
+ bt,
+ num);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+
+ object->true_share = TRUE;
+
+ if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
+ object->copy != VM_OBJECT_NULL) {
+ /*
+ * Honor copy-on-write obligations
+ *
+ * The caller is gathering these pages and
+ * might modify their contents. We need to
+ * make sure that the copy object has its own
+ * private copies of these pages before we let
+ * the caller modify them.
+ *
+ * NOTE: someone else could map the original object
+ * after we've done this copy-on-write here, and they
+ * could then see an inconsistent picture of the memory
+ * while it's being modified via the UPL. To prevent this,
+ * we would have to block access to these pages until the
+ * UPL is released. We could use the UPL_BLOCK_ACCESS
+ * code path for that...
+ */
+ vm_object_update(object,
+ offset,
+ size,
+ NULL,
+ NULL,
+ FALSE, /* should_return */
+ MEMORY_OBJECT_COPY_SYNC,
+ VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
+ iopl_cow++;
+ iopl_cow_pages += size >> PAGE_SHIFT;
+#endif
+ }
+ if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
+ object->purgable != VM_PURGABLE_VOLATILE &&
+ object->purgable != VM_PURGABLE_EMPTY &&
+ object->copy == NULL &&
+ size == object->vo_size &&
+ offset == 0 &&
+ object->shadow == NULL &&
+ object->pager == NULL)
+ {
+ if (object->resident_page_count == size_in_pages)
+ {
+ assert(object != compressor_object);
+ assert(object != kernel_object);
+ fast_path_full_req = TRUE;
+ }
+ else if (object->resident_page_count == 0)
+ {
+ assert(object != compressor_object);
+ assert(object != kernel_object);
+ fast_path_empty_req = TRUE;
+ set_cache_attr_needed = TRUE;
+ }
+ }
+
+ if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
+ interruptible = THREAD_ABORTSAFE;
+ else
+ interruptible = THREAD_UNINT;
+
+ entry = 0;
+
+ xfer_size = size;
+ dst_offset = offset;
+ dw_count = 0;
+
+ if (fast_path_full_req) {
+
+ if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags) == TRUE)
+ goto finish;
+ /*
+ * we couldn't complete the processing of this request on the fast path
+ * so fall through to the slow path and finish up
+ */
+
+ } else if (fast_path_empty_req) {
+
+ if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
+ ret = KERN_MEMORY_ERROR;
+ goto return_err;
+ }
+ ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, &dst_offset, size_in_pages);
+
+ if (ret) {
+ free_wired_pages = TRUE;
+ goto return_err;
+ }
+ goto finish;
+ }
+
+ fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
+ fault_info.user_tag = 0;
+ fault_info.lo_offset = offset;
+ fault_info.hi_offset = offset + xfer_size;
+ fault_info.no_cache = FALSE;
+ fault_info.stealth = FALSE;
+ fault_info.io_sync = FALSE;
+ fault_info.cs_bypass = FALSE;
+ fault_info.mark_zf_absent = TRUE;
+ fault_info.interruptible = interruptible;
+ fault_info.batch_pmap_op = TRUE;
+
+ dwp = &dw_array[0];
+ dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+
+ while (xfer_size) {
+ vm_fault_return_t result;
+
+ dwp->dw_mask = 0;
+
+ if (fast_path_full_req) {
+ /*
+ * if we get here, it means that we ran into a page
+ * state we couldn't handle in the fast path and
+ * bailed out to the slow path... since the order
+ * we look at pages is different between the 2 paths,
+ * the following check is needed to determine whether
+ * this page was already processed in the fast path
+ */
+ if (lite_list[entry>>5] & (1 << (entry & 31)))
+ goto skip_page;
+ }
+ dst_page = vm_page_lookup(object, dst_offset);
+
+ /*
+ * ENCRYPTED SWAP:
+ * If the page is encrypted, we need to decrypt it,
+ * so force a soft page fault.
+ */
+ if (dst_page == VM_PAGE_NULL ||
+ dst_page->busy ||
+ dst_page->encrypted ||
+ dst_page->error ||
+ dst_page->restart ||
+ dst_page->absent ||
+ dst_page->fictitious) {
+
+ if (object == kernel_object)
+ panic("vm_object_iopl_request: missing/bad page in kernel object\n");
+ if (object == compressor_object)
+ panic("vm_object_iopl_request: missing/bad page in compressor object\n");
+
+ if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
+ ret = KERN_MEMORY_ERROR;
+ goto return_err;
+ }
+ set_cache_attr_needed = TRUE;
+
+ /*
+ * We just looked up the page and the result remains valid
+ * until the object lock is release, so send it to
+ * vm_fault_page() (as "dst_page"), to avoid having to
+ * look it up again there.
+ */
+ caller_lookup = TRUE;
+
+ do {
+ vm_page_t top_page;
+ kern_return_t error_code;
+
+ fault_info.cluster_size = xfer_size;
+
+ vm_object_paging_begin(object);
+
+ result = vm_fault_page(object, dst_offset,
+ prot | VM_PROT_WRITE, FALSE,
+ caller_lookup,
+ &prot, &dst_page, &top_page,
+ (int *)0,
+ &error_code, no_zero_fill,
+ FALSE, &fault_info);
+
+ /* our lookup is no longer valid at this point */
+ caller_lookup = FALSE;
+
+ switch (result) {
+
+ case VM_FAULT_SUCCESS:
+
+ if ( !dst_page->absent) {
+ PAGE_WAKEUP_DONE(dst_page);
+ } else {
+ /*
+ * we only get back an absent page if we
+ * requested that it not be zero-filled
+ * because we are about to fill it via I/O
+ *
+ * absent pages should be left BUSY
+ * to prevent them from being faulted
+ * into an address space before we've
+ * had a chance to complete the I/O on
+ * them since they may contain info that
+ * shouldn't be seen by the faulting task
+ */
+ }
+ /*
+ * Release paging references and
+ * top-level placeholder page, if any.
+ */
+ if (top_page != VM_PAGE_NULL) {
+ vm_object_t local_object;
+
+ local_object = top_page->object;
+
+ if (top_page->object != dst_page->object) {
+ vm_object_lock(local_object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(local_object);
+ vm_object_unlock(local_object);
+ } else {
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(local_object);
+ }
+ }
+ vm_object_paging_end(object);
+ break;
+
+ case VM_FAULT_RETRY:
+ vm_object_lock(object);
+ break;
+
+ case VM_FAULT_MEMORY_SHORTAGE:
+ OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);