+
+
+kern_return_t
+vm_object_iopl_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_size_t size,
+ upl_t *upl_ptr,
+ upl_page_info_array_t user_page_list,
+ unsigned int *page_list_count,
+ int cntrl_flags)
+{
+ vm_page_t dst_page;
+ vm_object_offset_t dst_offset = offset;
+ vm_size_t xfer_size = size;
+ upl_t upl = NULL;
+ int entry;
+ wpl_array_t lite_list;
+ int page_field_size;
+ int delayed_unlock = 0;
+
+ vm_page_t alias_page = NULL;
+ kern_return_t ret;
+ vm_prot_t prot;
+
+
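+ /*
+ * If the caller will only read from the pages (UPL_COPYOUT_FROM),
+ * read access is sufficient; otherwise the pages must also be
+ * writable.
+ */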
+ if(cntrl_flags & UPL_COPYOUT_FROM) {
+ prot = VM_PROT_READ;
+ } else {
+ prot = VM_PROT_READ | VM_PROT_WRITE;
+ }
+
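+ /*
+ * A single UPL describes at most MAX_UPL_TRANSFER pages, so clamp
+ * the request size unless the object is physically contiguous.
+ */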
+ if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
+ size = MAX_UPL_TRANSFER * page_size;
+ /* keep the loop's transfer count in sync with the clamped size */
+ xfer_size = size;
+ }
+
+ if(cntrl_flags & UPL_SET_INTERNAL) {
+ if(page_list_count != NULL)
+ *page_list_count = MAX_UPL_TRANSFER;
+ }
+ if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
+ ((page_list_count != NULL) && (*page_list_count != 0) &&
+ (*page_list_count < (size/page_size))))
+ return KERN_INVALID_ARGUMENT;
+
+ if((!object->internal) && (object->paging_offset != 0))
+ panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
+
+ if(object->phys_contiguous) {
+ /* No paging operations are possible against this memory */
+ /* and so no need for map object, ever */
+ cntrl_flags |= UPL_SET_LITE;
+ }
+
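+ /*
+ * Create the UPL. For an internal UPL the upl_page_info array lives
+ * directly after the upl structure; a lite UPL also keeps a bitmap
+ * (lite_list) with one bit per page of the request.
+ */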
+ if(upl_ptr) {
+ if(cntrl_flags & UPL_SET_INTERNAL) {
+ if(cntrl_flags & UPL_SET_LITE) {
+ upl = upl_create(
+ UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
+ size);
+ user_page_list = (upl_page_info_t *)
+ (((vm_offset_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t)
+ (((vm_offset_t)user_page_list) +
+ ((size/PAGE_SIZE) *
+ sizeof(upl_page_info_t)));
+ page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+ page_field_size =
+ (page_field_size + 3) & 0xFFFFFFFC;
+ bzero((char *)lite_list, page_field_size);
+ upl->flags =
+ UPL_LITE | UPL_INTERNAL | UPL_IO_WIRE;
+ } else {
+ upl = upl_create(UPL_CREATE_INTERNAL, size);
+ user_page_list = (upl_page_info_t *)
+ (((vm_offset_t)upl)
+ + sizeof(struct upl));
+ upl->flags = UPL_INTERNAL | UPL_IO_WIRE;
+ }
+ } else {
+ if(cntrl_flags & UPL_SET_LITE) {
+ upl = upl_create(UPL_CREATE_LITE, size);
+ lite_list = (wpl_array_t)
+ (((vm_offset_t)upl) + sizeof(struct upl));
+ page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+ page_field_size =
+ (page_field_size + 3) & 0xFFFFFFFC;
+ bzero((char *)lite_list, page_field_size);
+ upl->flags = UPL_LITE | UPL_IO_WIRE;
+ } else {
+ upl = upl_create(UPL_CREATE_EXTERNAL, size);
+ upl->flags = UPL_IO_WIRE;
+ }
+ }
+
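+ /*
+ * Physically contiguous memory is treated as device memory: no
+ * per-page bookkeeping is needed, so record the base page and return.
+ */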
+ if(object->phys_contiguous) {
+ upl->map_object = object;
+ /* don't need any shadow mappings for this one */
+ /* since it is already I/O memory */
+ upl->flags |= UPL_DEVICE_MEMORY;
+
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ vm_object_unlock(object);
+
+ /* paging in progress also protects the paging_offset */
+ upl->offset = offset + object->paging_offset;
+ upl->size = size;
+ *upl_ptr = upl;
+ if(user_page_list) {
+ user_page_list[0].phys_addr =
+ (offset + object->shadow_offset)>>PAGE_SHIFT;
+ user_page_list[0].device = TRUE;
+ }
+
+ if(page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL) {
+ *page_list_count = 0;
+ } else {
+ *page_list_count = 1;
+ }
+ }
+ return KERN_SUCCESS;
+ }
+ if(user_page_list)
+ user_page_list[0].device = FALSE;
+
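+ /*
+ * A lite UPL maps pages straight from the original object; otherwise
+ * create a pageout shadow object to hold the private alias pages.
+ */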
+ if(cntrl_flags & UPL_SET_LITE) {
+ upl->map_object = object;
+ } else {
+ upl->map_object = vm_object_allocate(size);
+ vm_object_lock(upl->map_object);
+ upl->map_object->shadow = object;
+ upl->map_object->pageout = TRUE;
+ upl->map_object->can_persist = FALSE;
+ upl->map_object->copy_strategy =
+ MEMORY_OBJECT_COPY_NONE;
+ upl->map_object->shadow_offset = offset;
+ upl->map_object->wimg_bits = object->wimg_bits;
+ vm_object_unlock(upl->map_object);
+ }
+ }
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ if (!object->phys_contiguous) {
+ /* Protect user space from future COW operations */
+ object->true_share = TRUE;
+ if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+
+ /* we can lock the upl offset now that paging_in_progress is set */
+ if(upl_ptr) {
+ upl->size = size;
+ upl->offset = offset + object->paging_offset;
+ *upl_ptr = upl;
+#ifdef UBC_DEBUG
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UBC_DEBUG */
+ }
+
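+ /*
+ * Walk the request page by page: fault each page in if necessary,
+ * wire it, note it in the lite bitmap or via an alias page, and fill
+ * in the caller's page list.
+ */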
+ entry = 0;
+ while (xfer_size) {
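+ /*
+ * For the non-lite case make sure a fictitious page is available for
+ * aliasing; the object lock (and any held page queue lock) is
+ * dropped around the grab.
+ */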
+ if((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
+ if (delayed_unlock) {
+ delayed_unlock = 0;
+ vm_page_unlock_queues();
+ }
+ vm_object_unlock(object);
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
+ vm_object_lock(object);
+ }
+ dst_page = vm_page_lookup(object, dst_offset);
+
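+ /*
+ * If the page is absent, busy, or otherwise unusable, run the fault
+ * path until a usable resident page is obtained.
+ */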
+ if ((dst_page == VM_PAGE_NULL) || (dst_page->busy) ||
+ (dst_page->unusual && (dst_page->error ||
+ dst_page->restart || dst_page->absent ||
+ dst_page->fictitious ||
+ prot & dst_page->page_lock))) {
+ vm_fault_return_t result;
+ do {
+ vm_page_t top_page;
+ kern_return_t error_code;
+ int interruptible;
+
+ vm_object_offset_t lo_offset = offset;
+ vm_object_offset_t hi_offset = offset + size;
+
+
+ if (delayed_unlock) {
+ delayed_unlock = 0;
+ vm_page_unlock_queues();
+ }
+
+ if(cntrl_flags & UPL_SET_INTERRUPTIBLE) {
+ interruptible = THREAD_ABORTSAFE;
+ } else {
+ interruptible = THREAD_UNINT;
+ }
+
+ result = vm_fault_page(object, dst_offset,
+ prot | VM_PROT_WRITE, FALSE,
+ interruptible,
+ lo_offset, hi_offset,
+ VM_BEHAVIOR_SEQUENTIAL,
+ &prot, &dst_page, &top_page,
+ (int *)0,
+ &error_code, FALSE, FALSE, NULL, 0);
+
+ switch(result) {
+ case VM_FAULT_SUCCESS:
+
+ PAGE_WAKEUP_DONE(dst_page);
+
+ /*
+ * Release paging references and
+ * top-level placeholder page, if any.
+ */
+
+ if(top_page != VM_PAGE_NULL) {
+ vm_object_t local_object;
+
+ local_object = top_page->object;
+ if(top_page->object != dst_page->object) {
+ vm_object_lock(local_object);
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(local_object);
+ vm_object_unlock(local_object);
+ } else {
+ VM_PAGE_FREE(top_page);
+ vm_object_paging_end(local_object);
+ }
+ }
+
+ break;
+
+
+ case VM_FAULT_RETRY:
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ break;
+
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ break;
+
+ case VM_FAULT_MEMORY_SHORTAGE:
+ if (vm_page_wait(interruptible)) {
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+ break;
+ }
+ /* fall thru */
+
+ case VM_FAULT_INTERRUPTED:
+ error_code = MACH_SEND_INTERRUPTED;
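+ /* fall thru */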
+ case VM_FAULT_MEMORY_ERROR:
+ ret = (error_code ? error_code:
+ KERN_MEMORY_ERROR);
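+ /*
+ * Back out: unwire any pages already wired by earlier
+ * iterations before failing the request.
+ */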
+ vm_object_lock(object);
+ for(; offset < dst_offset; offset += PAGE_SIZE) {
+ dst_page = vm_page_lookup(object, offset);
+ if(dst_page == VM_PAGE_NULL)
+ panic("vm_object_iopl_request: wired pages missing\n");
+ vm_page_lock_queues();
+ vm_page_unwire(dst_page);
+ vm_page_unlock_queues();
+ VM_STAT(reactivations++);
+ }
+ vm_object_unlock(object);
+ if(upl != NULL)
+ upl_destroy(upl);
+ return ret;
+ }
+ } while (result != VM_FAULT_SUCCESS);
+ }
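+ /*
+ * The page is now resident; wire it down (taking the page queue
+ * lock, batched via delayed_unlock) so it stays put for the I/O.
+ */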
+ if (delayed_unlock == 0)
+ vm_page_lock_queues();
+ vm_page_wire(dst_page);
+
+ if (upl_ptr) {
+ if (cntrl_flags & UPL_SET_LITE) {
+ int pg_num;
+ pg_num = (dst_offset-offset)/PAGE_SIZE;
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+ } else {
+ /*
+ * Convert the fictitious page to a
+ * private shadow of the real page.
+ */
+ assert(alias_page->fictitious);
+ alias_page->fictitious = FALSE;
+ alias_page->private = TRUE;
+ alias_page->pageout = TRUE;
+ alias_page->phys_page = dst_page->phys_page;
+ vm_page_wire(alias_page);
+
+ vm_page_insert(alias_page,
+ upl->map_object, size - xfer_size);
+ assert(!alias_page->wanted);
+ alias_page->busy = FALSE;
+ alias_page->absent = FALSE;
+ }
+
+ /* expect the page to be used */
+ dst_page->reference = TRUE;
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM))
+ dst_page->dirty = TRUE;
+ alias_page = NULL;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr
+ = dst_page->phys_page;
+ user_page_list[entry].dirty =
+ dst_page->dirty;
+ user_page_list[entry].pageout =
+ dst_page->pageout;
+ user_page_list[entry].absent =
+ dst_page->absent;
+ user_page_list[entry].precious =
+ dst_page->precious;
+ }
+ }
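+ /*
+ * Periodically drop the page queue lock so it is not held for the
+ * whole transfer.
+ */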
+ if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
+ delayed_unlock = 0;
+ vm_page_unlock_queues();
+ }
+ entry++;
+ dst_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
+ }
+ if (delayed_unlock)
+ vm_page_unlock_queues();
+
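+ /*
+ * An internal UPL carries its page list inline, so no count is
+ * returned; otherwise trim the caller's count to the number of
+ * entries actually filled in.
+ */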
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
+ *page_list_count = 0;
+ else if (*page_list_count > entry)
+ *page_list_count = entry;
+ }
+
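+ /* Release a fictitious page that was grabbed but never used as an alias. */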
+ if (alias_page != NULL) {
+ vm_page_lock_queues();
+ vm_page_free(alias_page);
+ vm_page_unlock_queues();
+ }
+
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+}
+