+
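+ /*
+  * A single UPL can describe at most MAX_UPL_TRANSFER pages, so clamp the
+  * request unless the object is physically contiguous device memory.
+  */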
+ if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
+         size = MAX_UPL_TRANSFER * page_size;
+ }
+
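+ /*
+  * For an internal UPL, report how many page-list entries the UPL can
+  * hold; fail the request if the caller supplied a page list that is
+  * smaller than the number of pages being asked for.
+  */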
+ if(cntrl_flags & UPL_SET_INTERNAL)
+         if(page_list_count != NULL)
+                 *page_list_count = MAX_UPL_TRANSFER;
+ if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
+    ((page_list_count != NULL) && (*page_list_count != 0)
+     && *page_list_count < (size/page_size)))
+         return KERN_INVALID_ARGUMENT;
+
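+ /* external (vnode-backed) objects are expected to have a zero paging offset here */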
+ if((!object->internal) && (object->paging_offset != 0))
+         panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
+
+ if(object->phys_contiguous) {
+         /* No paging operations are possible against this memory */
+         /* and so no need for map object, ever */
+         cntrl_flags |= UPL_SET_LITE;
+ }
+
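+ /*
+  * Allocate the UPL.  An internal UPL carries its upl_page_info_t array
+  * directly after the upl structure; a lite UPL also carries a bitmap
+  * (lite_list) with one bit per page covered by the request.
+  */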
+ if(upl_ptr) {
+         if(cntrl_flags & UPL_SET_INTERNAL) {
+                 if(cntrl_flags & UPL_SET_LITE) {
+                         upl = upl_create(
+                                 UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
+                                 size);
+                         user_page_list = (upl_page_info_t *)
+                                 (((vm_offset_t)upl) + sizeof(struct upl));
+                         lite_list = (wpl_array_t)
+                                 (((vm_offset_t)user_page_list) +
+                                  ((size/PAGE_SIZE) *
+                                   sizeof(upl_page_info_t)));
+                         page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+                         page_field_size =
+                                 (page_field_size + 3) & 0xFFFFFFFC;
+                         bzero((char *)lite_list, page_field_size);
+                         upl->flags =
+                                 UPL_LITE | UPL_INTERNAL | UPL_IO_WIRE;
+                 } else {
+                         upl = upl_create(UPL_CREATE_INTERNAL, size);
+                         user_page_list = (upl_page_info_t *)
+                                 (((vm_offset_t)upl)
+                                  + sizeof(struct upl));
+                         upl->flags = UPL_INTERNAL | UPL_IO_WIRE;
+                 }
+         } else {
+                 if(cntrl_flags & UPL_SET_LITE) {
+                         upl = upl_create(UPL_CREATE_LITE, size);
+                         lite_list = (wpl_array_t)
+                                 (((vm_offset_t)upl) + sizeof(struct upl));
+                         page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+                         page_field_size =
+                                 (page_field_size + 3) & 0xFFFFFFFC;
+                         bzero((char *)lite_list, page_field_size);
+                         upl->flags = UPL_LITE | UPL_IO_WIRE;
+                 } else {
+                         upl = upl_create(UPL_CREATE_EXTERNAL, size);
+                         upl->flags = UPL_IO_WIRE;
+                 }
+         }
+
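+         /*
+          * Physically contiguous memory needs no per-page processing: mark
+          * the UPL as device memory, record its offset and size, and return.
+          */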
+         if(object->phys_contiguous) {
+                 upl->map_object = object;
+                 /* don't need any shadow mappings for this one */
+                 /* since it is already I/O memory */
+                 upl->flags |= UPL_DEVICE_MEMORY;
+
+                 vm_object_lock(object);
+                 vm_object_paging_begin(object);
+                 vm_object_unlock(object);
+
+                 /* paging in progress also protects the paging_offset */
+                 upl->offset = offset + object->paging_offset;
+                 upl->size = size;
+                 *upl_ptr = upl;
+                 if(user_page_list) {
+                         user_page_list[0].phys_addr =
+                                 (offset + object->shadow_offset)>>12;
+                         user_page_list[0].device = TRUE;
+                 }
+
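+                 /*
+                  * Report how many entries of the caller's page list were
+                  * used: none for an internal UPL (its page info lives in
+                  * the UPL itself), one for the device-memory entry above.
+                  */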
+                 if(page_list_count != NULL) {
+                         if (upl->flags & UPL_INTERNAL) {
+                                 *page_list_count = 0;
+                         } else {
+                                 *page_list_count = 1;
+                         }
+                 }
+                 return KERN_SUCCESS;
+         }
+         if(user_page_list)
+                 user_page_list[0].device = FALSE;
+
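+         /*
+          * A lite UPL maps the original object directly; otherwise allocate
+          * a shadow "pageout" object that the UPL will be mapped through.
+          */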
+         if(cntrl_flags & UPL_SET_LITE) {
+                 upl->map_object = object;
+         } else {
+                 upl->map_object = vm_object_allocate(size);
+                 vm_object_lock(upl->map_object);
+                 upl->map_object->shadow = object;
+                 upl->map_object->pageout = TRUE;
+                 upl->map_object->can_persist = FALSE;
+                 upl->map_object->copy_strategy =
+                         MEMORY_OBJECT_COPY_NONE;
+                 upl->map_object->shadow_offset = offset;
+                 upl->map_object->wimg_bits = object->wimg_bits;
+                 vm_object_unlock(upl->map_object);
+         }
+ }
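+ /* lock the object and hold paging_in_progress while the UPL is set up */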
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ if (!object->phys_contiguous) {
+         /* Protect user space from future COW operations */
+         object->true_share = TRUE;
+         if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+                 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+
+ /* we can lock the upl offset now that paging_in_progress is set */
+ if(upl_ptr) {
+         upl->size = size;
+         upl->offset = offset + object->paging_offset;
+         *upl_ptr = upl;