-
/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#define ALLOC_STRIDE (1024 * 1024 * 1024)
int physical_transfer_cluster_count = 0;
-#define VM_SUPER_CLUSTER 0x20000
-#define VM_SUPER_PAGES 32
+#define VM_SUPER_CLUSTER 0x40000
+#define VM_SUPER_PAGES 64
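The doubled super cluster now covers 64 base pages at the 4 KB page size these constants assume. A standalone sketch of that arithmetic (the names below are illustrative, not from this file):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_4K            4096            /* assumed base page size */
#define SUPER_CLUSTER_BYTES     0x40000         /* mirrors VM_SUPER_CLUSTER */
#define SUPER_CLUSTER_PAGES     64              /* mirrors VM_SUPER_PAGES */

int main(void)
{
        /* 0x40000 bytes == 256 KB == 64 pages of 4 KB */
        assert(SUPER_CLUSTER_BYTES / PAGE_SIZE_4K == SUPER_CLUSTER_PAGES);
        printf("%d bytes per super cluster, %d pages\n",
            SUPER_CLUSTER_BYTES, SUPER_CLUSTER_PAGES);
        return 0;
}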
/*
* 0 means no shift to pages, so == 1 page/cluster. 1 would mean
return dpt_array[i];
}
}
- assert_wait(&dpt_array, THREAD_UNINT);
- DPT_UNLOCK(dpt_lock);
- thread_block((void(*)(void))0);
+ DPT_SLEEP(dpt_lock, &dpt_array, THREAD_UNINT);
}
}
while(backing_store_release_trigger_disable != 0) {
- assert_wait((event_t)
- &backing_store_release_trigger_disable,
- THREAD_UNINT);
- VSL_UNLOCK();
- thread_block((void (*)(void)) 0);
- VSL_LOCK();
+ VSL_SLEEP(&backing_store_release_trigger_disable, THREAD_UNINT);
}
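The DPT_SLEEP and VSL_SLEEP macros replace the open-coded assert_wait / unlock / thread_block / relock sequence shown on the removed lines; their definitions are not part of this hunk. A rough user-space analogue of the pattern they capture, using pthreads (LOCK_SLEEP and the other names are stand-ins, not the kernel primitives):

#include <pthread.h>
#include <stdbool.h>

/* sleep on an event while dropping the lock, holding it again on
 * return -- the same shape as assert_wait(); UNLOCK();
 * thread_block(); LOCK(); */
#define LOCK_SLEEP(mutexp, condp)       pthread_cond_wait((condp), (mutexp))

static pthread_mutex_t table_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  table_avail = PTHREAD_COND_INITIALIZER;
static bool            entry_ready = false;

static void
wait_for_entry(void)
{
        pthread_mutex_lock(&table_lock);
        while (!entry_ready)
                LOCK_SLEEP(&table_lock, &table_avail);  /* lock held again here */
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        /* nothing to wait for in this toy: mark the entry ready up front */
        pthread_mutex_lock(&table_lock);
        entry_ready = true;
        pthread_cond_signal(&table_avail);
        pthread_mutex_unlock(&table_lock);

        wait_for_entry();
        return 0;
}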
/* we will choose instead to hold a send right */
UPL_NO_SYNC | UPL_CLEAN_IN_PLACE
| UPL_SET_INTERNAL);
if(error == KERN_SUCCESS) {
-#ifndef ubc_sync_working
- upl_commit(upl, NULL);
- upl_deallocate(upl);
- error = ps_vstruct_transfer_from_segment(
- vs, ps, transfer_object);
-#else
error = ps_vstruct_transfer_from_segment(
vs, ps, upl);
upl_commit(upl, NULL);
upl_deallocate(upl);
-#endif
- vm_object_deallocate(transfer_object);
} else {
- vm_object_deallocate(transfer_object);
error = KERN_FAILURE;
}
+ vm_object_deallocate(transfer_object);
}
if(error) {
VS_LOCK(vs);
VSL_LOCK();
while(backing_store_release_trigger_disable != 0) {
- assert_wait((event_t)
- &backing_store_release_trigger_disable,
- THREAD_UNINT);
- VSL_UNLOCK();
- thread_block((void (*)(void)) 0);
- VSL_LOCK();
+ VSL_SLEEP(&backing_store_release_trigger_disable,
+ THREAD_UNINT);
}
next_vs = (vstruct_t) queue_next(&(vs->vs_links));
&backing_store_release_trigger_disable,
THREAD_UNINT);
VSL_UNLOCK();
- thread_block((void (*)(void)) 0);
+ thread_block(THREAD_CONTINUE_NULL);
} else {
VSL_UNLOCK();
}
(mach_msg_type_number_t *) &bytes_read);
if(kr == MIG_NO_REPLY) {
assert_wait(&vsa->vsa_lock, THREAD_UNINT);
- thread_block((void(*)(void))0);
+ thread_block(THREAD_CONTINUE_NULL);
dev_buffer = vsa->vsa_addr;
bytes_read = (unsigned int)vsa->vsa_size;
*/
#if USE_PRECIOUS
- request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS;
+ request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_RET_ONLY_ABSENT;
#else
- request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE ;
+ request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_RET_ONLY_ABSENT;
#endif
while (cnt && (error == KERN_SUCCESS)) {
int ps_info_valid;
int page_list_count;
- if (cnt > VM_SUPER_CLUSTER)
+ if((vs_offset & cl_mask) &&
+ (cnt > (VM_SUPER_CLUSTER -
+ (vs_offset & cl_mask)))) {
+ size = VM_SUPER_CLUSTER;
+ size -= vs_offset & cl_mask;
+ } else if (cnt > VM_SUPER_CLUSTER) {
size = VM_SUPER_CLUSTER;
- else
+ } else {
size = cnt;
+ }
cnt -= size;
ps_info_valid = 0;
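The new leading case trims an unaligned request so that the first pass ends on a cluster boundary before the existing one-super-cluster cap applies. The size computation on its own, as a sketch (the function name and the 128 KB cluster size in the example are assumptions):

#include <assert.h>

#define SUPER_CLUSTER   0x40000         /* mirrors VM_SUPER_CLUSTER */

static unsigned long
pick_transfer_size(unsigned long vs_offset, unsigned long cnt,
    unsigned long cl_mask)
{
        unsigned long size;

        if ((vs_offset & cl_mask) &&
            (cnt > (SUPER_CLUSTER - (vs_offset & cl_mask)))) {
                /* unaligned start: stop at the next cluster boundary */
                size = SUPER_CLUSTER - (vs_offset & cl_mask);
        } else if (cnt > SUPER_CLUSTER) {
                size = SUPER_CLUSTER;
        } else {
                size = cnt;
        }
        return size;
}

int main(void)
{
        /* with 128 KB clusters (cl_mask == 0x1FFFF), a 512 KB request
         * starting at 0x11000 is trimmed to 0x2F000 bytes, so the next
         * pass starts on a cluster-aligned offset (0x40000) */
        assert(pick_transfer_size(0x11000, 0x80000, 0x1FFFF) == 0x2F000);
        assert(pick_transfer_size(0x40000, 0x80000, 0x1FFFF) == SUPER_CLUSTER);
        return 0;
}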
*/
for (xfer_size = 0; xfer_size < size; ) {
- while (cl_index < pages_in_cl && xfer_size < size) {
+ while (cl_index < pages_in_cl
+ && xfer_size < size) {
/*
- * accumulate allocated pages within a physical segment
+ * accumulate allocated pages within
+ * a physical segment
*/
if (CLMAP_ISSET(clmap, cl_index)) {
xfer_size += vm_page_size;
} else
break;
}
- if (cl_index < pages_in_cl || xfer_size >= size) {
+ if (cl_index < pages_in_cl
+ || xfer_size >= size) {
/*
- * we've hit an unallocated page or the
- * end of this request... go fire the I/O
+ * we've hit an unallocated page or
+ * the end of this request... go fire
+ * the I/O
*/
break;
}
/*
- * we've hit the end of the current physical segment
- * and there's more to do, so try moving to the next one
+ * we've hit the end of the current physical
+ * segment and there's more to do, so try
+ * moving to the next one
*/
seg_index++;
- ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
- psp[seg_index] = CLMAP_PS(clmap);
+ ps_offset[seg_index] =
+ ps_clmap(vs,
+ cur_offset & ~cl_mask,
+ &clmap, CL_FIND, 0, 0);
+ psp[seg_index] = CLMAP_PS(clmap);
ps_info_valid = 1;
if ((ps_offset[seg_index - 1] != (ps_offset[seg_index] - cl_size)) || (psp[seg_index - 1] != psp[seg_index])) {
/*
- * if the physical segment we're about to step into
- * is not contiguous to the one we're currently
- * in, or it's in a different paging file, or
+ * if the physical segment we're about
+ * to step into is not contiguous to
+ * the one we're currently in, or it's
+ * in a different paging file, or
* it hasn't been allocated....
* we stop here and generate the I/O
*/
break;
}
/*
- * start with first page of the next physical segment
+ * start with first page of the next physical
+ * segment
*/
cl_index = 0;
}
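The comment above spells out when the gathering loop must stop and issue the I/O it has built so far. The contiguity test it refers to can be sketched in isolation (the type and names below are stand-ins; the example assumes 128 KB clusters):

#include <assert.h>
#include <stdbool.h>

/* stand-in for the paging segment descriptor used above */
struct paging_segment { int ps_id; };

/* one I/O can span two clusters only when they live in the same paging
 * segment and the second starts exactly cl_size bytes after the first --
 * the same test as the ps_offset[]/psp[] comparison above */
static bool
clusters_contiguous(unsigned long prev_offset, unsigned long next_offset,
    const struct paging_segment *prev_ps,
    const struct paging_segment *next_ps, unsigned long cl_size)
{
        return (prev_ps == next_ps) && (prev_offset + cl_size == next_offset);
}

int main(void)
{
        struct paging_segment file_a = { 1 }, file_b = { 2 };

        assert( clusters_contiguous(0x100000, 0x120000, &file_a, &file_a, 0x20000));
        assert(!clusters_contiguous(0x100000, 0x140000, &file_a, &file_a, 0x20000));
        assert(!clusters_contiguous(0x100000, 0x120000, &file_a, &file_b, 0x20000));
        return 0;
}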
*/
page_list_count = 0;
memory_object_super_upl_request(vs->vs_control,
- (memory_object_offset_t)vs_offset,
- xfer_size, xfer_size,
- &upl, NULL, &page_list_count,
- request_flags | UPL_SET_INTERNAL);
+ (memory_object_offset_t)vs_offset,
+ xfer_size, xfer_size,
+ &upl, NULL, &page_list_count,
+ request_flags | UPL_SET_INTERNAL);
- error = ps_read_file(psp[beg_pseg], upl, (vm_offset_t) 0,
- ps_offset[beg_pseg] + (beg_indx * vm_page_size), xfer_size, &residual, 0);
+ error = ps_read_file(psp[beg_pseg],
+ upl, (vm_offset_t) 0,
+ ps_offset[beg_pseg] +
+ (beg_indx * vm_page_size),
+ xfer_size, &residual, 0);
} else
continue;
failed_size = 0;
/*
- * Adjust counts and send response to VM. Optimize for the
- * common case, i.e. no error and/or partial data.
- * If there was an error, then we need to error the entire
- * range, even if some data was successfully read.
- * If there was a partial read we may supply some
+ * Adjust counts and send response to VM. Optimize
+ * for the common case, i.e. no error and/or partial
+ * data. If there was an error, then we need to error
+ * the entire range, even if some data was successfully
+ * read. If there was a partial read we may supply some
* data and may error some as well. In all cases the
* VM must receive some notification for every page in the
* range.
*/
if ((error == KERN_SUCCESS) && (residual == 0)) {
/*
- * Got everything we asked for, supply the data to
- * the VM. Note that as a side effect of supplying
- * the data, the buffer holding the supplied data is
- * deallocated from the pager's address space.
+ * Got everything we asked for, supply the data
+ * to the VM. Note that as a side effect of
+ * supplying the data, the buffer holding the
+ * supplied data is deallocated from the pager's
+ * address space.
*/
- pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
+ pvs_object_data_provided(
+ vs, upl, vs_offset, xfer_size);
} else {
failed_size = xfer_size;
if (error == KERN_SUCCESS) {
if (residual == xfer_size) {
- /*
- * If a read operation returns no error
- * and no data moved, we turn it into
- * an error, assuming we're reading at
- * or beyong EOF.
- * Fall through and error the entire
- * range.
- */
+ /*
+ * If a read operation returns no error
+ * and no data moved, we turn it into
+ * an error, assuming we're reading at
+ * or beyond EOF.
+ * Fall through and error the entire
+ * range.
+ */
error = KERN_FAILURE;
} else {
- /*
- * Otherwise, we have partial read. If
- * the part read is a integral number
- * of pages supply it. Otherwise round
- * it up to a page boundary, zero fill
- * the unread part, and supply it.
- * Fall through and error the remainder
- * of the range, if any.
- */
+ /*
+ * Otherwise, we have a partial read. If
+ * the part read is an integral number
+ * of pages supply it. Otherwise round
+ * it up to a page boundary, zero fill
+ * the unread part, and supply it.
+ * Fall through and error the remainder
+ * of the range, if any.
+ */
int fill, lsize;
- fill = residual & ~vm_page_size;
- lsize = (xfer_size - residual) + fill;
- pvs_object_data_provided(vs, upl, vs_offset, lsize);
+ fill = residual
+ & ~vm_page_size;
+ lsize = (xfer_size - residual)
+ + fill;
+ pvs_object_data_provided(
+ vs, upl,
+ vs_offset, lsize);
if (lsize < xfer_size) {
- failed_size = xfer_size - lsize;
+ failed_size =
+ xfer_size - lsize;
error = KERN_FAILURE;
}
}
}
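The partial-read branch above supplies whatever data did arrive, rounded up to whole pages with the tail zero filled, and errors the rest of the range. A sketch of that rounding under the assumed 4 KB page size (supplied_length is an illustrative name, not the pager's):

#include <assert.h>

#define PAGE_SIZE_4K    4096    /* assumed page size */

/* bytes handed back to the VM for a request of xfer_size bytes that
 * came up residual bytes short: the part actually read, rounded up to
 * a page boundary */
static unsigned long
supplied_length(unsigned long xfer_size, unsigned long residual)
{
        unsigned long got = xfer_size - residual;

        return (got + PAGE_SIZE_4K - 1) & ~(unsigned long)(PAGE_SIZE_4K - 1);
}

int main(void)
{
        /* a 32 KB request that returned 20.5 KB supplies 6 pages (24 KB)
         * and errors the remaining 8 KB */
        assert(supplied_length(32768, 11776) == 24576);
        /* an exact page multiple is supplied as-is */
        assert(supplied_length(32768, 8192) == 24576);
        return 0;
}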
/*
* If there was an error in any part of the range, tell
- * the VM. Note that error is explicitly checked again since
- * it can be modified above.
+ * the VM. Note that error is explicitly checked again
+ * since it can be modified above.
*/
if (error != KERN_SUCCESS) {
BS_STAT(psp[beg_pseg]->ps_bs,
- psp[beg_pseg]->ps_bs->bs_pages_in_fail += atop(failed_size));
+ psp[beg_pseg]->ps_bs->bs_pages_in_fail
+ += atop(failed_size));
}
size -= xfer_size;
vs_offset += xfer_size;
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
- for (seg_index = 0, transfer_size = upl->size; transfer_size > 0; ) {
+ for (seg_index = 0, transfer_size = upl->size;
+ transfer_size > 0; ) {
- ps_offset[seg_index] = ps_clmap(vs, upl->offset + (seg_index * cl_size),
- &clmap, CL_ALLOC,
- transfer_size < cl_size ?
- transfer_size : cl_size, 0);
+ ps_offset[seg_index] =
+ ps_clmap(vs, upl->offset + (seg_index * cl_size),
+ &clmap, CL_ALLOC,
+ transfer_size < cl_size ?
+ transfer_size : cl_size, 0);
if (ps_offset[seg_index] == (vm_offset_t) -1) {
upl_abort(upl, 0);
} else
transfer_size = 0;
}
- for (page_index = 0, num_of_pages = upl->size / vm_page_size; page_index < num_of_pages; ) {
+ for (page_index = 0,
+ num_of_pages = upl->size / vm_page_size;
+ page_index < num_of_pages; ) {
/*
* skip over non-dirty pages
*/
for ( ; page_index < num_of_pages; page_index++) {
- if (UPL_DIRTY_PAGE(pl, page_index) || UPL_PRECIOUS_PAGE(pl, page_index))
+ if (UPL_DIRTY_PAGE(pl, page_index)
+ || UPL_PRECIOUS_PAGE(pl, page_index))
/*
* this is a page we need to write
- * go see if we can buddy it up with others
- * that are contiguous to it
+ * go see if we can buddy it up with
+ * others that are contiguous to it
*/
break;
/*
- * if the page is not-dirty, but present we need to commit it...
- * this is an unusual case since we only asked for dirty pages
+ * if the page is not dirty, but present, we
+ * need to commit it... This is an unusual
+ * case since we only asked for dirty pages
*/
if (UPL_PAGE_PRESENT(pl, page_index)) {
boolean_t empty = FALSE;
break;
/*
- * gather up contiguous dirty pages... we have at least 1
- * otherwise we would have bailed above
+ * gather up contiguous dirty pages... we have at
+ * least 1, otherwise we would have bailed above
* make sure that each physical segment that we step
* into is contiguous to the one we're currently in
* if it's not, we have to stop and write what we have
*/
- for (first_dirty = page_index; page_index < num_of_pages; ) {
- if ( !UPL_DIRTY_PAGE(pl, page_index) && !UPL_PRECIOUS_PAGE(pl, page_index))
+ for (first_dirty = page_index;
+ page_index < num_of_pages; ) {
+ if ( !UPL_DIRTY_PAGE(pl, page_index)
+ && !UPL_PRECIOUS_PAGE(pl, page_index))
break;
page_index++;
/*
int cur_seg;
int nxt_seg;
- cur_seg = (page_index - 1) / pages_in_cl;
+ cur_seg =
+ (page_index - 1) / pages_in_cl;
nxt_seg = page_index / pages_in_cl;
if (cur_seg != nxt_seg) {
if ((ps_offset[cur_seg] != (ps_offset[nxt_seg] - cl_size)) || (psp[cur_seg] != psp[nxt_seg]))
- /*
- * if the segment we're about to step into
- * is not contiguous to the one we're currently
- * in, or it's in a different paging file....
- * we stop here and generate the I/O
- */
+ /*
+ * if the segment we're about
+ * to step into is not
+ * contiguous to the one we're
+ * currently in, or it's in a
+ * different paging file....
+ * we stop here and generate
+ * the I/O
+ */
break;
}
}
seg_offset = upl_offset - (seg_index * cl_size);
transfer_size = num_dirty * vm_page_size;
- error = ps_write_file(psp[seg_index], upl, upl_offset,
- ps_offset[seg_index] + seg_offset, transfer_size, flags);
- if (error == 0) {
- while (transfer_size) {
- int seg_size;
+ while (transfer_size) {
+ int seg_size;
- if ((seg_size = cl_size - (upl_offset % cl_size)) > transfer_size)
- seg_size = transfer_size;
+ if ((seg_size = cl_size -
+ (upl_offset % cl_size))
+ > transfer_size)
+ seg_size = transfer_size;
- ps_vs_write_complete(vs, upl->offset + upl_offset, seg_size, error);
+ ps_vs_write_complete(vs,
+ upl->offset + upl_offset,
+ seg_size, error);
- transfer_size -= seg_size;
- upl_offset += seg_size;
- }
- must_abort = 0;
+ transfer_size -= seg_size;
+ upl_offset += seg_size;
}
+ upl_offset = first_dirty * vm_page_size;
+ transfer_size = num_dirty * vm_page_size;
+ error = ps_write_file(psp[seg_index],
+ upl, upl_offset,
+ ps_offset[seg_index]
+ + seg_offset,
+ transfer_size, flags);
+ must_abort = 0;
}
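In the rearranged block above, completion is now reported through ps_vs_write_complete one cluster-sized chunk at a time before the single ps_write_file call is issued. The chunking arithmetic in isolation, as a sketch (for_each_chunk and the callback are illustrative names):

#include <stdio.h>

/* split [upl_offset, upl_offset + transfer_size) into pieces that never
 * cross a cl_size boundary, mirroring the seg_size computation above */
static void
for_each_chunk(unsigned long upl_offset, unsigned long transfer_size,
    unsigned long cl_size,
    void (*complete)(unsigned long off, unsigned long len))
{
        while (transfer_size) {
                unsigned long seg_size = cl_size - (upl_offset % cl_size);

                if (seg_size > transfer_size)
                        seg_size = transfer_size;

                complete(upl_offset, seg_size);

                transfer_size -= seg_size;
                upl_offset += seg_size;
        }
}

static void
print_chunk(unsigned long off, unsigned long len)
{
        printf("complete: offset 0x%lx, length 0x%lx\n", off, len);
}

int main(void)
{
        /* a 0x30000-byte write starting 0x1000 into a 0x20000-byte cluster
         * is reported as two chunks: 0x1F000 bytes, then 0x11000 bytes */
        for_each_chunk(0x1000, 0x30000, 0x20000, print_chunk);
        return 0;
}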
if (must_abort) {
boolean_t empty = FALSE;
/* Assume that the caller has given us contiguous */
/* pages */
if(cnt) {
+ ps_vs_write_complete(vs, mobj_target_addr,
+ cnt, error);
error = ps_write_file(ps, internal_upl,
0, actual_offset,
cnt, flags);
if (error)
break;
- ps_vs_write_complete(vs, mobj_target_addr,
- cnt, error);
- }
+ }
if (error)
break;
actual_offset += cnt;
ps_vstruct_transfer_from_segment(
vstruct_t vs,
paging_segment_t segment,
-#ifndef ubc_sync_working
- vm_object_t transfer_object)
-#else
upl_t upl)
-#endif
{
struct vs_map *vsmap;
struct vs_map old_vsmap;
(vm_page_size * (j << vs->vs_clshift))
+ clmap_off,
vm_page_size << vs->vs_clshift,
-#ifndef ubc_sync_working
- transfer_object)
-#else
upl)
-#endif
!= KERN_SUCCESS) {
VS_LOCK(vs);
vs->vs_xfer_pending = FALSE;
if(vs_cluster_transfer(vs,
vm_page_size * (j << vs->vs_clshift),
vm_page_size << vs->vs_clshift,
-#ifndef ubc_sync_working
- transfer_object) != KERN_SUCCESS) {
-#else
upl) != KERN_SUCCESS) {
-#endif
VS_LOCK(vs);
vs->vs_xfer_pending = FALSE;
VS_UNLOCK(vs);
vstruct_t vs,
vm_offset_t offset,
vm_size_t cnt,
-#ifndef ubc_sync_working
- vm_object_t transfer_object)
-#else
upl_t upl)
-#endif
{
vm_offset_t actual_offset;
paging_segment_t ps;
struct vs_map original_read_vsmap;
struct vs_map write_vsmap;
upl_t sync_upl;
-#ifndef ubc_sync_working
- upl_t upl;
-#endif
-
vm_offset_t ioaddr;
/* vs_cluster_transfer reads in the pages of a cluster and
if(ps->ps_segtype == PS_PARTITION) {
/*
- NEED TO BE WITH SYNC & NO COMMIT
+ NEED TO ISSUE WITH SYNC & NO COMMIT
error = ps_read_device(ps, actual_offset, &buffer,
size, &residual, flags);
*/
} else {
-#ifndef ubc_sync_working
- int page_list_count = 0;
-
- error = vm_object_upl_request(transfer_object,
-(vm_object_offset_t) (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)),
- size, &upl, NULL, &page_list_count,
- UPL_NO_SYNC | UPL_CLEAN_IN_PLACE
- | UPL_SET_INTERNAL);
- if (error == KERN_SUCCESS) {
- error = ps_read_file(ps, upl, (vm_offset_t) 0, actual_offset,
- size, &residual, 0);
- }
-
-#else
- /* NEED TO BE WITH SYNC & NO COMMIT & NO RDAHEAD*/
+ /* NEED TO ISSUE WITH SYNC & NO COMMIT */
error = ps_read_file(ps, upl, (vm_offset_t) 0, actual_offset,
size, &residual,
- (UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD));
-#endif
+ (UPL_IOSYNC | UPL_NOCOMMIT));
}
read_vsmap = *vsmap_ptr;
/* the vm_map_copy_page_discard call */
*vsmap_ptr = write_vsmap;
-#ifndef ubc_sync_working
- error = vm_object_upl_request(transfer_object,
- (vm_object_offset_t)
- (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)),
- size, &upl, NULL, &page_list_count,
- UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
- if(vs_cluster_write(vs, upl, offset,
- size, TRUE, 0) != KERN_SUCCESS) {
- upl_commit(upl, NULL);
- upl_deallocate(upl);
-#else
if(vs_cluster_write(vs, upl, offset,
size, TRUE, UPL_IOSYNC | UPL_NOCOMMIT ) != KERN_SUCCESS) {
-#endif
error = KERN_FAILURE;
if(!(VSM_ISCLR(*vsmap_ptr))) {
/* unmap the new backing store object */