int physical_transfer_cluster_count = 0;
#define VM_SUPER_CLUSTER 0x40000
-#define VM_SUPER_PAGES 64
+#define VM_SUPER_PAGES (VM_SUPER_CLUSTER / PAGE_SIZE)
/*
* 0 means no shift to pages, so == 1 page/cluster. 1 would mean
* 2 pages/cluster, 2 means 4 pages/cluster, and so on.
*/
+#define VSTRUCT_MIN_CLSHIFT 0
+
#define VSTRUCT_DEF_CLSHIFT 2
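+/*
+ * Example (assuming 4 KB pages): VSTRUCT_DEF_CLSHIFT == 2 gives
+ * 1 << 2 == 4 pages (16 KB) per cluster, and VM_SUPER_CLUSTER
+ * (0x40000 == 256 KB) spans VM_SUPER_PAGES == 64 pages.
+ */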
-int vstruct_def_clshift = VSTRUCT_DEF_CLSHIFT;
int default_pager_clsize = 0;
+int vstruct_def_clshift = VSTRUCT_DEF_CLSHIFT;
+
/* statistics */
unsigned int clustered_writes[VM_SUPER_PAGES+1];
unsigned int clustered_reads[VM_SUPER_PAGES+1];
#define VS_ASYNC_LOCK() lck_mtx_lock(&default_pager_async_lock)
#define VS_ASYNC_UNLOCK() lck_mtx_unlock(&default_pager_async_lock)
#define VS_ASYNC_LOCK_INIT() lck_mtx_init(&default_pager_async_lock, &default_pager_lck_grp, &default_pager_lck_attr)
+#define VS_ASYNC_LOCK_DESTROY() lck_mtx_destroy(&default_pager_async_lock, &default_pager_lck_grp)
#define VS_ASYNC_LOCK_ADDR() (&default_pager_async_lock)
/*
* Paging Space Hysteresis triggers and the target notification port
ipc_port_t min_pages_trigger_port = NULL;
ipc_port_t max_pages_trigger_port = NULL;
+#if CONFIG_FREEZE
+boolean_t use_emergency_swap_file_first = TRUE;
+#else
boolean_t use_emergency_swap_file_first = FALSE;
+#endif
boolean_t bs_low = FALSE;
int backing_store_release_trigger_disable = 0;
boolean_t backing_store_stop_compaction = FALSE;
-
+boolean_t backing_store_abort_compaction = FALSE;
/* Have we decided if swap needs to be encrypted yet ? */
boolean_t dp_encryption_inited = FALSE;
/* Should we encrypt swap ? */
boolean_t dp_encryption = FALSE;
+boolean_t dp_isssd = FALSE;
/*
* Object sizes are rounded up to the next power of 2,
unsigned int dp_pages_reserve = 0;
unsigned int cluster_transfer_minimum = 100;
+/*
+ * Trim state
+ */
+struct ps_vnode_trim_data {
+ struct vnode *vp;
+ dp_offset_t offset;
+ dp_size_t length;
+};
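+/*
+ * Pending trim range on a paging vnode (vp/offset/length), intended to let
+ * adjacent freed clusters be coalesced into a single trim request.
+ */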
+
/* forward declarations */
kern_return_t ps_write_file(paging_segment_t, upl_t, upl_offset_t, dp_offset_t, unsigned int, int); /* forward */
kern_return_t ps_read_file (paging_segment_t, upl_t, upl_offset_t, dp_offset_t, unsigned int, unsigned int *, int); /* forward */
kern_return_t
default_pager_backing_store_delete_internal( MACH_PORT_FACE );
+static inline void ps_vnode_trim_init(struct ps_vnode_trim_data *data);
+static inline void ps_vnode_trim_now(struct ps_vnode_trim_data *data);
+static inline void ps_vnode_trim_more(struct ps_vnode_trim_data *data, struct vs_map *map, unsigned int shift, dp_size_t length);
+
default_pager_thread_t *
get_read_buffer( void )
{
if ((port == MACH_PORT_NULL) || port_is_vs(port))
*/
- if ((port == MACH_PORT_NULL))
+ if (port == MACH_PORT_NULL)
return BACKING_STORE_NULL;
BSL_LOCK();
if(alias_struct != NULL) {
alias_struct->vs = (struct vstruct *)bs;
alias_struct->name = &default_pager_ops;
- port->alias = (uintptr_t) alias_struct;
+ port->ip_alias = (uintptr_t) alias_struct;
}
else {
ipc_port_dealloc_kernel((MACH_PORT_FACE)(port));
+
+ BS_LOCK_DESTROY(bs);
kfree(bs, sizeof (struct backing_store));
+
return KERN_RESOURCE_SHORTAGE;
}
if ((vs_count != 0) && (vs != NULL))
vs->vs_async_pending += 1; /* hold parties calling */
/* vs_async_wait */
+
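+	/*
+	 * Clear any stale compaction-abort request left over from an earlier
+	 * low-space condition before starting this transfer.
+	 */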
+ if (bs_low == FALSE)
+ backing_store_abort_compaction = FALSE;
+
VS_UNLOCK(vs);
VSL_UNLOCK();
while((vs_count != 0) && (vs != NULL)) {
vm_object_t transfer_object;
unsigned int count;
upl_t upl;
+ int upl_flags;
transfer_object = vm_object_allocate((vm_object_size_t)VM_SUPER_CLUSTER);
count = 0;
+ upl_flags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
+ UPL_SET_LITE | UPL_SET_INTERNAL);
+ if (dp_encryption) {
+ /* mark the pages as "encrypted" when they come in */
+ upl_flags |= UPL_ENCRYPT;
+ }
error = vm_object_upl_request(transfer_object,
(vm_object_offset_t)0, VM_SUPER_CLUSTER,
- &upl, NULL, &count,
- UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_LITE | UPL_SET_INTERNAL);
+ &upl, NULL, &count, upl_flags);
if(error == KERN_SUCCESS) {
error = ps_vstruct_transfer_from_segment(
}
vm_object_deallocate(transfer_object);
}
- if(error || current_thread_aborted() || backing_store_stop_compaction) {
+ if(error || current_thread_aborted()) {
VS_LOCK(vs);
vs->vs_async_pending -= 1; /* release vs_async_wait */
if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
ps->ps_special_clusters = 0;
ps->ps_pgcount = ps->ps_pgnum;
ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift;
+ dp_pages_reserve += ps->ps_pgcount;
PS_UNLOCK(ps);
- dp_pages_reserve += interim_pages_removed;
} else {
paging_segments[i] = PAGING_SEGMENT_NULL;
paging_segment_count--;
/*
* Disable lookups of this backing store.
*/
- if((void *)bs->bs_port->alias != NULL)
- kfree((void *) bs->bs_port->alias,
+ if((void *)bs->bs_port->ip_alias != NULL)
+ kfree((void *) bs->bs_port->ip_alias,
sizeof (struct vstruct_alias));
ipc_port_dealloc_kernel((ipc_port_t) (bs->bs_port));
bs->bs_port = MACH_PORT_NULL;
/*
* Free the backing store structure.
*/
+ BS_LOCK_DESTROY(bs);
kfree(bs, sizeof *bs);
return KERN_SUCCESS;
PS_LOCK_INIT(ps);
ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls));
if (!ps->ps_bmap) {
+ PS_LOCK_DESTROY(ps);
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
if ((error = ps_enter(ps)) != 0) {
kfree(ps->ps_bmap, RMAPSIZE(ps->ps_ncls));
+
+ PS_LOCK_DESTROY(ps);
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
if(alias_struct != NULL) {
alias_struct->vs = (struct vstruct *)vsa;
alias_struct->name = &default_pager_ops;
- reply_port->alias = (uintptr_t) alias_struct;
+ reply_port->ip_alias = (uintptr_t) alias_struct;
vsa->reply_port = reply_port;
vs_alloc_async_count++;
}
if(alias_struct != NULL) {
alias_struct->vs = reply_port;
alias_struct->name = &default_pager_ops;
- reply_port->alias = (int) vsa;
+ reply_port->defpager_importance.alias = (int) vsa;
vsa->reply_port = reply_port;
vs_alloc_async_count++;
}
kern_return_t kr;
reply_port = vsa->reply_port;
- kfree(reply_port->alias, sizeof (struct vstuct_alias));
+	kfree((void *) reply_port->ip_alias, sizeof (struct vstruct_alias));
kfree(vsa, sizeof (struct vs_async));
ipc_port_dealloc_kernel((MACH_PORT_FACE) (reply_port));
#if 0
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
bs_low = TRUE;
+ backing_store_abort_compaction = TRUE;
}
lps = ps;
}
PSL_UNLOCK();
if (trigger != IP_NULL) {
+ dprintf(("ps_select_segment - send HI_WAT_ALERT\n"));
+
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
minimum_pages_remaining)) {
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
+ bs_low = TRUE;
+ backing_store_abort_compaction = TRUE;
}
PS_UNLOCK(ps);
/*
PSL_UNLOCK();
if (trigger != IP_NULL) {
+ dprintf(("ps_select_segment - send HI_WAT_ALERT\n"));
+
default_pager_space_alert(
trigger,
HI_WAT_ALERT);
(dp_pages_free < minimum_pages_remaining)) {
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
+ bs_low = TRUE;
+ backing_store_abort_compaction = TRUE;
}
PSL_UNLOCK();
PS_UNLOCK(ps);
if (trigger != IP_NULL) {
+ dprintf(("ps_allocate_cluster - send HI_WAT_ALERT\n"));
+
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
trigger = min_pages_trigger_port;
min_pages_trigger_port = NULL;
bs_low = TRUE;
+ backing_store_abort_compaction = TRUE;
}
PSL_UNLOCK();
if (trigger != IP_NULL) {
+ dprintf(("ps_allocate_cluster - send HI_WAT_ALERT\n"));
+
default_pager_space_alert(trigger, HI_WAT_ALERT);
ipc_port_release_send(trigger);
}
dp_size_t size)
{
unsigned int i;
- for (i = 0; i < size; i++)
- if (!VSM_ISCLR(vsmap[i]) && !VSM_ISERR(vsmap[i]))
+ struct ps_vnode_trim_data trim_data;
+
+ ps_vnode_trim_init(&trim_data);
+
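+	/*
+	 * Walk the map, coalescing trims: each in-use entry extends the
+	 * pending trim range via ps_vnode_trim_more() before its cluster is
+	 * deallocated, while a clear or errored entry (and the end of the
+	 * loop) flushes the pending range via ps_vnode_trim_now().
+	 */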
+ for (i = 0; i < size; i++) {
+ if (!VSM_ISCLR(vsmap[i]) && !VSM_ISERR(vsmap[i])) {
+ ps_vnode_trim_more(&trim_data,
+ &vsmap[i],
+ VSM_PS(vsmap[i])->ps_clshift,
+ vm_page_size << VSM_PS(vsmap[i])->ps_clshift);
ps_deallocate_cluster(VSM_PS(vsmap[i]),
VSM_CLOFF(vsmap[i]));
+ } else {
+ ps_vnode_trim_now(&trim_data);
+ }
+ }
+ ps_vnode_trim_now(&trim_data);
}
void
bs_commit(- vs->vs_size);
+ VS_MAP_LOCK_DESTROY(vs);
+
zfree(vstruct_zone, vs);
}
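+/*
+ * Reclaim a vstruct's swapped pages.  When return_to_vm is TRUE, every
+ * allocated cluster is read back into VM via pvs_cluster_read() with a
+ * count of (dp_size_t)-1 (i.e. the whole cluster); reclaim_backing_store
+ * makes those reads synchronous through fault_info.io_sync.  Once all of a
+ * map's clusters have been brought back, they are released with
+ * ps_dealloc_vsmap().  Returns KERN_MEMORY_ERROR if any cluster read fails.
+ */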
+kern_return_t
+ps_vstruct_reclaim(
+ vstruct_t vs,
+ boolean_t return_to_vm,
+ boolean_t reclaim_backing_store)
+{
+ unsigned int i, j;
+ struct vs_map *vsmap;
+ boolean_t vsmap_all_clear, vsimap_all_clear;
+ struct vm_object_fault_info fault_info;
+ int clmap_off;
+ unsigned int vsmap_size;
+ kern_return_t kr = KERN_SUCCESS;
+
+ VS_MAP_LOCK(vs);
+
+ fault_info.cluster_size = VM_SUPER_CLUSTER;
+ fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
+ fault_info.user_tag = 0;
+ fault_info.lo_offset = 0;
+ fault_info.hi_offset = ptoa_32(vs->vs_size << vs->vs_clshift);
+ fault_info.io_sync = reclaim_backing_store;
+ fault_info.batch_pmap_op = FALSE;
+
+ /*
+ * If this is an indirect structure, then we walk through the valid
+ * (non-zero) indirect pointers and deallocate the clusters
+ * associated with each used map entry (via ps_dealloc_vsmap).
+ * When all of the clusters in an indirect block have been
+ * freed, we deallocate the block. When all of the indirect
+ * blocks have been deallocated we deallocate the memory
+ * holding the indirect pointers.
+ */
+ if (vs->vs_indirect) {
+ vsimap_all_clear = TRUE;
+ for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
+ vsmap = vs->vs_imap[i];
+ if (vsmap == NULL)
+ continue;
+ /* loop on clusters in this indirect map */
+ clmap_off = (vm_page_size * CLMAP_ENTRIES *
+ VSCLSIZE(vs) * i);
+ if (i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
+ vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
+ else
+ vsmap_size = CLMAP_ENTRIES;
+ vsmap_all_clear = TRUE;
+ if (return_to_vm) {
+ for (j = 0; j < vsmap_size;) {
+ if (VSM_ISCLR(vsmap[j]) ||
+ VSM_ISERR(vsmap[j])) {
+ j++;
+ clmap_off += vm_page_size * VSCLSIZE(vs);
+ continue;
+ }
+ VS_MAP_UNLOCK(vs);
+ kr = pvs_cluster_read(
+ vs,
+ clmap_off,
+ (dp_size_t) -1, /* read whole cluster */
+ &fault_info);
+
+ VS_MAP_LOCK(vs); /* XXX what if it changed ? */
+ if (kr != KERN_SUCCESS) {
+ vsmap_all_clear = FALSE;
+ vsimap_all_clear = FALSE;
+
+ kr = KERN_MEMORY_ERROR;
+ goto out;
+ }
+ }
+ }
+ if (vsmap_all_clear) {
+ ps_dealloc_vsmap(vsmap, CLMAP_ENTRIES);
+ kfree(vsmap, CLMAP_THRESHOLD);
+ vs->vs_imap[i] = NULL;
+ }
+ }
+ if (vsimap_all_clear) {
+// kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
+ }
+ } else {
+ /*
+ * Direct map. Free used clusters, then memory.
+ */
+ vsmap = vs->vs_dmap;
+ if (vsmap == NULL) {
+ goto out;
+ }
+ vsmap_all_clear = TRUE;
+ /* loop on clusters in the direct map */
+ if (return_to_vm) {
+ for (j = 0; j < vs->vs_size;) {
+ if (VSM_ISCLR(vsmap[j]) ||
+ VSM_ISERR(vsmap[j])) {
+ j++;
+ continue;
+ }
+ clmap_off = vm_page_size * (j << vs->vs_clshift);
+ VS_MAP_UNLOCK(vs);
+ kr = pvs_cluster_read(
+ vs,
+ clmap_off,
+ (dp_size_t) -1, /* read whole cluster */
+ &fault_info);
+
+ VS_MAP_LOCK(vs); /* XXX what if it changed ? */
+ if (kr != KERN_SUCCESS) {
+ vsmap_all_clear = FALSE;
+
+ kr = KERN_MEMORY_ERROR;
+ goto out;
+ } else {
+// VSM_CLR(vsmap[j]);
+ }
+ }
+ }
+ if (vsmap_all_clear) {
+ ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
+// kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
+ }
+ }
+out:
+ VS_MAP_UNLOCK(vs);
+
+ return kr;
+}
+
int ps_map_extend(vstruct_t, unsigned int); /* forward */
int ps_map_extend(
{
dp_offset_t cluster; /* The cluster number of offset */
struct vs_map *vsmap;
+ struct ps_vnode_trim_data trim_data;
+
+ ps_vnode_trim_init(&trim_data);
VS_MAP_LOCK(vs);
else
vsmap = vs->vs_dmap;
if (vsmap == NULL) {
+ ps_vnode_trim_now(&trim_data);
VS_MAP_UNLOCK(vs);
return;
}
vsmap += cluster%CLMAP_ENTRIES;
if (VSM_ISCLR(*vsmap)) {
+ ps_vnode_trim_now(&trim_data);
length -= vm_page_size;
offset += vm_page_size;
continue;
/*
* If map entry is empty, clear and deallocate cluster.
*/
- if (!VSM_ALLOC(*vsmap)) {
+ if (!VSM_BMAP(*vsmap)) {
+ ps_vnode_trim_more(&trim_data,
+ vsmap,
+ vs->vs_clshift,
+ VSCLSIZE(vs) * vm_page_size);
ps_deallocate_cluster(VSM_PS(*vsmap),
VSM_CLOFF(*vsmap));
VSM_CLR(*vsmap);
+ } else {
+ ps_vnode_trim_now(&trim_data);
}
}
+ ps_vnode_trim_now(&trim_data);
VS_MAP_UNLOCK(vs);
}
struct vs_async *vsa;
vsa = (struct vs_async *)
- ((struct vstruct_alias *)(reply_port->alias))->vs;
+ ((struct vstruct_alias *)(reply_port->ip_alias))->vs;
if (device_code == KERN_SUCCESS && bytes_written != vsa->vsa_size) {
device_code = KERN_FAILURE;
{
struct vs_async *vsa;
vsa = (struct vs_async *)
- ((struct vstruct_alias *)(reply_port->alias))->vs;
+ ((struct vstruct_alias *)(reply_port->defpager_importance.alias))->vs;
vsa->vsa_addr = (vm_offset_t)data;
vsa->vsa_size = (vm_size_t)dataCnt;
vsa->vsa_error = return_code;
__unused upl_offset_t offset,
upl_size_t size)
{
+#if RECLAIM_SWAP
+ boolean_t empty;
+#endif
DP_DEBUG(DEBUG_VS_INTERNAL,
("buffer=0x%x,offset=0x%x,size=0x%x\n",
ASSERT(size > 0);
GSTAT(global_stats.gs_pages_in += atop_32(size));
-
-#if USE_PRECIOUS
- ps_clunmap(vs, offset, size);
-#endif /* USE_PRECIOUS */
+/* check upl iosync flag instead of using RECLAIM_SWAP */
+#if RECLAIM_SWAP
+ if (size != upl->size) {
+ if (size) {
+ ps_clunmap(vs, offset, size);
+ upl_commit_range(upl, 0, size, 0, NULL, 0, &empty);
+ }
+ upl_abort(upl, UPL_ABORT_ERROR);
+ upl_deallocate(upl);
+ } else {
+ ps_clunmap(vs, offset, size);
+ upl_commit(upl, NULL, 0);
+ upl_deallocate(upl);
+ }
+#endif /* RECLAIM_SWAP */
}
static memory_object_offset_t last_start;
static vm_size_t last_length;
+/*
+ * A "cnt" of 0 means that the caller just wants to check if the page at
+ * offset "vs_offset" exists in the backing store. That page hasn't been
+ * prepared, so no need to release it.
+ *
+ * A "cnt" of -1 means that the caller wants to bring back from the backing
+ * store all existing pages in the cluster containing "vs_offset".
+ */
kern_return_t
pvs_cluster_read(
vstruct_t vs,
int cl_index;
unsigned int xfer_size;
dp_offset_t orig_vs_offset;
- dp_offset_t ps_offset[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT];
- paging_segment_t psp[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT];
+ dp_offset_t ps_offset[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_MIN_CLSHIFT];
+ paging_segment_t psp[(VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_MIN_CLSHIFT];
struct clmap clmap;
upl_t upl;
unsigned int page_list_count;
memory_object_offset_t cluster_start;
vm_size_t cluster_length;
uint32_t io_streaming;
+ int i;
+ boolean_t io_sync = FALSE;
+ boolean_t reclaim_all = FALSE;
pages_in_cl = 1 << vs->vs_clshift;
cl_size = pages_in_cl * vm_page_size;
cl_mask = cl_size - 1;
-#if USE_PRECIOUS
- request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
-#else
- request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
-#endif
+ request_flags = UPL_NO_SYNC | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
+
+ if (cnt == (dp_size_t) -1)
+ reclaim_all = TRUE;
+
+ if (reclaim_all == TRUE) {
+ /*
+ * We've been called from ps_vstruct_reclaim() to move all
+ * the object's swapped pages back to VM pages.
+ * This can put memory pressure on the system, so we do want
+ * to wait for free pages, to avoid getting in the way of the
+ * vm_pageout_scan() thread.
+ * Let's not use UPL_NOBLOCK in this case.
+ */
+ vs_offset &= ~cl_mask;
+ i = pages_in_cl;
+ } else {
+ i = 1;
+
+ /*
+ * if the I/O cluster size == PAGE_SIZE, we don't want to set
+ * the UPL_NOBLOCK since we may be trying to recover from a
+ * previous partial pagein I/O that occurred because we were low
+ * on memory and bailed early in order to honor the UPL_NOBLOCK...
+ * since we're only asking for a single page, we can block w/o fear
+ * of tying up pages while waiting for more to become available
+ */
+ if (fault_info == NULL || ((vm_object_fault_info_t)fault_info)->cluster_size > PAGE_SIZE)
+ request_flags |= UPL_NOBLOCK;
+ }
+
+again:
cl_index = (vs_offset & cl_mask) / vm_page_size;
if ((ps_clmap(vs, vs_offset & ~cl_mask, &clmap, CL_FIND, 0, 0) == (dp_offset_t)-1) ||
*/
return KERN_FAILURE;
}
+ if (reclaim_all == TRUE) {
+ i--;
+ if (i == 0) {
+ /* no more pages in this cluster */
+ return KERN_FAILURE;
+ }
+ /* try the next page in this cluster */
+ vs_offset += vm_page_size;
+ goto again;
+ }
page_list_count = 0;
memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
PAGE_SIZE, PAGE_SIZE,
&upl, NULL, &page_list_count,
- request_flags);
+ request_flags | UPL_SET_INTERNAL);
+ upl_range_needed(upl, 0, 1);
if (clmap.cl_error)
upl_abort(upl, UPL_ABORT_ERROR);
*/
return KERN_SUCCESS;
}
-
+
+	if (fault_info != NULL && ((vm_object_fault_info_t)fault_info)->io_sync == TRUE) {
+ io_sync = TRUE;
+ } else {
+#if RECLAIM_SWAP
+ io_sync = TRUE;
+#endif /* RECLAIM_SWAP */
+ }
+
+ if( io_sync == TRUE ) {
+
+ io_flags |= UPL_IOSYNC | UPL_NOCOMMIT;
+#if USE_PRECIOUS
+ request_flags |= UPL_PRECIOUS | UPL_CLEAN_IN_PLACE;
+#else /* USE_PRECIOUS */
+ request_flags |= UPL_REQUEST_SET_DIRTY;
+#endif /* USE_PRECIOUS */
+ }
+
assert(dp_encryption_inited);
if (dp_encryption) {
/*
* decryption.
*/
request_flags |= UPL_ENCRYPT;
+ io_flags |= UPL_PAGING_ENCRYPTED;
}
orig_vs_offset = vs_offset;
while (size > 0 && error == KERN_SUCCESS) {
unsigned int abort_size;
+ unsigned int lsize;
int failed_size;
int beg_pseg;
int beg_indx;
memory_object_super_upl_request(vs->vs_control, (memory_object_offset_t)vs_offset,
xfer_size, xfer_size,
&upl, NULL, &page_list_count,
- request_flags | UPL_SET_INTERNAL | UPL_NOBLOCK);
+ request_flags | UPL_SET_INTERNAL);
error = ps_read_file(psp[beg_pseg],
upl, (upl_offset_t) 0,
ps_offset[beg_pseg] + (beg_indx * vm_page_size),
xfer_size, &residual, io_flags);
-
- failed_size = 0;
+
/*
* Adjust counts and send response to VM. Optimize
* supplied data is deallocated from the pager's
* address space.
*/
- pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
+ lsize = xfer_size;
+ failed_size = 0;
} else {
+ lsize = 0;
failed_size = xfer_size;
if (error == KERN_SUCCESS) {
* of the range, if any.
*/
int fill;
- unsigned int lsize;
- fill = residual & ~vm_page_size;
+ fill = residual & (vm_page_size - 1);
lsize = (xfer_size - residual) + fill;
- pvs_object_data_provided(vs, upl, vs_offset, lsize);
-
- if (lsize < xfer_size) {
+ if (lsize < xfer_size)
failed_size = xfer_size - lsize;
+
+ if (reclaim_all == FALSE)
error = KERN_FAILURE;
- }
}
}
}
- if (error != KERN_SUCCESS) {
+ pvs_object_data_provided(vs, upl, vs_offset, lsize);
+
+ if (failed_size) {
/*
* There was an error in some part of the range, tell
* the VM. Note that error is explicitly checked again
upl_t upl;
upl_page_info_t *pl;
int page_index;
+ unsigned int page_max_index;
int list_size;
int pages_in_cl;
unsigned int cl_size;
int base_index;
unsigned int seg_size;
unsigned int upl_offset_in_object;
+ boolean_t minimal_clustering = FALSE;
+ boolean_t found_dirty;
+
+ if (!dp_encryption_inited) {
+ /*
+ * ENCRYPTED SWAP:
+ * Once we've started using swap, we
+ * can't change our mind on whether
+ * it needs to be encrypted or
+ * not.
+ */
+ dp_encryption_inited = TRUE;
+ }
+ if (dp_encryption) {
+ /*
+ * ENCRYPTED SWAP:
+ * the UPL will need to be encrypted...
+ */
+ flags |= UPL_PAGING_ENCRYPTED;
+ }
pages_in_cl = 1 << vs->vs_clshift;
cl_size = pages_in_cl * vm_page_size;
+#if CONFIG_FREEZE
+ minimal_clustering = TRUE;
+#else
+ if (dp_isssd == TRUE)
+ minimal_clustering = TRUE;
+#endif
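+	/*
+	 * With minimal clustering (freezer builds, or when the backing device
+	 * reports as SSD via dp_isssd), the write path below only allocates a
+	 * cluster for segments that actually contain dirty or precious pages,
+	 * presumably to cut down on needless writes to flash.
+	 */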
if (!dp_internal) {
unsigned int page_list_count;
int request_flags;
int num_of_pages;
int seg_index;
upl_offset_t upl_offset;
+ upl_offset_t upl_offset_aligned;
dp_offset_t seg_offset;
- dp_offset_t ps_offset[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1];
- paging_segment_t psp[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_DEF_CLSHIFT) + 1];
+ dp_offset_t ps_offset[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_MIN_CLSHIFT) + 1];
+ paging_segment_t psp[((VM_SUPER_CLUSTER / PAGE_SIZE) >> VSTRUCT_MIN_CLSHIFT) + 1];
- if (bs_low) {
+ if (bs_low)
super_size = cl_size;
-
- request_flags = UPL_NOBLOCK |
- UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
- UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE;
- } else {
+ else
super_size = VM_SUPER_CLUSTER;
- request_flags = UPL_NOBLOCK | UPL_CLEAN_IN_PLACE |
- UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
+ request_flags = UPL_NOBLOCK | UPL_CLEAN_IN_PLACE |
+ UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE;
- }
- if (!dp_encryption_inited) {
- /*
- * ENCRYPTED SWAP:
- * Once we've started using swap, we
- * can't change our mind on whether
- * it needs to be encrypted or
- * not.
- */
- dp_encryption_inited = TRUE;
- }
if (dp_encryption) {
/*
* ENCRYPTED SWAP:
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
seg_size = cl_size - (upl_offset_in_object % cl_size);
- upl_offset = upl_offset_in_object & ~(cl_size - 1);
+ upl_offset_aligned = upl_offset_in_object & ~(cl_size - 1);
+ page_index = 0;
+ page_max_index = upl->size / PAGE_SIZE;
+ found_dirty = TRUE;
-	for (seg_index = 0, transfer_size = upl->size;
-	     transfer_size > 0; ) {
-		ps_offset[seg_index] =
-			ps_clmap(vs,
-				 upl_offset,
-				 &clmap, CL_ALLOC,
-				 cl_size, 0);
-		if (ps_offset[seg_index] == (dp_offset_t) -1) {
-			upl_abort(upl, 0);
-			upl_deallocate(upl);
-
-			return KERN_FAILURE;
-		}
-		psp[seg_index] = CLMAP_PS(clmap);
+	for (seg_index = 0, transfer_size = upl->size; transfer_size > 0; ) {
+		unsigned int	seg_pgcnt;
+		seg_pgcnt = seg_size / PAGE_SIZE;
+
+ if (minimal_clustering == TRUE) {
+ unsigned int non_dirty;
+
+ non_dirty = 0;
+ found_dirty = FALSE;
+ for (; non_dirty < seg_pgcnt; non_dirty++) {
+ if ((page_index + non_dirty) >= page_max_index)
+ break;
+
+ if (UPL_DIRTY_PAGE(pl, page_index + non_dirty) ||
+ UPL_PRECIOUS_PAGE(pl, page_index + non_dirty)) {
+ found_dirty = TRUE;
+ break;
+ }
+ }
+ }
+ if (found_dirty == TRUE) {
+ ps_offset[seg_index] =
+ ps_clmap(vs,
+ upl_offset_aligned,
+ &clmap, CL_ALLOC,
+ cl_size, 0);
+
+ if (ps_offset[seg_index] == (dp_offset_t) -1) {
+ upl_abort(upl, 0);
+ upl_deallocate(upl);
+
+ return KERN_FAILURE;
+ }
+ psp[seg_index] = CLMAP_PS(clmap);
+ }
if (transfer_size > seg_size) {
+ page_index += seg_pgcnt;
transfer_size -= seg_size;
- upl_offset += cl_size;
- seg_size = cl_size;
+ upl_offset_aligned += cl_size;
+ seg_size = cl_size;
seg_index++;
} else
transfer_size = 0;
* Ignore any non-present pages at the end of the
* UPL.
*/
- for (page_index = upl->size / vm_page_size; page_index > 0;)
- if (UPL_PAGE_PRESENT(pl, --page_index))
+ for (page_index = upl->size / vm_page_size; page_index > 0;) {
+ if (UPL_PAGE_PRESENT(pl, --page_index)) {
+ page_index++;
break;
- num_of_pages = page_index + 1;
+ }
+ }
+ if (page_index == 0) {
+ /*
+ * no pages in the UPL
+ * abort and return
+ */
+ upl_abort(upl, 0);
+ upl_deallocate(upl);
+
+ return KERN_SUCCESS;
+ }
+ num_of_pages = page_index;
base_index = (upl_offset_in_object % cl_size) / PAGE_SIZE;
ps_offset[seg_index]
+ seg_offset,
transfer_size, flags);
- } else {
- boolean_t empty = FALSE;
- upl_abort_range(upl,
- first_dirty * vm_page_size,
- num_dirty * vm_page_size,
- UPL_ABORT_NOTIFY_EMPTY,
- &empty);
- if (empty) {
- assert(page_index == num_of_pages);
- upl_deallocate(upl);
- }
}
}
vs->vs_xfer_pending = FALSE;
VS_UNLOCK(vs);
vs_finish_write(vs);
+
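+	/*
+	 * Between clusters: bail out if compaction has been stopped or
+	 * aborted (e.g. space ran low again), and pace the transfer via
+	 * vnode_pager_throttle() so it does not swamp the vnode pager.
+	 */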
+ if (backing_store_abort_compaction || backing_store_stop_compaction) {
+ backing_store_abort_compaction = FALSE;
+ dprintf(("ps_vstruct_transfer_from_segment - ABORTED\n"));
+ return KERN_FAILURE;
+ }
+ vnode_pager_throttle();
+
VS_LOCK(vs);
vs->vs_xfer_pending = TRUE;
vs_wait_for_sync_writers(vs);
/* NEED TO ISSUE WITH SYNC & NO COMMIT */
error = ps_read_file(ps, upl, (upl_offset_t) 0, actual_offset,
size, &residual,
- (UPL_IOSYNC | UPL_NOCOMMIT));
+ (UPL_IOSYNC | UPL_NOCOMMIT | (dp_encryption ? UPL_PAGING_ENCRYPTED : 0)));
}
read_vsmap = *vsmap_ptr;
PS_LOCK_INIT(ps);
ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls));
if (!ps->ps_bmap) {
+ PS_LOCK_DESTROY(ps);
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
if ((error = ps_enter(ps)) != 0) {
kfree(ps->ps_bmap, RMAPSIZE(ps->ps_ncls));
+ PS_LOCK_DESTROY(ps);
kfree(ps, sizeof *ps);
BS_UNLOCK(bs);
return KERN_RESOURCE_SHORTAGE;
* emergency segment will be back to its original state of
* online but not activated (till it's needed the next time).
*/
- ps = paging_segments[EMERGENCY_PSEG_INDEX];
- if(IS_PS_EMERGENCY_SEGMENT(ps) && IS_PS_OK_TO_USE(ps)) {
- if(default_pager_backing_store_delete(emergency_segment_backing_store)) {
- dprintf(("Failed to recover emergency paging segment\n"));
- } else {
- dprintf(("Recovered emergency paging segment\n"));
+#if CONFIG_FREEZE
+ if (!memorystatus_freeze_enabled)
+#endif
+ {
+ ps = paging_segments[EMERGENCY_PSEG_INDEX];
+ if(IS_PS_EMERGENCY_SEGMENT(ps) && IS_PS_OK_TO_USE(ps)) {
+ if(default_pager_backing_store_delete(emergency_segment_backing_store)) {
+ dprintf(("Failed to recover emergency paging segment\n"));
+ } else {
+ dprintf(("Recovered emergency paging segment\n"));
+ }
}
}
return result;
}
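+/*
+ * No-op trim helpers: the #pragma unused bodies make these placeholders, so
+ * the cluster-deallocation paths above can call them unconditionally.  A
+ * configuration that actually trims the paging vnode would presumably fill
+ * these in.
+ */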
+static inline void ps_vnode_trim_init(struct ps_vnode_trim_data *data)
+{
+#pragma unused(data)
+}
+
+static inline void ps_vnode_trim_now(struct ps_vnode_trim_data *data)
+{
+#pragma unused(data)
+}
+
+static inline void ps_vnode_trim_more(struct ps_vnode_trim_data *data, struct vs_map *map, unsigned int shift, dp_size_t length)
+{
+#pragma unused(data, map, shift, length)
+}
+
kern_return_t
default_pager_triggers( __unused MACH_PORT_FACE default_pager,
int hi_wat,
int flags,
MACH_PORT_FACE trigger_port)
{
- MACH_PORT_FACE release;
+ MACH_PORT_FACE release = IPC_PORT_NULL;
kern_return_t kr;
clock_sec_t now;
clock_nsec_t nanoseconds_dummy;
}
} else if (flags == HI_WAT_ALERT) {
release = min_pages_trigger_port;
- min_pages_trigger_port = trigger_port;
- minimum_pages_remaining = hi_wat/vm_page_size;
- bs_low = FALSE;
- kr = KERN_SUCCESS;
+#if CONFIG_FREEZE
+ /* High and low water signals aren't applicable when freeze is */
+ /* enabled, so release the trigger ports here and return */
+ /* KERN_FAILURE. */
+ if (memorystatus_freeze_enabled) {
+ if (IP_VALID( trigger_port )){
+ ipc_port_release_send( trigger_port );
+ }
+ min_pages_trigger_port = IPC_PORT_NULL;
+ kr = KERN_FAILURE;
+ }
+ else
+#endif
+ {
+ min_pages_trigger_port = trigger_port;
+ minimum_pages_remaining = hi_wat/vm_page_size;
+ bs_low = FALSE;
+ kr = KERN_SUCCESS;
+ }
} else if (flags == LO_WAT_ALERT) {
release = max_pages_trigger_port;
- max_pages_trigger_port = trigger_port;
- maximum_pages_free = lo_wat/vm_page_size;
- kr = KERN_SUCCESS;
+#if CONFIG_FREEZE
+ if (memorystatus_freeze_enabled) {
+ if (IP_VALID( trigger_port )){
+ ipc_port_release_send( trigger_port );
+ }
+ max_pages_trigger_port = IPC_PORT_NULL;
+ kr = KERN_FAILURE;
+ }
+ else
+#endif
+ {
+ max_pages_trigger_port = trigger_port;
+ maximum_pages_free = lo_wat/vm_page_size;
+ kr = KERN_SUCCESS;
+ }
} else if (flags == USE_EMERGENCY_SWAP_FILE_FIRST) {
use_emergency_swap_file_first = TRUE;
release = trigger_port;
} else {
VSL_UNLOCK();
}
+ dprintf(("default_pager_backing_store_monitor - send LO_WAT_ALERT\n"));
+
default_pager_space_alert(trigger, LO_WAT_ALERT);
ipc_port_release_send(trigger);
dp_pages_free_low_count = 0;
clock_interval_to_deadline(PF_INTERVAL, NSEC_PER_SEC, &deadline);
thread_call_enter_delayed(default_pager_backing_store_monitor_callout, deadline);
}
+
+#if CONFIG_FREEZE
+unsigned int default_pager_swap_pages_free() {
+ return dp_pages_free;
+}
+#endif