+kern_return_t
+ps_vstruct_reclaim(
+        vstruct_t vs,
+        boolean_t return_to_vm,
+        boolean_t reclaim_backing_store)
+{
+        unsigned int i, j;
+        struct vs_map *vsmap;
+        boolean_t vsmap_all_clear, vsimap_all_clear;
+        struct vm_object_fault_info fault_info;
+        int clmap_off;
+        unsigned int vsmap_size;
+        kern_return_t kr = KERN_SUCCESS;
+
+        VS_MAP_LOCK(vs);
+
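+        /*
+         * Fault info for the cluster reads below: sequential behavior
+         * over the entire vstruct, with synchronous I/O when the caller
+         * asks for the backing store to be reclaimed.
+         */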
+        fault_info.cluster_size = VM_SUPER_CLUSTER;
+        fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
+        fault_info.user_tag = 0;
+        fault_info.lo_offset = 0;
+        fault_info.hi_offset = ptoa_32(vs->vs_size << vs->vs_clshift);
+        fault_info.io_sync = reclaim_backing_store;
+        fault_info.batch_pmap_op = FALSE;
+
+        /*
+         * If this is an indirect structure, then we walk through the valid
+         * (non-zero) indirect pointers and deallocate the clusters
+         * associated with each used map entry (via ps_dealloc_vsmap).
+         * When all of the clusters in an indirect block have been
+         * freed, we deallocate the block.  When all of the indirect
+         * blocks have been deallocated we deallocate the memory
+         * holding the indirect pointers.
+         */
+        if (vs->vs_indirect) {
+                vsimap_all_clear = TRUE;
+                for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
+                        vsmap = vs->vs_imap[i];
+                        if (vsmap == NULL)
+                                continue;
+                        /* loop on clusters in this indirect map */
+                        clmap_off = (vm_page_size * CLMAP_ENTRIES *
+                                     VSCLSIZE(vs) * i);
+                        if (i + 1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size))
+                                vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i);
+                        else
+                                vsmap_size = CLMAP_ENTRIES;
+                        vsmap_all_clear = TRUE;
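+                        /*
+                         * Optionally page each used cluster back into VM.
+                         * Note that j only advances via the clear/error
+                         * check below: a successful pvs_cluster_read is
+                         * expected to leave the map entry clear, so the
+                         * next pass skips over it.
+                         */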
+                        if (return_to_vm) {
+                                for (j = 0; j < vsmap_size;) {
+                                        if (VSM_ISCLR(vsmap[j]) ||
+                                            VSM_ISERR(vsmap[j])) {
+                                                j++;
+                                                clmap_off += vm_page_size * VSCLSIZE(vs);
+                                                continue;
+                                        }
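+                                        /*
+                                         * Drop the map lock across the
+                                         * read; it can block on I/O.
+                                         */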
+                                        VS_MAP_UNLOCK(vs);
+                                        kr = pvs_cluster_read(
+                                                vs,
+                                                clmap_off,
+                                                (dp_size_t) -1, /* read whole cluster */
+                                                &fault_info);
+
+                                        VS_MAP_LOCK(vs); /* XXX what if it changed ? */
+                                        if (kr != KERN_SUCCESS) {
+                                                vsmap_all_clear = FALSE;
+                                                vsimap_all_clear = FALSE;
+
+                                                kr = KERN_MEMORY_ERROR;
+                                                goto out;
+                                        }
+                                }
+                        }
+                        if (vsmap_all_clear) {
+                                ps_dealloc_vsmap(vsmap, CLMAP_ENTRIES);
+                                kfree(vsmap, CLMAP_THRESHOLD);
+                                vs->vs_imap[i] = NULL;
+                        }
+                }
+                if (vsimap_all_clear) {
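+                        /*
+                         * Deliberately not freed here; presumably the
+                         * indirect map itself is released when the
+                         * vstruct is deallocated.
+                         */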
+//                      kfree(vs->vs_imap, INDIRECT_CLMAP_SIZE(vs->vs_size));
+                }
+        } else {
+                /*
+                 * Direct map.  Free used clusters, then memory.
+                 */
+                vsmap = vs->vs_dmap;
+                if (vsmap == NULL) {
+                        goto out;
+                }
+                vsmap_all_clear = TRUE;
+                /* loop on clusters in the direct map */
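+                /*
+                 * As in the indirect case, j only advances when an entry
+                 * tests clear or in error; a successful read is expected
+                 * to clear the entry before the next pass.
+                 */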
+                if (return_to_vm) {
+                        for (j = 0; j < vs->vs_size;) {
+                                if (VSM_ISCLR(vsmap[j]) ||
+                                    VSM_ISERR(vsmap[j])) {
+                                        j++;
+                                        continue;
+                                }
+                                clmap_off = vm_page_size * (j << vs->vs_clshift);
+                                VS_MAP_UNLOCK(vs);
+                                kr = pvs_cluster_read(
+                                        vs,
+                                        clmap_off,
+                                        (dp_size_t) -1, /* read whole cluster */
+                                        &fault_info);
+
+                                VS_MAP_LOCK(vs); /* XXX what if it changed ? */
+                                if (kr != KERN_SUCCESS) {
+                                        vsmap_all_clear = FALSE;
+
+                                        kr = KERN_MEMORY_ERROR;
+                                        goto out;
+                                } else {
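+                                        /*
+                                         * Explicit clearing is left
+                                         * disabled; the read itself is
+                                         * expected to clear the entry.
+                                         */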
+//                                      VSM_CLR(vsmap[j]);
+                                }
+                        }
+                }
+                if (vsmap_all_clear) {
+                        ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size);
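+                        /*
+                         * Deliberately not freed here; presumably the
+                         * direct map is released when the vstruct is
+                         * deallocated.
+                         */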
+//                      kfree(vs->vs_dmap, CLMAP_SIZE(vs->vs_size));
+                }
+        }
+out:
+        VS_MAP_UNLOCK(vs);
+
+        return kr;
+}
+