+
+#if CONFIG_FREEZE
+
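+/*
+ * vm_map_freeze_walk:
+ *
+ * Walk the map under a read lock and tally the purgeable, wired, clean and
+ * dirty pages that a freeze would touch, stopping once the dirty page budget
+ * is exhausted (a budget of 0 means no limit).  A NULL freezer handle is
+ * passed to default_freezer_pack(), so this is a counting pass only: nothing
+ * is actually packed.  *has_shared is set to TRUE if any object visited is
+ * shared.
+ */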
+kern_return_t vm_map_freeze_walk(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared)
+{
+ vm_map_entry_t entry;
+
+ vm_map_lock_read(map);
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *has_shared = FALSE;
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ unsigned int purgeable, clean, dirty, wired;
+ boolean_t shared;
+
+ if ((entry->object.vm_object == 0) ||
+ (entry->is_sub_map) ||
+ (entry->object.vm_object->phys_contiguous)) {
+ continue;
+ }
+
+ default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared, entry->object.vm_object, NULL);
+
+ *purgeable_count += purgeable;
+ *wired_count += wired;
+ *clean_count += clean;
+ *dirty_count += dirty;
+
+ if (shared) {
+ *has_shared = TRUE;
+ }
+
+ /* Adjust pageout budget and finish up if reached */
+ if (dirty_budget) {
+ dirty_budget -= dirty;
+ if (dirty_budget == 0) {
+ break;
+ }
+ }
+ }
+
+ vm_map_unlock_read(map);
+
+ return KERN_SUCCESS;
+}
+
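+/*
+ * vm_map_freeze:
+ *
+ * Freeze the map, taking the exclusive map lock to block page faults and
+ * lookups while the entries are processed.  When the default freezer is
+ * active, each eligible entry's pages are packed through the map's default
+ * freezer handle and then paged out to swap; otherwise, internal objects are
+ * handed to the compressor via vm_object_pageout().  The counters and
+ * dirty_budget behave as in vm_map_freeze_walk() above.
+ */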
+kern_return_t vm_map_freeze(
+ vm_map_t map,
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ unsigned int dirty_budget,
+ boolean_t *has_shared)
+{
+ vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL;
+ kern_return_t kr = KERN_SUCCESS;
+ boolean_t default_freezer_active = TRUE;
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *has_shared = FALSE;
+
+ /*
+ * We need the exclusive lock here so that we can
+ * block any page faults or lookups while we are
+ * in the middle of freezing this vm map.
+ */
+ vm_map_lock(map);
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ default_freezer_active = FALSE;
+ }
+
+ if (default_freezer_active) {
+ if (map->default_freezer_handle == NULL) {
+ map->default_freezer_handle = default_freezer_handle_allocate();
+ }
+
+ if ((kr = default_freezer_handle_init(map->default_freezer_handle)) != KERN_SUCCESS) {
+ /*
+ * This can happen if the default_freezer_handle passed in is NULL,
+ * or if a table has already been allocated and associated with
+ * this handle, i.e. the map is already frozen.
+ */
+ goto done;
+ }
+ }
+
+ for (entry2 = vm_map_first_entry(map);
+ entry2 != vm_map_to_entry(map);
+ entry2 = entry2->vme_next) {
+
+ vm_object_t src_object = entry2->object.vm_object;
+
+ if (entry2->object.vm_object && !entry2->is_sub_map && !entry2->object.vm_object->phys_contiguous) {
+ /* If eligible, scan the entry, moving eligible pages over to our parent object */
+ if (default_freezer_active) {
+ unsigned int purgeable, clean, dirty, wired;
+ boolean_t shared;
+
+ default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared,
+ src_object, map->default_freezer_handle);
+
+ *purgeable_count += purgeable;
+ *wired_count += wired;
+ *clean_count += clean;
+ *dirty_count += dirty;
+
+ /* Adjust pageout budget and finish up if reached */
+ if (dirty_budget) {
+ dirty_budget -= dirty;
+ if (dirty_budget == 0) {
+ break;
+ }
+ }
+
+ if (shared) {
+ *has_shared = TRUE;
+ }
+ } else {
+ /*
+ * No default freezer: send the internal object's pages
+ * to the compressor instead.
+ */
+ if (entry2->object.vm_object->internal == TRUE) {
+ vm_object_pageout(entry2->object.vm_object);
+ }
+ }
+ }
+ }
+
+ if (default_freezer_active) {
+ /* Finally, throw out the pages to swap */
+ default_freezer_pageout(map->default_freezer_handle);
+ }
+
+done:
+ vm_map_unlock(map);
+
+ return kr;
+}
+
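+/*
+ * vm_map_thaw:
+ *
+ * Undo a previous vm_map_freeze() by unpacking the map's default freezer
+ * handle.  Returns KERN_FAILURE if the map is not frozen.  When the
+ * compressed pager is active, thawing happens on demand and this is a no-op.
+ */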
+kern_return_t
+vm_map_thaw(
+ vm_map_t map)
+{
+ kern_return_t kr = KERN_SUCCESS;
+
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ /*
+ * Pages are thawed on demand when the compressed pager is active,
+ * so there is nothing to do here.
+ */
+ return kr;
+ }
+
+ vm_map_lock(map);
+
+ if (map->default_freezer_handle == NULL) {
+ /*
+ * This map is not in a frozen state.
+ */
+ kr = KERN_FAILURE;
+ goto out;
+ }
+
+ kr = default_freezer_unpack(map->default_freezer_handle);
+out:
+ vm_map_unlock(map);
+
+ return kr;
+}
+#endif /* CONFIG_FREEZE */
+
+/*
+ * vm_map_entry_should_cow_for_true_share:
+ *
+ * Determines if the map entry should be clipped and setup for copy-on-write
+ * to avoid applying "true_share" to a large VM object when only a subset is
+ * targeted.
+ *
+ * For now, we target only the map entries created for the Objective-C
+ * Garbage Collector, which initially have the following properties:
+ * - alias == VM_MEMORY_MALLOC
+ * - wired_count == 0
+ * - !needs_copy
+ * and a VM object with:
+ * - internal
+ * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC
+ * - !true_share
+ * - vo_size == ANON_CHUNK_SIZE
+ */
+boolean_t
+vm_map_entry_should_cow_for_true_share(
+ vm_map_entry_t entry)
+{
+ vm_object_t object;
+
+ if (entry->is_sub_map) {
+ /* entry does not point at a VM object */
+ return FALSE;
+ }
+
+ if (entry->needs_copy) {
+ /* already set for copy_on_write: done! */
+ return FALSE;
+ }
+
+ if (entry->alias != VM_MEMORY_MALLOC) {
+ /* not tagged as an Objective-C Garbage Collector entry */
+ return FALSE;
+ }
+
+ if (entry->wired_count) {
+ /* wired: can't change the map entry... */
+ return FALSE;
+ }
+
+ object = entry->object.vm_object;
+
+ if (object == VM_OBJECT_NULL) {
+ /* no object yet... */
+ return FALSE;
+ }
+
+ if (!object->internal) {
+ /* not an internal object */
+ return FALSE;
+ }
+
+ if (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
+ /* not the default copy strategy */
+ return FALSE;
+ }
+
+ if (object->true_share) {
+ /* already true_share: too late to avoid it */
+ return FALSE;
+ }
+
+ if (object->vo_size != ANON_CHUNK_SIZE) {
+ /* not an object created for the ObjC Garbage Collector */
+ return FALSE;
+ }
+
+ /*
+ * All the criteria match: we have a large object being targeted for "true_share".
+ * To limit its adverse side effects, tell the caller to avoid marking the
+ * entire object "true_share" by clipping the targeted range and setting it
+ * up for copy-on-write instead.
+ */
+ return TRUE;
+}
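+
+/*
+ * Illustrative sketch, not part of this change: a caller that gets TRUE back,
+ * while holding the map lock for writing, would typically clip the entry down
+ * to the range it is actually touching and mark it for copy-on-write, so that
+ * only the clipped range pays the COW cost instead of the whole object
+ * becoming "true_share".  The exact logic in the fault/lookup path may
+ * differ; "start" and "end" stand in for the caller's target range.
+ *
+ *	if (vm_map_entry_should_cow_for_true_share(entry)) {
+ *		vm_map_clip_start(map, entry,
+ *		    VM_MAP_TRUNC_PAGE(start, VM_MAP_PAGE_MASK(map)));
+ *		vm_map_clip_end(map, entry,
+ *		    VM_MAP_ROUND_PAGE(end, VM_MAP_PAGE_MASK(map)));
+ *		entry->needs_copy = TRUE;
+ *	}
+ */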
+
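+/*
+ * Exported wrappers around the per-map page geometry macros: round or
+ * truncate an offset with an explicit page mask, and report the page shift,
+ * size and mask of a given map.
+ */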
+vm_map_offset_t
+vm_map_round_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask)
+{
+ return VM_MAP_ROUND_PAGE(offset, mask);
+}
+
+vm_map_offset_t
+vm_map_trunc_page_mask(
+ vm_map_offset_t offset,
+ vm_map_offset_t mask)
+{
+ return VM_MAP_TRUNC_PAGE(offset, mask);
+}
+
+int
+vm_map_page_shift(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_SHIFT(map);
+}
+
+int
+vm_map_page_size(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_SIZE(map);
+}
+
+int
+vm_map_page_mask(
+ vm_map_t map)
+{
+ return VM_MAP_PAGE_MASK(map);
+}
+
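+/*
+ * vm_map_set_page_shift:
+ *
+ * Change the page shift used by a map.  This is only allowed while the map
+ * is still empty; once it has entries, KERN_FAILURE is returned.
+ */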
+kern_return_t
+vm_map_set_page_shift(
+ vm_map_t map,
+ int pageshift)
+{
+ if (map->hdr.nentries != 0) {
+ /* too late to change page size */
+ return KERN_FAILURE;
+ }
+
+ map->hdr.page_shift = pageshift;
+
+ return KERN_SUCCESS;
+}
+
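+/*
+ * vm_map_query_volatile:
+ *
+ * Report, for the writable entries of a map that are backed by volatile
+ * purgeable objects, the total virtual size, the resident size and the size
+ * resident in the pmap.  The caller must hold the map lock; the map is still
+ * locked on return.
+ */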
+kern_return_t
+vm_map_query_volatile(
+ vm_map_t map,
+ mach_vm_size_t *volatile_virtual_size_p,
+ mach_vm_size_t *volatile_resident_size_p,
+ mach_vm_size_t *volatile_pmap_size_p)
+{
+ mach_vm_size_t volatile_virtual_size;
+ mach_vm_size_t volatile_resident_count;
+ mach_vm_size_t volatile_pmap_count;
+ mach_vm_size_t resident_count;
+ vm_map_entry_t entry;
+ vm_object_t object;
+
+ /* map should be locked by caller */
+
+ volatile_virtual_size = 0;
+ volatile_resident_count = 0;
+ volatile_pmap_count = 0;
+
+ for (entry = vm_map_first_entry(map);
+ entry != vm_map_to_entry(map);
+ entry = entry->vme_next) {
+ if (entry->is_sub_map) {
+ continue;
+ }
+ if (! (entry->protection & VM_PROT_WRITE)) {
+ continue;
+ }
+ object = entry->object.vm_object;
+ if (object == VM_OBJECT_NULL) {
+ continue;
+ }
+ if (object->purgable != VM_PURGABLE_VOLATILE) {
+ continue;
+ }
+ if (entry->offset != 0) {
+ /*
+ * If the map entry has been split and the object now
+ * appears several times in the VM map, we don't want
+ * to count the object's resident_page_count more than
+ * once.  We count it only for the first entry, the one
+ * mapping offset 0, and ignore the other VM map entries.
+ */
+ continue;
+ }
+ resident_count = object->resident_page_count;
+ if ((entry->offset / PAGE_SIZE) >= resident_count) {
+ resident_count = 0;
+ } else {
+ resident_count -= (entry->offset / PAGE_SIZE);
+ }
+
+ volatile_virtual_size += entry->vme_end - entry->vme_start;
+ volatile_resident_count += resident_count;
+ volatile_pmap_count += pmap_query_resident(map->pmap,
+ entry->vme_start,
+ entry->vme_end);
+ }
+
+ /* map is still locked on return */
+
+ *volatile_virtual_size_p = volatile_virtual_size;
+ *volatile_resident_size_p = volatile_resident_count * PAGE_SIZE;
+ *volatile_pmap_size_p = volatile_pmap_count * PAGE_SIZE;
+
+ return KERN_SUCCESS;
+}