+
+
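+/* Statistics: number of calls to vm_object_change_wimg_mode() */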
+unsigned int vm_object_change_wimg_mode_count = 0;
+
+/*
+ * Set the given WIMG (cache attribute) mode on every resident page
+ * of the object and record the new mode in the object itself.
+ *
+ * The object must be locked exclusively by the caller.
+ */
+void
+vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
+{
+ vm_page_t p;
+
+ vm_object_lock_assert_exclusive(object);
+
+ vm_object_paging_wait(object, THREAD_UNINT);
+
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+
+ if (!p->fictitious)
+ pmap_set_cache_attributes(p->phys_page, wimg_mode);
+ }
+ if (wimg_mode == VM_WIMG_USE_DEFAULT)
+ object->set_cache_attr = FALSE;
+ else
+ object->set_cache_attr = TRUE;
+
+ object->wimg_bits = wimg_mode;
+
+ vm_object_change_wimg_mode_count++;
+}
+
+#if CONFIG_FREEZE
+
+__private_extern__ void default_freezer_pack_page(vm_page_t, vm_object_t, vm_object_offset_t, void**);
+__private_extern__ void default_freezer_unpack(vm_object_t, void**);
+
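+/*
+ * vm_object_pack:
+ *
+ * Walk src_object and report how many of its pages are purgeable,
+ * wired, clean, and dirty.  If compact_object is VM_OBJECT_NULL this
+ * is a counting pass only; otherwise volatile purgeable objects are
+ * purged outright, and single-reference objects have their pages
+ * packed into compact_object via vm_object_pack_pages().  Internal
+ * objects with more than one reference are only reported as shared.
+ *
+ * Takes and drops the src_object lock.
+ */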
+kern_return_t vm_object_pack(
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ boolean_t *shared,
+ vm_object_t src_object,
+ vm_object_t compact_object,
+ void **table,
+ vm_object_offset_t *offset)
+{
+ kern_return_t kr = KERN_SUCCESS;
+
+ vm_object_lock(src_object);
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *shared = FALSE;
+
+ if (!src_object->alive || src_object->terminating) {
+ kr = KERN_FAILURE;
+ goto done;
+ }
+
+ if (src_object->purgable == VM_PURGABLE_VOLATILE) {
+ *purgeable_count = src_object->resident_page_count;
+
+ /* If the destination object is null, we're just walking the
+ * pages to discover how many can be hibernated.
+ */
+ if (VM_OBJECT_NULL != compact_object) {
+ purgeable_q_t queue;
+ /* object should be on a queue */
+ assert(src_object->objq.next != NULL &&
+ src_object->objq.prev != NULL);
+ queue = vm_purgeable_object_remove(src_object);
+ assert(queue);
+ vm_page_lock_queues();
+ vm_purgeable_token_delete_first(queue);
+ vm_page_unlock_queues();
+ vm_object_purge(src_object);
+ }
+ goto done;
+ }
+
+ if (src_object->ref_count == 1) {
+ vm_object_pack_pages(wired_count, clean_count, dirty_count, src_object, compact_object, table, offset);
+ } else {
+ if (src_object->internal) {
+ *shared = TRUE;
+ }
+ }
+done:
+ vm_object_unlock(src_object);
+
+ return kr;
+}
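+/*
+ * Hypothetical usage sketch: a freezer path might make a counting
+ * pass first to size the job, then a packing pass:
+ *
+ *     vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared,
+ *                    object, VM_OBJECT_NULL, NULL, NULL);
+ *     ... decide whether freezing is worthwhile ...
+ *     vm_object_pack(&purgeable, &wired, &clean, &dirty, &shared,
+ *                    object, compact_object, &table, &offset);
+ */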
+
+
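+/*
+ * vm_object_pack_pages:
+ *
+ * Helper for vm_object_pack(), called with src_object locked.  With a
+ * null compact_object it only tallies wired/clean/dirty pages; with a
+ * real destination it hands dirty pages to the default freezer and
+ * frees clean ones, advancing *offset by PAGE_SIZE per packed page.
+ */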
+void
+vm_object_pack_pages(
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ vm_object_t src_object,
+ vm_object_t compact_object,
+ void **table,
+ vm_object_offset_t *offset)
+{
+ vm_page_t p, next;
+
+ next = (vm_page_t)queue_first(&src_object->memq);
+
+ /* This function is dual-purpose: with a null compact_object it
+ * only counts the freezable pages; otherwise it actually packs
+ * them. Assert that the arguments are consistent with one of
+ * those two modes. Gnarly, but avoids code duplication.
+ */
+ if (VM_OBJECT_NULL == compact_object) {
+ assert(!table);
+ assert(!offset);
+ } else {
+ assert(table);
+ assert(offset);
+ }
+
+ while (!queue_end(&src_object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ if (p->fictitious || p->busy)
+ continue;
+
+ if (p->absent || p->unusual || p->error)
+ continue;
+
+ if (VM_PAGE_WIRED(p)) {
+ (*wired_count)++;
+ continue;
+ }
+
+ if (VM_OBJECT_NULL == compact_object) {
+ if (p->dirty || pmap_is_modified(p->phys_page)) {
+ (*dirty_count)++;
+ } else {
+ (*clean_count)++;
+ }
+ continue;
+ }
+
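+ /* Page is already being cleaned: mark it busy/pageout and wire
+ * it so it can't be reclaimed while the clean is in flight.
+ */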
+ if (p->cleaning) {
+ p->busy = TRUE;
+ p->pageout = TRUE;
+ p->dump_cleaning = TRUE;
+
+ vm_page_lockspin_queues();
+ vm_page_wire(p);
+ vm_page_unlock_queues();
+
+ continue;
+ }
+
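+ /* Disconnect the page from all pmaps and pick up any modify
+ * bit the hardware may have set since we last looked.
+ */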
+ if (p->pmapped == TRUE) {
+ int refmod_state;
+ refmod_state = pmap_disconnect(p->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ p->dirty = TRUE;
+ }
+ }
+
+ if (p->dirty) {
+ p->busy = TRUE;
+
+ default_freezer_pack_page(p, compact_object, *offset, table);
+ *offset += PAGE_SIZE;
+
+ (*dirty_count)++;
+ }
+ else {
+ VM_PAGE_FREE(p);
+ (*clean_count)++;
+ }
+ }
+}
+
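+/*
+ * vm_object_pageout:
+ *
+ * Push every resident page of the object onto the pageout queue
+ * via vm_pageout_cluster().
+ */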
+void
+vm_object_pageout(
+ vm_object_t object)
+{
+ vm_page_t p, next;
+
+ assert(object != VM_OBJECT_NULL);
+
+ vm_object_lock(object);
+
+ next = (vm_page_t)queue_first(&object->memq);
+
+ while (!queue_end(&object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ /* Throw to the pageout queue */
+ vm_page_lockspin_queues();
+
+ VM_PAGE_QUEUES_REMOVE(p);
+ vm_pageout_cluster(p);
+
+ vm_page_unlock_queues();
+ }
+
+ vm_object_unlock(object);
+}
+
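+/*
+ * vm_object_pagein:
+ *
+ * Ask the object's pager to reclaim its data, blocking new access
+ * to the object while the pager works.  Fails if the pager is not
+ * ready or absent.
+ */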
+kern_return_t
+vm_object_pagein(
+ vm_object_t object)
+{
+ memory_object_t pager;
+ kern_return_t kr;
+
+ vm_object_lock(object);
+
+ pager = object->pager;
+
+ if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ vm_object_paging_wait(object, THREAD_UNINT);
+ vm_object_paging_begin(object);
+
+ object->blocked_access = TRUE;
+ vm_object_unlock(object);
+
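+ /* Have the pager bring the object's data back */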
+ kr = memory_object_data_reclaim(pager, TRUE);
+
+ vm_object_lock(object);
+
+ object->blocked_access = FALSE;
+ vm_object_paging_end(object);
+
+ vm_object_unlock(object);
+
+ return kr;
+}
+
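+/*
+ * vm_object_unpack:
+ *
+ * Hand the compact object and its freezer table back to the default
+ * freezer so the packed pages can be restored.
+ */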
+void
+vm_object_unpack(
+ vm_object_t compact_object,
+ void **table)
+{
+ /*
+ * Future Work:
+ * Right now we treat the default freezer much like
+ * the default pager with respect to when it is
+ * created and terminated.
+ * But, in the future, we may want to terminate the
+ * default freezer at the very instant that an object
+ * has been completely re-filled with all its previously
+ * paged-out pages.
+ * At that time we'll need to reset the object fields like
+ * "pager" and the associated "pager_{created,initialized,trusted}"
+ * fields right here.
+ */
+ default_freezer_unpack(compact_object, table);
+}
+
+#endif /* CONFIG_FREEZE */