/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>
-#define KERNEL
#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_purgeable_internal.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static boolean_t
consider_discard(vm_page_t m)
{
- register vm_object_t object = 0;
+ vm_object_t object = NULL;
int refmod_state;
boolean_t discard = FALSE;
if (m->cleaning)
break;
+ if (m->laundry || m->list_req_pending)
+ break;
+
if (!m->dirty)
{
refmod_state = pmap_get_refmod(m->phys_page);
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
}
/*
- * If it's clean we can discard the page on wakeup.
+ * If it's clean or purgeable we can discard the page on wakeup.
+ * JMM - consider purgeable (volatile or empty) objects here as well.
*/
- discard = !m->dirty;
+ discard = (!m->dirty)
+ || (VM_PURGABLE_VOLATILE == object->purgable)
+ || (VM_PURGABLE_EMPTY == object->purgable);
}
while (FALSE);
return (discard);
}

static void
discard_page(vm_page_t m)
{
- if (!m->no_isync)
+ if (m->pmapped == TRUE)
{
- int refmod_state = pmap_disconnect(m->phys_page);
-
- if (refmod_state & VM_MEM_REFERENCED)
- m->reference = TRUE;
- if (refmod_state & VM_MEM_MODIFIED)
- m->dirty = TRUE;
+ __unused int refmod_state = pmap_disconnect(m->phys_page);
}
- if (m->dirty)
- panic("discard_page(%p) dirty", m);
if (m->laundry)
panic("discard_page(%p) laundry", m);
if (m->private)
panic("discard_page(%p) private", m);
if (m->fictitious)
panic("discard_page(%p) fictitious", m);
+ if (VM_PURGABLE_VOLATILE == m->object->purgable)
+ {
+ assert(m->object->objq.next != NULL && m->object->objq.prev != NULL); /* object should be on a queue */
+ purgeable_q_t old_queue=vm_purgeable_object_remove(m->object);
+ assert(old_queue);
+ /* No need to lock the page queue for the token delete;
+ hibernate_vm_unlock() makes sure these locks are uncontended before sleep. */
+ vm_purgeable_token_delete_first(old_queue);
+ m->object->purgable = VM_PURGABLE_EMPTY;
+ }
+
+ if (m->tabled)
+ vm_page_remove(m);
+
vm_page_free(m);
}
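For orientation, here is a minimal standalone sketch (not part of the patch) of the test the new consider_discard() path applies: a page may be dropped from the hibernation image when it is clean, or when its backing object is purgeable-volatile or purgeable-empty. The types and the can_discard() helper below are simplified stand-ins, not the kernel's vm_page/vm_object definitions; the do { ... } while (FALSE) form mirrors the early-exit style of the real function.

    #include <stdbool.h>

    /* Simplified stand-ins for the fields consider_discard() inspects. */
    typedef enum { NONVOLATILE, VOLATILE, EMPTY } purgeable_t;

    typedef struct {
        purgeable_t purgable;      /* spelled "purgable", as in the kernel headers */
    } object_t;

    typedef struct {
        bool      dirty;           /* modified since last pageout */
        bool      busy;            /* someone else is operating on the page */
        bool      cleaning;        /* pageout in progress */
        bool      laundry;         /* queued for laundering */
        object_t *object;          /* backing object */
    } page_t;

    /*
     * True if the page's contents need not be written to the image:
     * either it is clean (it can be refetched or re-zero-filled on demand),
     * or its object is volatile/empty purgeable memory whose owner has
     * already agreed the contents may vanish.
     */
    static bool
    can_discard(const page_t *m)
    {
        bool discard = false;

        do {
            /* Pages in transient states are kept, never discarded. */
            if (m->busy || m->cleaning || m->laundry)
                break;

            discard = !m->dirty
                   || (m->object->purgable == VOLATILE)
                   || (m->object->purgable == EMPTY);
        } while (false);

        return discard;
    }

In the patch itself the dirty bit is first refreshed from the pmap, and a page discarded only because it is purgeable (i.e. still dirty) is counted under count_discard_purgeable rather than the clean counters.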
pages known to VM to not need saving are subtracted.
Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/
-extern vm_page_t vm_lopage_queue_free;
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
hibernate_page_list_t * page_list_wired,
uint32_t * pagesOut)
{
uint64_t start, end, nsec;
vm_page_t m;
uint32_t pages = page_list->page_count;
- uint32_t count_zf = 0, count_inactive = 0, count_active = 0;
+ uint32_t count_zf = 0, count_throttled = 0, count_inactive = 0, count_active = 0;
uint32_t count_wire = pages;
- uint32_t count_discard_active = 0, count_discard_inactive = 0;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_purgeable = 0;
uint32_t i;
+ uint32_t bank;
+ hibernate_bitmap_t * bitmap;
+ hibernate_bitmap_t * bitmap_wired;
+
HIBLOG("hibernate_page_list_setall start\n");
m = (vm_page_t) m->pageq.next;
}
- m = (vm_page_t) vm_page_queue_free;
- while(m)
+ for( i = 0; i < vm_colors; i++ )
+ {
+ queue_iterate(&vm_page_queue_free[i],
+ m,
+ vm_page_t,
+ pageq)
+ {
+ pages--;
+ count_wire--;
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ }
+ }
+
+ queue_iterate(&vm_lopage_queue_free,
+ m,
+ vm_page_t,
+ pageq)
{
pages--;
count_wire--;
hibernate_page_bitset(page_list, TRUE, m->phys_page);
hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- m = (vm_page_t) m->pageq.next;
}
- m = (vm_page_t) vm_lopage_queue_free;
- while(m)
+ queue_iterate( &vm_page_queue_throttled,
+ m,
+ vm_page_t,
+ pageq )
{
- pages--;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && consider_discard(m))
+ {
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ count_discard_inactive++;
+ }
+ else
+ count_throttled++;
count_wire--;
- hibernate_page_bitset(page_list, TRUE, m->phys_page);
hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- m = (vm_page_t) m->pageq.next;
}
queue_iterate( &vm_page_queue_zf,
&& consider_discard(m))
{
hibernate_page_bitset(page_list, TRUE, m->phys_page);
- count_discard_inactive++;
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
}
else
count_zf++;
&& consider_discard(m))
{
hibernate_page_bitset(page_list, TRUE, m->phys_page);
- count_discard_inactive++;
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
}
else
count_inactive++;
&& consider_discard(m))
{
hibernate_page_bitset(page_list, TRUE, m->phys_page);
- count_discard_active++;
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
}
else
count_active++;
// pull wired from hibernate_bitmap
- uint32_t bank;
- hibernate_bitmap_t * bitmap;
- hibernate_bitmap_t * bitmap_wired;
-
bitmap = &page_list->bank_bitmap[0];
bitmap_wired = &page_list_wired->bank_bitmap[0];
for (bank = 0; bank < page_list->bank_count; bank++)
absolutetime_to_nanoseconds(end - start, &nsec);
HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
- HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
- pages, count_wire, count_active, count_inactive, count_zf,
- count_discard_active, count_discard_inactive);
+ HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, throt %d, could discard act %d inact %d purgeable %d\n",
+ pages, count_wire, count_active, count_inactive, count_zf, count_throttled,
+ count_discard_active, count_discard_inactive, count_discard_purgeable);
- *pagesOut = pages;
+ *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable;
}
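The function above records every page that does not need saving by setting its bit with hibernate_page_bitset(), then subtracts the discardable pages from *pagesOut so the image size estimate matches what will actually be written. The real bitmap layout (hibernate_page_list_t with one hibernate_bitmap_t per physical-memory bank) is defined in IOKit/IOHibernatePrivate.h; the sketch below only illustrates the general idea with a hypothetical single-bank layout, not the actual kernel structures.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical single-bank stand-in for the hibernation page bitmaps. */
    typedef struct {
        uint32_t first_page;   /* first physical page number covered */
        uint32_t last_page;    /* last physical page number covered */
        uint32_t bits[];       /* one bit per page; set => page need not be saved */
    } bank_bitmap_t;

    /* Set or clear the bit describing physical page 'page'. */
    static void
    page_bitset(bank_bitmap_t *bank, bool set, uint32_t page)
    {
        if (page < bank->first_page || page > bank->last_page)
            return;                               /* not covered by this bank */

        uint32_t idx  = page - bank->first_page;
        uint32_t mask = 1u << (idx & 31);

        if (set)
            bank->bits[idx >> 5] |= mask;
        else
            bank->bits[idx >> 5] &= ~mask;
    }

    /* Test the bit describing physical page 'page'. */
    static bool
    page_bittst(const bank_bitmap_t *bank, uint32_t page)
    {
        if (page < bank->first_page || page > bank->last_page)
            return false;

        uint32_t idx = page - bank->first_page;
        return (bank->bits[idx >> 5] & (1u << (idx & 31))) != 0;
    }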
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
uint64_t start, end, nsec;
vm_page_t m;
vm_page_t next;
- uint32_t count_discard_active = 0, count_discard_inactive = 0;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_purgeable = 0;
clock_get_uptime(&start);
next = (vm_page_t) m->pageq.next;
if (hibernate_page_bittst(page_list, m->phys_page))
{
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
discard_page(m);
- count_discard_inactive++;
}
m = next;
}
next = (vm_page_t) m->pageq.next;
if (hibernate_page_bittst(page_list, m->phys_page))
{
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
discard_page(m);
- count_discard_inactive++;
}
m = next;
}
next = (vm_page_t) m->pageq.next;
if (hibernate_page_bittst(page_list, m->phys_page))
{
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
discard_page(m);
- count_discard_active++;
}
m = next;
}
clock_get_uptime(&end);
absolutetime_to_nanoseconds(end - start, &nsec);
- HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
+ HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d\n",
nsec / 1000000ULL,
- count_discard_active, count_discard_inactive);
+ count_discard_active, count_discard_inactive, count_discard_purgeable);
}
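One detail worth noting in the discard loops above: each iteration reads m->pageq.next into next before the page can be handed to discard_page(), because vm_page_free() unlinks the page and its queue links cannot be trusted afterwards. A minimal sketch of that pattern on a hypothetical singly linked list (not the kernel's queue machinery):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical node standing in for a page on one of the queues. */
    typedef struct node {
        struct node *next;
        bool         discardable;     /* analogue of the page_list bit test */
    } node_t;

    /*
     * Free every node marked discardable.  The next pointer is captured
     * before the node may be freed, mirroring how the discard loops save
     * m->pageq.next before discard_page() can call vm_page_free().
     */
    static void
    discard_marked(node_t **head)
    {
        node_t *m = *head;
        node_t *prev = NULL;

        while (m != NULL) {
            node_t *next = m->next;       /* capture before possibly freeing */

            if (m->discardable) {
                if (prev)
                    prev->next = next;
                else
                    *head = next;
                free(m);
            } else {
                prev = m;
            }
            m = next;
        }
    }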
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
hibernate_processor_setup(header);
- HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
+ HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n",
header->processorFlags, gobble_count);
if (gobble_count)