+
+
+
+#if HIBERNATION
+
+#include <IOKit/IOHibernatePrivate.h>
+
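+/*
+ * pmap_npages is the number of pv_head_table entries walked by the
+ * hibernation teardown/rebuild code below; pmap_teardown_last_valid_compact_indx
+ * records the last slot still holding a live entry after the table has been
+ * compacted (-1 until a teardown has run).
+ */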
+int32_t pmap_npages;
+int32_t pmap_teardown_last_valid_compact_indx = -1;
+
+
+void hibernate_rebuild_pmap_structs(void);
+void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+void pmap_pack_index(uint32_t);
+int32_t pmap_unpack_index(pv_rooted_entry_t);
+
+
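+/*
+ * While the pv_head_table is compacted for hibernation, each live entry's
+ * original table index is stashed in the top 16 bits of its qlink.next and
+ * qlink.prev pointers (16 bits in each).  Those bits are all ones for
+ * canonical kernel addresses, so pmap_unpack_index() recovers the index and
+ * then restores the 0xffff pattern, while pmap_pack_index() does the reverse.
+ */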
+int32_t
+pmap_unpack_index(pv_rooted_entry_t pv_h)
+{
+ int32_t indx = 0;
+
+ indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
+ indx = indx << 16;
+ indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);
+
+ *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
+ *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);
+
+ return indx;
+}
+
+
+void
+pmap_pack_index(uint32_t indx)
+{
+ pv_rooted_entry_t pv_h;
+
+ pv_h = &pv_head_table[indx];
+
+ *((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
+ *((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);
+
+ *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
+ *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
+}
+
+
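+/*
+ * Compact the pv_head_table for hibernation: each live entry has its original
+ * index packed into its queue pointers and is slid down into the lowest free
+ * slot, so the tail of the table beyond pmap_teardown_last_valid_compact_indx
+ * can be reported back to the hibernation code as a range that does not need
+ * to be preserved.
+ */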
+void
+hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
+{
+ int32_t i;
+ int32_t compact_target_indx;
+
+ compact_target_indx = 0;
+
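+ /*
+ * compact_target_indx points at the lowest free slot found so far
+ * (or at an occupied slot if no hole has appeared yet); each live
+ * entry found above a hole is copied down into that slot
+ */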
+ for (i = 0; i < pmap_npages; i++) {
+ if (pv_head_table[i].pmap == PMAP_NULL) {
+ if (pv_head_table[compact_target_indx].pmap != PMAP_NULL) {
+ compact_target_indx = i;
+ }
+ } else {
+ pmap_pack_index((uint32_t)i);
+
+ if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
+ /*
+ * we've got a hole to fill, so
+ * move this pv_rooted_entry_t to its new home
+ */
+ pv_head_table[compact_target_indx] = pv_head_table[i];
+ pv_head_table[i].pmap = PMAP_NULL;
+
+ pmap_teardown_last_valid_compact_indx = compact_target_indx;
+ compact_target_indx++;
+ } else {
+ pmap_teardown_last_valid_compact_indx = i;
+ }
+ }
+ }
+ *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx + 1];
+ *unneeded_end = (addr64_t)&pv_head_table[pmap_npages - 1];
+
+ HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
+}
+
+
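+/*
+ * Undo hibernate_teardown_pmap_structs(): walk the compacted entries from the
+ * highest compacted slot back down, unpack each entry's original index from
+ * its queue pointers, move it back to that slot, and zero the holes in
+ * between.
+ */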
+void
+hibernate_rebuild_pmap_structs(void)
+{
+ int32_t cindx, eindx, rindx = 0;
+ pv_rooted_entry_t pv_h;
+
+ eindx = (int32_t)pmap_npages;
+
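+ /*
+ * process the compacted entries from highest to lowest so that
+ * moving an entry back (always to an index >= its compacted slot)
+ * and zeroing the hole above it can never clobber a compacted
+ * entry that hasn't been restored yet
+ */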
+ for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+ pv_h = &pv_head_table[cindx];
+
+ rindx = pmap_unpack_index(pv_h);
+ assert(rindx < pmap_npages);
+
+ if (rindx != cindx) {
+ /*
+ * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
+ * so move it back to its real location
+ */
+ pv_head_table[rindx] = pv_head_table[cindx];
+ }
+ if (rindx + 1 != eindx) {
+ /*
+ * the 'hole' between this pv_rooted_entry_t and the previous
+ * pv_rooted_entry_t we moved needs to be initialized as
+ * a range of zeroed pv_rooted_entry_t's
+ */
+ bzero((char *)&pv_head_table[rindx + 1], (eindx - rindx - 1) * sizeof(struct pv_rooted_entry));
+ }
+ eindx = rindx;
+ }
+ if (rindx) {
+ bzero((char *)&pv_head_table[0], rindx * sizeof(struct pv_rooted_entry));
+ }
+
+ HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
+}
+
+#endif /* HIBERNATION */
+
+/*
+ * Create pv entries for kernel pages mapped by early startup code.
+ * These have to exist so we can ml_static_mfree() them later.
+ */
+static void
+pmap_pv_fixup(vm_offset_t start_va, vm_offset_t end_va)
+{
+ ppnum_t ppn;
+ pv_rooted_entry_t pv_h;
+ uint32_t pgsz;
+
+ start_va = round_page(start_va);
+ end_va = trunc_page(end_va);
+ while (start_va < end_va) {
+ pgsz = PAGE_SIZE;
+ ppn = pmap_find_phys(kernel_pmap, start_va);
+ if (ppn != 0 && IS_MANAGED_PAGE(ppn)) {
+ pv_h = pai_to_pvh(ppn);
+ assert(pv_h->qlink.next == 0); /* shouldn't be init'd yet */
+ assert(pv_h->pmap == 0);
+ pv_h->va_and_flags = start_va;
+ pv_h->pmap = kernel_pmap;
+ queue_init(&pv_h->qlink);
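+ /*
+ * if this VA is covered by a 2MB large page mapping, step past
+ * the whole superpage rather than a single 4K page
+ */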
+ if (pmap_query_pagesize(kernel_pmap, start_va) == I386_LPGBYTES) {
+ pgsz = I386_LPGBYTES;
+ }
+ }
+ start_va += pgsz;
+ }
+}
+