+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ ppnm_last_found = ppnm;
+ break;
+ }
+ }
+ if (ppnm == NULL)
+ panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+ return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ addr64_t saddr_aligned;
+ addr64_t eaddr_aligned;
+ addr64_t addr;
+ ppnum_t paddr;
+ unsigned int mark_as_unneeded_pages = 0;
+
+ saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+ eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+ for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+ paddr = pmap_find_phys(kernel_pmap, addr);
+
+ assert(paddr);
+
+ hibernate_page_bitset(page_list, TRUE, paddr);
+ hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+ mark_as_unneeded_pages++;
+ }
+ return (mark_as_unneeded_pages);
+}
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ int hash_id;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ assert(mem->hashed);
+ assert(m_object);
+ assert(mem->offset != (vm_object_offset_t) -1);
+
+ /*
+ * Insert it into the object_object/offset hash table
+ */
+ hash_id = vm_page_hash(m_object, mem->offset);
+ bucket = &vm_page_buckets[hash_id];
+
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
+void
+hibernate_free_range(int sindx, int eindx)
+{
+ vm_page_t mem;
+ unsigned int color;
+
+ while (sindx < eindx) {
+ mem = &vm_pages[sindx];
+
+ vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+ mem->lopage = FALSE;
+ mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;
+
+ color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
+ vm_page_queue_enter_first(&vm_page_queue_free[color].qhead,
+ mem,
+ vm_page_t,
+ pageq);
+ vm_page_free_count++;
+
+ sindx++;
+ }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
/*
 * Undo hibernate_teardown_vm_structs(): move each compacted vm_page_t
 * back to its original slot in the vm_pages array, rebuild the free
 * list for the holes between restored entries, and re-create the
 * object/offset hash.  Must complete before normal VM activity
 * resumes after wake.  No-op unless a teardown actually ran.
 */
void
hibernate_rebuild_vm_structs(void)
{
	int		cindx, sindx, eindx;
	vm_page_t	mem, tmem, mem_next;
	AbsoluteTime	startTime, endTime;
	uint64_t	nsec;

	if (hibernate_rebuild_needed == FALSE)
		return;

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
	HIBLOG("hibernate_rebuild started\n");

	clock_get_uptime(&startTime);

	hibernate_rebuild_pmap_structs();

	/* the hash is rebuilt from scratch below, so empty every bucket */
	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
	eindx = vm_pages_count;

	/* walk the compacted entries from last to first, restoring each */
	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {

		mem = &vm_pages[cindx];
		/*
		 * hibernate_teardown_vm_structs leaves the location where
		 * this vm_page_t must be located in "next".
		 */
		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));
		mem->next_m = VM_PAGE_PACK_PTR(NULL);

		sindx = (int)(tmem - &vm_pages[0]);

		if (mem != tmem) {
			/*
			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
			 * so move it back to its real location
			 */
			*tmem = *mem;
			mem = tmem;
		}
		if (mem->hashed)
			hibernate_hash_insert_page(mem);
		/*
		 * the 'hole' between this vm_page_t and the previous
		 * vm_page_t we moved needs to be initialized as
		 * a range of free vm_page_t's
		 */
		hibernate_free_range(sindx + 1, eindx);

		eindx = sindx;
	}
	/* any slots below the first restored entry are free as well */
	if (sindx)
		hibernate_free_range(0, sindx);

	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);

	/*
	 * process the list of vm_page_t's that were entered in the hash,
	 * but were not located in the vm_pages array... these are
	 * vm_page_t's that were created on the fly (i.e. fictitious)
	 */
	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));

		mem->next_m = 0;
		hibernate_hash_insert_page(mem);
	}
	hibernate_rebuild_hash_list = NULL;

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);

	hibernate_rebuild_needed = FALSE;

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
/*
 * Prepare VM bookkeeping for hibernation: pull every free page off the
 * free queues, compact the in-use vm_page_t entries toward the front
 * of the vm_pages array, and flag the pages backing the hash buckets,
 * the now-unused tail of vm_pages, and discardable pmap data as not
 * needing to be written to the image.  Returns the number of pages so
 * flagged.  hibernate_rebuild_vm_structs() reverses all of this on wake.
 */
uint32_t
hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
	unsigned int	i;
	unsigned int	compact_target_indx;
	vm_page_t	mem, mem_next;
	vm_page_bucket_t *bucket;
	unsigned int	mark_as_unneeded_pages = 0;
	unsigned int	unneeded_vm_page_bucket_pages = 0;
	unsigned int	unneeded_vm_pages_pages = 0;
	unsigned int	unneeded_pmap_pages = 0;
	addr64_t	start_of_unneeded = 0;
	addr64_t	end_of_unneeded = 0;


	if (hibernate_should_abort())
		return (0);

	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
	       vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
	       vm_page_cleaned_count, compressor_object->resident_page_count);

	/*
	 * hashed pages that do not live inside the vm_pages array
	 * (fictitious pages) cannot survive the compaction below, so
	 * save them on a private list; hibernate_rebuild_vm_structs()
	 * re-hashes them after the buckets are rebuilt
	 */
	for (i = 0; i < vm_page_bucket_count; i++) {

		bucket = &vm_page_buckets[i];

		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
			assert(mem->hashed);

			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->next_m));

			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
				mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
				hibernate_rebuild_hash_list = mem;
			}
		}
	}
	/* the bucket array is rebuilt on wake, so its pages need not be saved */
	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;

	/* remembered so the rebuild can assert it freed the same number of pages */
	hibernate_teardown_vm_page_free_count = vm_page_free_count;

	compact_target_indx = 0;

	for (i = 0; i < vm_pages_count; i++) {

		mem = &vm_pages[i];

		if (mem->vm_page_q_state == VM_PAGE_ON_FREE_Q) {
			unsigned int color;

			assert(mem->busy);
			assert(!mem->lopage);

			color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;

			vm_page_queue_remove(&vm_page_queue_free[color].qhead,
					     mem,
					     vm_page_t,
					     pageq);

			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

			vm_page_free_count--;

			hibernate_teardown_found_free_pages++;

			/* this slot becomes a candidate hole to receive a live entry */
			if (vm_pages[compact_target_indx].vm_page_q_state != VM_PAGE_ON_FREE_Q)
				compact_target_indx = i;
		} else {
			/*
			 * record this vm_page_t's original location
			 * we need this even if it doesn't get moved
			 * as an indicator to the rebuild function that
			 * we don't have to move it
			 */
			mem->next_m = VM_PAGE_PACK_PTR(mem);

			if (vm_pages[compact_target_indx].vm_page_q_state == VM_PAGE_ON_FREE_Q) {
				/*
				 * we've got a hole to fill, so
				 * move this vm_page_t to it's new home
				 */
				vm_pages[compact_target_indx] = *mem;
				mem->vm_page_q_state = VM_PAGE_ON_FREE_Q;

				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
				compact_target_indx++;
			} else
				hibernate_teardown_last_valid_compact_indx = i;
		}
	}
	/* everything past the last live compacted entry is dead weight */
	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
							     (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
	mark_as_unneeded_pages += unneeded_vm_pages_pages;

	hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);

	if (start_of_unneeded) {
		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
		mark_as_unneeded_pages += unneeded_pmap_pages;
	}
	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);

	hibernate_rebuild_needed = TRUE;

	return (mark_as_unneeded_pages);
}
+
+
+#endif /* HIBERNATION */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <mach_vm_debug.h>
+#if MACH_VM_DEBUG
+
+#include <mach_debug/hash_info.h>
+#include <vm/vm_debug.h>
+
+/*
+ * Routine: vm_page_info
+ * Purpose:
+ * Return information about the global VP table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ unsigned int i;
+ lck_spin_t *bucket_lock;
+
+ if (vm_page_bucket_count < count)
+ count = vm_page_bucket_count;
+
+ for (i = 0; i < count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ unsigned int bucket_count = 0;
+ vm_page_t m;
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+
+ for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+ m != VM_PAGE_NULL;
+ m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->next_m)))
+ bucket_count++;
+
+ lck_spin_unlock(bucket_lock);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return vm_page_bucket_count;
+}
+#endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+ unsigned int i;
+ vm_page_t p;
+ unsigned int p_hash;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+
+ if (!vm_page_buckets_check_ready) {
+ return;
+ }
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed ||
+ hibernate_rebuild_hash_list) {
+ panic("BUCKET_CHECK: hibernation in progress: "
+ "rebuild_needed=%d rebuild_hash_list=%p\n",
+ hibernate_rebuild_needed,
+ hibernate_rebuild_hash_list);
+ }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+ char *cp;
+ for (cp = (char *) vm_page_fake_buckets_start;
+ cp < (char *) vm_page_fake_buckets_end;
+ cp++) {
+ if (*cp != 0x5a) {
+ panic("BUCKET_CHECK: corruption at %p in fake buckets "
+ "[0x%llx:0x%llx]\n",
+ cp,
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ }
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ vm_object_t p_object;
+
+ bucket = &vm_page_buckets[i];
+ if (!bucket->page_list) {
+ continue;
+ }
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+
+ while (p != VM_PAGE_NULL) {
+ p_object = VM_PAGE_OBJECT(p);
+
+ if (!p->hashed) {
+ panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+ "hash %d in bucket %d at %p "
+ "is not hashed\n",
+ p, p_object, p->offset,
+ p_hash, i, bucket);
+ }
+ p_hash = vm_page_hash(p_object, p->offset);
+ if (p_hash != i) {
+ panic("BUCKET_CHECK: corruption in bucket %d "
+ "at %p: page %p object %p offset 0x%llx "
+ "hash %d\n",
+ i, bucket, p, p_object, p->offset,
+ p_hash);
+ }
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->next_m));
+ }
+ lck_spin_unlock(bucket_lock);
+ }
+
+// printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+/*
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... its the only spot in the system where we add pages
+ * to those queues... once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called since we're behind
+ * the global vm_page_queue_lock at this point... we still need to take the local lock
+ * in case this operation is being run on a different CPU then the local queue's identity,
+ * but we don't have to worry about the page moving to a global queue or becoming wired
+ * while we're grabbing the local lock since those operations would require the global
+ * vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why its safe to utilze the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and local are ALWAYS mutually exclusive conditions.
+ */
+
/*
 * Remove 'mem' from whichever page queue it currently occupies and
 * update the matching counters; on return the page is VM_PAGE_NOT_ON_Q
 * with a zeroed pageq linkage.  Caller must hold vm_page_queue_lock
 * (asserted below).  Pages on the pageout queue are NOT handled here;
 * see the comment in the default: case.
 */
#if CONFIG_BACKGROUND_QUEUE
void
vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
#else
void
vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
#endif
{
	boolean_t	was_pageable = TRUE;
	vm_object_t	m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	/* not on any queue: optionally drop it from the background queue only */
	if (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q)
	{
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		if (remove_from_backgroundq == TRUE) {
			vm_page_remove_from_backgroundq(mem);
		}
		if (mem->vm_page_on_backgroundq) {
			assert(mem->vm_page_backgroundq.next != 0);
			assert(mem->vm_page_backgroundq.prev != 0);
		} else {
			assert(mem->vm_page_backgroundq.next == 0);
			assert(mem->vm_page_backgroundq.prev == 0);
		}
#endif /* CONFIG_BACKGROUND_QUEUE */
		return;
	}

	/* compressor-owned pages are never on a pageable queue */
	if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
	{
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_backgroundq.next == 0 &&
		       mem->vm_page_backgroundq.prev == 0 &&
		       mem->vm_page_on_backgroundq == FALSE);
#endif
		return;
	}
	if (mem->vm_page_q_state == VM_PAGE_IS_WIRED) {
		/*
		 * might put these guys on a list for debugging purposes
		 * if we do, we'll need to remove this assert
		 */
		assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
		assert(mem->vm_page_backgroundq.next == 0 &&
		       mem->vm_page_backgroundq.prev == 0 &&
		       mem->vm_page_on_backgroundq == FALSE);
#endif
		return;
	}

	/* past this point the page must be on one of the pageable queues */
	assert(m_object != compressor_object);
	assert(m_object != kernel_object);
	assert(m_object != vm_submap_object);
	assert(!mem->fictitious);

	switch(mem->vm_page_q_state) {

	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
	{
		struct vpl	*lq;

		/*
		 * per the block comment above this function, the 'local'
		 * state is stable here but the local queue's own lock is
		 * still required before unlinking
		 */
		lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
		VPL_LOCK(&lq->vpl_lock);
		vm_page_queue_remove(&lq->vpl_queue,
				     mem, vm_page_t, pageq);
		mem->local_id = 0;
		lq->vpl_count--;
		if (m_object->internal) {
			lq->vpl_internal_count--;
		} else {
			lq->vpl_external_count--;
		}
		VPL_UNLOCK(&lq->vpl_lock);
		was_pageable = FALSE;
		break;
	}
	case VM_PAGE_ON_ACTIVE_Q:
	{
		vm_page_queue_remove(&vm_page_queue_active,
				     mem, vm_page_t, pageq);
		vm_page_active_count--;
		break;
	}

	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_anonymous,
				     mem, vm_page_t, pageq);
		vm_page_anonymous_count--;
		vm_purgeable_q_advance_all();
		break;
	}

	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_inactive,
				     mem, vm_page_t, pageq);
		vm_purgeable_q_advance_all();
		break;
	}

	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_cleaned,
				     mem, vm_page_t, pageq);
		vm_page_cleaned_count--;
		break;
	}

	case VM_PAGE_ON_THROTTLED_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_queue_remove(&vm_page_queue_throttled,
				     mem, vm_page_t, pageq);
		vm_page_throttled_count--;
		was_pageable = FALSE;
		break;
	}

	case VM_PAGE_ON_SPECULATIVE_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_remque(&mem->pageq);
		vm_page_speculative_count--;
		break;
	}

#if CONFIG_SECLUDED_MEMORY
	case VM_PAGE_ON_SECLUDED_Q:
	{
		vm_page_queue_remove(&vm_page_queue_secluded,
				     mem, vm_page_t, pageq);
		vm_page_secluded_count--;
		if (m_object == VM_OBJECT_NULL) {
			vm_page_secluded_count_free--;
			was_pageable = FALSE;
		} else {
			assert(!m_object->internal);
			vm_page_secluded_count_inuse--;
			was_pageable = FALSE;
//			was_pageable = TRUE;
		}
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	default:
	{
		/*
		 * if (mem->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
		 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
		 * the caller is responsible for determining if the page is on that queue, and if so, must
		 * either first remove it (it needs both the page queues lock and the object lock to do
		 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
		 *
		 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
		 * or any of the undefined states
		 */
		panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vm_page_q_state);
		break;
	}

	}
	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
	mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;

#if CONFIG_BACKGROUND_QUEUE
	if (remove_from_backgroundq == TRUE)
		vm_page_remove_from_backgroundq(mem);
#endif
	if (was_pageable) {
		if (m_object->internal) {
			vm_page_pageable_internal_count--;
		} else {
			vm_page_pageable_external_count--;
		}
	}
}
+
+void
+vm_page_remove_internal(vm_page_t page)
+{
+ vm_object_t __object = VM_PAGE_OBJECT(page);
+ if (page == __object->memq_hint) {
+ vm_page_t __new_hint;
+ vm_page_queue_entry_t __qe;
+ __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = NULL;
+ }
+ }
+ __new_hint = (vm_page_t)((uintptr_t) __qe);
+ __object->memq_hint = __new_hint;
+ }
+ vm_page_queue_remove(&__object->memq, page, vm_page_t, listq);
+#if CONFIG_SECLUDED_MEMORY
+ if (__object->eligible_for_secluded) {
+ vm_page_secluded.eligible_for_secluded--;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+}
+
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!mem->fictitious);
+ assert(!mem->laundry);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_check_pageable_safe(mem);
+
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target != 0 &&
+ num_tasks_can_use_secluded_mem == 0 &&
+ m_object->eligible_for_secluded &&
+ secluded_aging_policy == SECLUDED_AGING_FIFO) {
+ mem->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_page_queue_enter(&vm_page_queue_secluded, mem,
+ vm_page_t, pageq);
+ vm_page_secluded_count++;
+ vm_page_secluded_count_inuse++;
+ assert(!m_object->internal);
+// vm_page_pageable_external_count++;
+ return;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ if (m_object->internal) {
+ mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
+
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+
+ vm_page_anonymous_count++;
+ vm_page_pageable_internal_count++;
+ } else {
+ mem->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
+
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+
+ vm_page_pageable_external_count++;
+ }
+ vm_page_inactive_count++;
+ token_new_pagecount++;
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (mem->vm_page_in_background)
+ vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
+void
+vm_page_enqueue_active(vm_page_t mem, boolean_t first)
+{
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!mem->fictitious);
+ assert(!mem->laundry);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_check_pageable_safe(mem);
+
+ mem->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+ if (first == TRUE)
+ vm_page_queue_enter_first(&vm_page_queue_active, mem, vm_page_t, pageq);
+ else
+ vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
+ vm_page_active_count++;
+
+ if (m_object->internal) {
+ vm_page_pageable_internal_count++;
+ } else {
+ vm_page_pageable_external_count++;
+ }
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (mem->vm_page_in_background)
+ vm_page_add_to_backgroundq(mem, FALSE);
+#endif
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+ vm_object_t page_object;
+
+ page_object = VM_PAGE_OBJECT(page);
+
+ if (page_object == kernel_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from kernel object (%p) to pageable queue", kernel_object);
+ }
+
+ if (page_object == compressor_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from compressor object (%p) to pageable queue", compressor_object);
+ }
+
+ if (page_object == vm_submap_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from submap object (%p) to pageable queue", vm_submap_object);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
/*
 * Registry of allocation sites indexed by vm_tag_t; dynamic slots are
 * assigned by vm_tag_alloc_locked() under vm_allocation_sites_lock.
 */
vm_allocation_site_t *
vm_allocation_sites[VM_KERN_MEMORY_COUNT];
+
+vm_tag_t
+vm_tag_bt(void)
+{
+ uintptr_t* frameptr;
+ uintptr_t* frameptr_next;
+ uintptr_t retaddr;
+ uintptr_t kstackb, kstackt;
+ const vm_allocation_site_t * site;
+ thread_t cthread;
+
+ cthread = current_thread();
+ if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+ kstackb = cthread->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+ site = NULL;
+ while (frameptr != NULL)
+ {
+ /* Verify thread stack bounds */
+ if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+ /* Next frame pointer is pointed to by the previous one */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+ if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+ {
+ site = OSKextGetAllocationSiteForCaller(retaddr);
+ break;
+ }
+
+ frameptr = frameptr_next;
+ }
+ return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
+
/* bitmap of available dynamic tags: bit set => tag free (seeded by vm_tag_init) */
static uint64_t free_tag_bits[256/64];
+
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site)
+{
+ vm_tag_t tag;
+ uint64_t avail;
+ uint64_t idx;
+
+ if (site->tag) return;
+
+ idx = 0;
+ while (TRUE)
+ {
+ avail = free_tag_bits[idx];
+ if (avail)
+ {
+ tag = __builtin_clzll(avail);
+ avail &= ~(1ULL << (63 - tag));
+ free_tag_bits[idx] = avail;
+ tag += (idx << 6);
+ break;
+ }
+ idx++;
+ if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0])))
+ {
+ tag = VM_KERN_MEMORY_ANY;
+ break;
+ }
+ }
+ site->tag = tag;
+ if (VM_KERN_MEMORY_ANY != tag)
+ {
+ assert(!vm_allocation_sites[tag]);
+ vm_allocation_sites[tag] = site;
+ }
+}
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+ uint64_t avail;
+ uint32_t idx;
+ uint64_t bit;
+
+ if (VM_KERN_MEMORY_ANY == tag) return;
+
+ idx = (tag >> 6);
+ avail = free_tag_bits[idx];
+ tag &= 63;
+ bit = (1ULL << (63 - tag));
+ assert(!(avail & bit));
+ free_tag_bits[idx] = (avail | bit);
+}
+
+static void
+vm_tag_init(void)
+{
+ vm_tag_t tag;
+ for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+}
+
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+ vm_tag_t tag;
+
+ if (VM_TAG_BT & site->flags)
+ {
+ tag = vm_tag_bt();
+ if (VM_KERN_MEMORY_NONE != tag) return (tag);
+ }
+
+ if (!site->tag)
+ {
+ lck_spin_lock(&vm_allocation_sites_lock);
+ vm_tag_alloc_locked(site);
+ lck_spin_unlock(&vm_allocation_sites_lock);
+ }
+
+ return (site->tag);
+}
+
+static void
+vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object)
+{
+ if (!object->wired_page_count) return;
+ if (object != kernel_object)
+ {
+ assert(object->wire_tag < num_sites);
+ sites[object->wire_tag].size += ptoa_64(object->wired_page_count);
+ }
+}
+
/* callback applied to each object by the iteration helpers below */
typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites,
				     unsigned int num_sites, vm_object_t object);
+
+static void
+vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites,
+ vm_page_iterate_proc proc, purgeable_q_t queue,
+ int group)
+{
+ vm_object_t object;
+
+ for (object = (vm_object_t) queue_first(&queue->objq[group]);
+ !queue_end(&queue->objq[group], (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq))
+ {
+ proc(sites, num_sites, object);
+ }
+}
+
/*
 * Apply 'proc' to every object that may hold wired pages: first the
 * wired-object list, then the purgeable nonvolatile queue and all
 * volatile queues.  Takes vm_objects_wired_lock then
 * vm_purgeable_queue_lock, each only for its own traversal.
 */
static void
vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites,
			vm_page_iterate_proc proc)
{
	purgeable_q_t	volatile_q;
	queue_head_t  * nonvolatile_q;
	vm_object_t	object;
	int		group;

	lck_spin_lock(&vm_objects_wired_lock);
	queue_iterate(&vm_objects_wired,
		      object,
		      vm_object_t,
		      objq)
	{
		proc(sites, num_sites, object);
	}
	lck_spin_unlock(&vm_objects_wired_lock);

	lck_mtx_lock(&vm_purgeable_queue_lock);
	nonvolatile_q = &purgeable_nonvolatile_queue;
	for (object = (vm_object_t) queue_first(nonvolatile_q);
	     !queue_end(nonvolatile_q, (queue_entry_t) object);
	     object = (vm_object_t) queue_next(&object->objq))
	{
		proc(sites, num_sites, object);
	}

	/* the obsolete queue only uses group 0 */
	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
	vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0);

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
	{
		vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
	}

	volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
	for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
	{
		vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);
}
+
/*
 * Finish per-tag attribution in sites[0..VM_KERN_MEMORY_COUNT): stamp
 * the fixed well-known tags, resolve dynamic tags to their registered
 * kernel/kmod allocation site, and return the total bytes attributed.
 * NOTE: num_sites is marked __unused because only the assert() reads it.
 */
static uint64_t
process_account(mach_memory_info_t * sites, unsigned int __unused num_sites, uint64_t zones_collectable_bytes)
{
	uint64_t found;
	unsigned int idx;
	vm_allocation_site_t * site;

	assert(num_sites >= VM_KERN_MEMORY_COUNT);
	found = 0;
	for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++)
	{
		found += sites[idx].size;
		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
		{
			/* fixed, well-known tags need no site lookup */
			sites[idx].site = idx;
			sites[idx].flags |= VM_KERN_SITE_TAG;
			if (VM_KERN_MEMORY_ZONE == idx)
			{
				sites[idx].flags |= VM_KERN_SITE_HIDE;
				sites[idx].collectable_bytes = zones_collectable_bytes;
			} else sites[idx].flags |= VM_KERN_SITE_WIRED;
			continue;
		}
		/* dynamic tags: consult the registered allocation site under the lock */
		lck_spin_lock(&vm_allocation_sites_lock);
		if ((site = vm_allocation_sites[idx]))
		{
			if (sites[idx].size)
			{
				sites[idx].flags |= VM_KERN_SITE_WIRED;
				if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
				{
					sites[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
					sites[idx].flags |= VM_KERN_SITE_KMOD;
				}
				else
				{
					sites[idx].site = VM_KERNEL_UNSLIDE(site);
					sites[idx].flags |= VM_KERN_SITE_KERNEL;
				}
				site = NULL;
			}
			else
			{
#if 1
				site = NULL;
#else
				/* this code would free a site with no allocations but can race a new
				 * allocation being made */
				vm_tag_free_locked(site->tag);
				site->tag = VM_KERN_MEMORY_NONE;
				vm_allocation_sites[idx] = NULL;
				if (!(VM_TAG_UNLOAD & site->flags)) site = NULL;
#endif
			}
		}
		lck_spin_unlock(&vm_allocation_sites_lock);
		/* only non-NULL when the disabled reclaim path above is compiled in */
		if (site) OSKextFreeSite(site);
	}

	return (found);
}
+
/*
 * Populate 'sites' with a system-wide accounting of wired memory:
 * per-tag totals from wired/purgeable objects, global page counters,
 * kernel map statistics, and per-site attribution for pages wired
 * directly into kernel_object (found by walking kernel_map, descending
 * one level into submaps).  Returns KERN_ABORTED if invoked before
 * the VM is initialized.
 */
kern_return_t
vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites, uint64_t zones_collectable_bytes)
{
	enum { kMaxKernelDepth = 1 };
	vm_map_t maps [kMaxKernelDepth];
	vm_map_entry_t entries[kMaxKernelDepth];
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_offset_t offset;
	vm_page_t page;
	int stackIdx, count;
	uint64_t wired_size;
	uint64_t wired_managed_size;
	uint64_t wired_reserved_size;
	mach_memory_info_t * counts;

	bzero(sites, num_sites * sizeof(mach_memory_info_t));

	if (!vm_page_wire_count_initial) return (KERN_ABORTED);

	/* accumulate per-tag sizes from every wired object */
	vm_page_iterate_objects(sites, num_sites, &vm_page_count_object);

	wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
	wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);

	assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT));
	counts = &sites[VM_KERN_MEMORY_COUNT];

#define SET_COUNT(xcount, xsize, xflags)			\
	counts[xcount].site = (xcount);				\
	counts[xcount].size = (xsize);				\
	counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;

	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);

#define SET_MAP(xcount, xsize, xfree, xlargest)		\
	counts[xcount].site = (xcount);			\
	counts[xcount].size = (xsize);			\
	counts[xcount].free = (xfree);			\
	counts[xcount].largest = (xlargest);		\
	counts[xcount].flags = VM_KERN_SITE_COUNTER;

	vm_map_size_t map_size, map_free, map_largest;

	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

	vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

	vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);

	map = kernel_map;
	stackIdx = 0;
	while (map)
	{
		vm_map_lock(map);
		/*
		 * NOTE: the loop condition tests 'map', not 'entry' -- 'map'
		 * is swapped (with a break) when descending into a submap,
		 * and cleared/replaced by the pop loop below when this map's
		 * last entry has been processed
		 */
		for (entry = map->hdr.links.next; map; entry = entry->links.next)
		{
			if (entry->is_sub_map)
			{
				assert(stackIdx < kMaxKernelDepth);
				maps[stackIdx] = map;
				entries[stackIdx] = entry;
				stackIdx++;
				map = VME_SUBMAP(entry);
				entry = NULL;
				break;
			}
			if (VME_OBJECT(entry) == kernel_object)
			{
				/* count wired kernel_object pages in this entry's range */
				count = 0;
				vm_object_lock(VME_OBJECT(entry));
				for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
				{
					page = vm_page_lookup(VME_OBJECT(entry), offset);
					if (page && VM_PAGE_WIRED(page)) count++;
				}
				vm_object_unlock(VME_OBJECT(entry));

				if (count)
				{
					assert(VME_ALIAS(entry) < num_sites);
					sites[VME_ALIAS(entry)].size += ptoa_64(count);
				}
			}
			/* pop back to the parent map when this map is exhausted */
			while (map && (entry == vm_map_last_entry(map)))
			{
				vm_map_unlock(map);
				if (!stackIdx) map = NULL;
				else
				{
					--stackIdx;
					map = maps[stackIdx];
					entry = entries[stackIdx];
				}
			}
		}
	}

	process_account(sites, num_sites, zones_collectable_bytes);

	return (KERN_SUCCESS);
}
+
+uint32_t
+vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
+{
+ vm_allocation_site_t * site;
+ uint32_t kmodId;
+
+ kmodId = 0;
+ lck_spin_lock(&vm_allocation_sites_lock);
+ if ((site = vm_allocation_sites[tag]))
+ {
+ if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
+ {
+ kmodId = OSKextGetKmodIDForSite(site, name, namelen);
+ }
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ return (kmodId);
+}
+
+#if DEBUG || DEVELOPMENT
+
/* vm_tag_set locking helpers: the embedded spinlock guards the entries array */
#define vm_tag_set_lock(set)	lck_spin_lock(&set->lock)
#define vm_tag_set_unlock(set)	lck_spin_unlock(&set->lock)
+
+void
+vm_tag_set_init(vm_tag_set_t set, uint32_t count)
+{
+ lck_spin_init(&set->lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ bzero(&set->entries, count * sizeof(struct vm_tag_set_entry));
+}
+
+kern_return_t
+vm_tag_set_enter(vm_tag_set_t set, uint32_t count, vm_tag_t tag)
+{
+ kern_return_t kr;
+ uint32_t idx, free;
+
+ vm_tag_set_lock(set);
+
+ assert(tag != VM_KERN_MEMORY_NONE);
+
+ kr = KERN_NO_SPACE;
+ free = -1U;
+ for (idx = 0; idx < count; idx++)
+ {
+ if (tag == set->entries[idx].tag)
+ {
+ set->entries[idx].count++;
+ kr = KERN_SUCCESS;
+ break;
+ }
+ if ((free == -1U) && !set->entries[idx].count) free = idx;
+ }
+
+ if ((KERN_SUCCESS != kr) && (free != -1U))
+ {
+ set->entries[free].tag = tag;
+ set->entries[free].count = 1;
+ kr = KERN_SUCCESS;
+ }
+
+ vm_tag_set_unlock(set);
+
+ return (kr);
+}
+
/*
 * Release one reference to 'tag' in the set.
 * Returns KERN_NOT_IN_SET when the tag has no live entry.  When
 * new_tagp is non-NULL, *new_tagp receives a tag still in use after
 * the removal: the removed tag itself if references remain, otherwise
 * any other live entry, otherwise VM_KERN_MEMORY_NONE.
 */
kern_return_t
vm_tag_set_remove(vm_tag_set_t set, uint32_t count, vm_tag_t tag, vm_tag_t * new_tagp)
{
	kern_return_t	kr;
	uint32_t	idx;
	vm_tag_t	new_tag;

	assert(tag != VM_KERN_MEMORY_NONE);
	new_tag = VM_KERN_MEMORY_NONE;
	vm_tag_set_lock(set);

	kr = KERN_NOT_IN_SET;
	for (idx = 0; idx < count; idx++)
	{
		/* 'tag' is set to NONE below once its entry has been found */
		if ((tag != VM_KERN_MEMORY_NONE)
		    && (tag == set->entries[idx].tag)
		    && set->entries[idx].count)
		{
			set->entries[idx].count--;
			kr = KERN_SUCCESS;
			if (set->entries[idx].count)
			{
				/* references remain: report the same tag back */
				new_tag = tag;
				break;
			}
			if (!new_tagp) break;
			/* keep scanning only to find some surviving tag */
			tag = VM_KERN_MEMORY_NONE;
		}

		if (set->entries[idx].count && (VM_KERN_MEMORY_NONE == new_tag))
		{
			/* remember the first other live entry encountered */
			new_tag = set->entries[idx].tag;
			if (VM_KERN_MEMORY_NONE == tag) break;
		}
	}

	vm_tag_set_unlock(set);
	if (new_tagp) *new_tagp = new_tag;

	return (kr);
}
+
+#endif /* DEBUG || DEVELOPMENT */