+
+#if defined(__LP64__)
+
+#define VM_VPLQ_ALIGNMENT 128
+#define VM_PACKED_POINTER_ALIGNMENT 64 /* must be a power of 2 */
+#define VM_PACKED_POINTER_SHIFT 6
+
+#define VM_PACKED_FROM_VM_PAGES_ARRAY 0x80000000
+
+static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p)
+{
+	vm_page_packed_t packed_ptr;
+
+	if (!p)
+		return ((vm_page_packed_t)0);
+
+	if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < (uintptr_t)(vm_page_array_ending_addr)) {
+		/*
+		 * pointer into the vm_pages array... pack it as the array
+		 * index, tagged with VM_PACKED_FROM_VM_PAGES_ARRAY
+		 */
+		packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr)));
+		assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
+		packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY;
+		return packed_ptr;
+	}
+
+	/*
+	 * any other pointer must be suitably aligned... pack it as its
+	 * offset from the kernel base, shifted right by VM_PACKED_POINTER_SHIFT
+	 */
+	assert((p & (VM_PACKED_POINTER_ALIGNMENT - 1)) == 0);
+
+	packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT));
+	assert(packed_ptr != 0);
+	assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY));
+	return packed_ptr;
+}
+
+
+static inline uintptr_t vm_page_unpack_ptr(uintptr_t p)
+{
+	if (!p)
+		return ((uintptr_t)0);
+
+	if (p & VM_PACKED_FROM_VM_PAGES_ARRAY)
+		/* packed vm_pages array index... recover the element's address */
+		return ((uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)]));
+	/* packed kernel offset... undo the shift and re-add the kernel base */
+	return (((p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS));
+}
+
+
+#define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p))
+#define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p))
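+
+/*
+ * Illustrative sketch (not part of the interface): a pointer into the
+ * vm_pages[] array packs down to its array index tagged with
+ * VM_PACKED_FROM_VM_PAGES_ARRAY; any other suitably aligned kernel
+ * pointer packs to its offset from VM_MIN_KERNEL_AND_KEXT_ADDRESS
+ * shifted right by VM_PACKED_POINTER_SHIFT.  Unpacking reverses the
+ * transformation, so for any pointer the pack routine accepts
+ * ("m" below is a hypothetical page pointer):
+ *
+ *	vm_page_t		m = ...;
+ *	vm_page_packed_t	pp = VM_PAGE_PACK_PTR(m);
+ *
+ *	assert((vm_page_t)VM_PAGE_UNPACK_PTR(pp) == m);
+ */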
+
+#define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR((p)->vm_page_object)))
+#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
+
+
+#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
+MACRO_BEGIN \
+ (p)->snext = 0; \
+MACRO_END
+
+
+#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p)
+
+
+static __inline__ void
+vm_page_enqueue_tail(
+ vm_page_queue_t que,
+ vm_page_queue_entry_t elt)
+{
+ vm_page_queue_entry_t old_tail;
+
+ old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
+ elt->next = VM_PAGE_PACK_PTR(que);
+ elt->prev = que->prev;
+ old_tail->next = VM_PAGE_PACK_PTR(elt);
+ que->prev = VM_PAGE_PACK_PTR(elt);
+}
+
+
+static __inline__ void
+vm_page_remque(
+ vm_page_queue_entry_t elt)
+{
+ vm_page_queue_entry_t next_elt, prev_elt;
+
+ next_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->next);
+
+ /* next_elt may equal prev_elt (and the queue head) if elt was the only element */
+ prev_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->prev);
+
+ next_elt->prev = VM_PAGE_PACK_PTR(prev_elt);
+ prev_elt->next = VM_PAGE_PACK_PTR(next_elt);
+
+ elt->next = 0;
+ elt->prev = 0;
+}
+
+
+/*
+ * Macro: vm_page_queue_init
+ * Function:
+ * Initialize the given queue.
+ * Header:
+ * void vm_page_queue_init(q)
+ * vm_page_queue_t q; \* MODIFIED *\
+ */
+#define vm_page_queue_init(q) \
+MACRO_BEGIN \
+ assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \
+ assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \
+ (q)->next = VM_PAGE_PACK_PTR(q); \
+ (q)->prev = VM_PAGE_PACK_PTR(q); \
+MACRO_END
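+
+/*
+ * Usage sketch (names are hypothetical, for illustration only): a queue
+ * head must honor VM_PACKED_POINTER_ALIGNMENT so that the head itself
+ * can be packed, as the asserts above check:
+ *
+ *	vm_page_queue_head_t	my_q
+ *		__attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+ *
+ *	vm_page_queue_init(&my_q);
+ *	assert(vm_page_queue_empty(&my_q));
+ */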
+
+
+/*
+ * Macro: vm_page_queue_enter
+ * Function:
+ * Insert a new element at the tail of the queue.
+ * Header:
+ * void vm_page_queue_enter(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_enter(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __prev; \
+ \
+ __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->prev)); \
+ if ((head) == __prev) { \
+ (head)->next = VM_PAGE_PACK_PTR(elt); \
+ } \
+ else { \
+ ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(elt); \
+ } \
+ (elt)->field.prev = VM_PAGE_PACK_PTR(__prev); \
+ (elt)->field.next = VM_PAGE_PACK_PTR(head); \
+ (head)->prev = VM_PAGE_PACK_PTR(elt); \
+MACRO_END
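+
+/*
+ * Illustrative use (names are hypothetical): append page "mem" to the
+ * tail of "my_q", chaining it through its packed queue field "pageq":
+ *
+ *	vm_page_queue_enter(&my_q, mem, vm_page_t, pageq);
+ */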
+
+
+/*
+ * Macro: vm_page_queue_enter_first
+ * Function:
+ * Insert a new element at the head of the queue.
+ * Header:
+ * void vm_page_queue_enter_first(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_enter_first(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next; \
+ \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->next)); \
+ if ((head) == __next) { \
+ (head)->prev = VM_PAGE_PACK_PTR(elt); \
+ } \
+ else { \
+ ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(elt); \
+ } \
+ (elt)->field.next = VM_PAGE_PACK_PTR(__next); \
+ (elt)->field.prev = VM_PAGE_PACK_PTR(head); \
+ (head)->next = VM_PAGE_PACK_PTR(elt); \
+MACRO_END
+
+
+/*
+ * Macro: vm_page_queue_remove
+ * Function:
+ * Remove an arbitrary item from the queue.
+ * Header:
+ * void vm_page_queue_remove(q, qe, type, field)
+ * arguments as in vm_page_queue_enter
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_remove(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next, __prev; \
+ \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.next)); \
+ __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.prev)); \
+ \
+ if ((head) == __next) \
+ (head)->prev = VM_PAGE_PACK_PTR(__prev); \
+ else \
+ ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(__prev); \
+ \
+ if ((head) == __prev) \
+ (head)->next = VM_PAGE_PACK_PTR(__next); \
+ else \
+ ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(__next); \
+ \
+ (elt)->field.next = 0; \
+ (elt)->field.prev = 0; \
+MACRO_END
+
+
+/*
+ * Macro: vm_page_queue_remove_first
+ * Function:
+ * Remove and return the entry at the head of
+ * the queue.
+ * Header:
+ * vm_page_queue_remove_first(head, entry, type, field)
+ * entry is returned by reference
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_remove_first(head, entry, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next; \
+ \
+ (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \
+ \
+ if ((head) == __next) \
+ (head)->prev = VM_PAGE_PACK_PTR(head); \
+ else \
+ ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \
+ (head)->next = VM_PAGE_PACK_PTR(__next); \
+ \
+ (entry)->field.next = 0; \
+ (entry)->field.prev = 0; \
+MACRO_END
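+
+/*
+ * Illustrative use (names are hypothetical): pop the element at the head
+ * of "my_q" that is chained through "pageq":
+ *
+ *	vm_page_t	mem;
+ *
+ *	if (!vm_page_queue_empty(&my_q))
+ *		vm_page_queue_remove_first(&my_q, mem, vm_page_t, pageq);
+ */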
+
+
+/*
+ * Macro: vm_page_queue_end
+ * Function:
+ * Tests whether an entry is the end of
+ * the given queue.
+ * Header:
+ * boolean_t vm_page_queue_end(q, qe)
+ * vm_page_queue_t q;
+ * vm_page_queue_entry_t qe;
+ */
+#define vm_page_queue_end(q, qe) ((q) == (qe))
+
+
+/*
+ * Macro: vm_page_queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t vm_page_queue_empty(q)
+ * vm_page_queue_t q;
+ */
+#define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
+
+
+
+/*
+ * Macro: vm_page_queue_first
+ * Function:
+ * Returns the first entry in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_first(q)
+ * vm_page_queue_t q; \* IN *\
+ */
+#define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ * vm_page_queue_entry_t vm_page_queue_last(q)
+ * vm_page_queue_t q; \* IN *\
+ */
+#define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_next(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_prev(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_iterate
+ * Function:
+ * Iterate over each item in the queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * vm_page_queue_iterate(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_iterate(head, elt, type, field) \
+ for ((elt) = (type)(void *) vm_page_queue_first(head); \
+ !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
+ (elt) = (type)(void *) vm_page_queue_next(&(elt)->field))
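+
+/*
+ * Illustrative use (names are hypothetical): visit every element of
+ * "my_q" chained through "pageq".  Do not remove the current element
+ * inside the body; the loop reads its next pointer after the body runs.
+ *
+ *	vm_page_t	mem;
+ *
+ *	vm_page_queue_iterate(&my_q, mem, vm_page_t, pageq) {
+ *		\* examine mem here *\
+ *	}
+ */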
+
+#else
+
+#define VM_VPLQ_ALIGNMENT 128
+#define VM_PACKED_POINTER_ALIGNMENT 4
+#define VM_PACKED_POINTER_SHIFT 0
+
+#define VM_PACKED_FROM_VM_PAGES_ARRAY 0
+
+#define VM_PAGE_PACK_PTR(p) (p)
+#define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p))
+
+#define VM_PAGE_OBJECT(p) ((vm_object_t)((p)->vm_page_object))
+#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
+
+
+#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
+MACRO_BEGIN \
+ (p)->pageq.next = 0; \
+ (p)->pageq.prev = 0; \
+MACRO_END
+
+#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p))
+
+#define vm_page_remque remque
+#define vm_page_enqueue_tail enqueue_tail
+#define vm_page_queue_init queue_init
+#define vm_page_queue_enter queue_enter
+#define vm_page_queue_enter_first queue_enter_first
+#define vm_page_queue_remove queue_remove
+#define vm_page_queue_remove_first queue_remove_first
+#define vm_page_queue_end queue_end
+#define vm_page_queue_empty queue_empty
+#define vm_page_queue_first queue_first
+#define vm_page_queue_last queue_last
+#define vm_page_queue_next queue_next
+#define vm_page_queue_prev queue_prev
+#define vm_page_queue_iterate queue_iterate
+
+#endif /* defined(__LP64__) */
+
+
+
+/*
+ * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
+ * represent a set of aging bins that are 'protected'.
+ *
+ * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
+ * not yet been 'claimed' but have been aged out of the protected bins.
+ * This happens in vm_page_speculate when it advances to the next bin
+ * and discovers that it is still occupied; at that point, all of the
+ * pages in that bin are moved to VM_PAGE_SPECULATIVE_AGED_Q.  Those pages
+ * are all guaranteed to have reached at least the maximum age we allow
+ * for a protected page; they can be older if there is no memory pressure
+ * to pull them from the bin, or no new speculative pages are being
+ * generated to push them out.
+ * This list is the one that vm_pageout_scan prefers when looking
+ * for pages to move to the underweight free list.
+ *
+ * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
+ * defines the amount of time a speculative page is normally
+ * allowed to live in the 'protected' state (i.e. not available
+ * to be stolen if vm_pageout_scan is running and looking for
+ * pages).  However, if the total number of speculative pages
+ * in the protected state exceeds our limit (defined in vm_pageout.c)
+ * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
+ * vm_pageout_scan is allowed to steal pages from the protected
+ * bins even if they are underage.
+ *
+ * vm_pageout_scan is also allowed to pull pages from a protected
+ * bin once that bin has reached the "age of consent" we've set.
+ */
+#define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10
+#define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1
+#define VM_PAGE_SPECULATIVE_AGED_Q 0
+
+#define VM_PAGE_SPECULATIVE_Q_AGE_MS 500
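+
+/*
+ * With the compile-time defaults above, a speculative page normally stays
+ * 'protected' for at most VM_PAGE_MAX_SPECULATIVE_AGE_Q *
+ * VM_PAGE_SPECULATIVE_Q_AGE_MS = 10 * 500 ms = 5000 ms (5 seconds);
+ * the runtime variable vm_page_speculative_q_age_ms (declared below)
+ * may override the per-bin interval.
+ */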
+
+struct vm_speculative_age_q {
+ /*
+ * memory queue for speculative pages via clustered pageins
+ */
+ vm_page_queue_head_t age_q;
+ mach_timespec_t age_ts;
+} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+
+
+extern
+struct vm_speculative_age_q vm_page_queue_speculative[];
+
+extern int speculative_steal_index;
+extern int speculative_age_index;
+extern unsigned int vm_page_speculative_q_age_ms;
+
+
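+/*
+ * Each lock is placed in its own 64-byte-aligned slot, with padding on
+ * either side, presumably so the two queue locks sit on separate cache
+ * lines and avoid false sharing.
+ */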
+typedef struct vm_locks_array {
+ char pad __attribute__ ((aligned (64)));
+ lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned (64)));
+ lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
+ char pad2 __attribute__ ((aligned (64)));
+} vm_locks_array_t;
+
+
+#if CONFIG_BACKGROUND_QUEUE
+extern void vm_page_assign_background_state(vm_page_t mem);
+extern void vm_page_update_background_state(vm_page_t mem);
+extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
+extern void vm_page_remove_from_backgroundq(vm_page_t mem);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+#define VM_PAGE_WIRED(m) ((m)->vm_page_q_state == VM_PAGE_IS_WIRED)
+#define NEXT_PAGE(m) ((m)->snext)
+#define NEXT_PAGE_PTR(m) (&(m)->snext)