+
+/*
+ * Macro: vm_page_queue_enter
+ * Function:
+ * Insert a new element at the tail of the queue.
+ * Header:
+ * void vm_page_queue_enter(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_enter(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __prev; \
+ \
+ __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->prev)); \
+ if ((head) == __prev) { \
+ (head)->next = VM_PAGE_PACK_PTR(elt); \
+ } \
+ else { \
+ ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(elt); \
+ } \
+ (elt)->field.prev = VM_PAGE_PACK_PTR(__prev); \
+ (elt)->field.next = VM_PAGE_PACK_PTR(head); \
+ (head)->prev = VM_PAGE_PACK_PTR(elt); \
+MACRO_END
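+
+/*
+ * Illustrative use (a sketch with placeholder names: 'q' stands for some
+ * vm_page_queue_t head and 'mem' for a vm_page_t, chained through a field
+ * such as 'pageq'):
+ *
+ *	vm_page_queue_enter(q, mem, vm_page_t, pageq);
+ *
+ * appends 'mem' to the tail of the queue headed by 'q'.
+ */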
+
+
+/*
+ * These are helper macros for vm_page_queue_enter_clump to assist
+ * with conditional compilation (release / debug / development)
+ */
+#if DEVELOPMENT || DEBUG
+
+#define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) \
+MACRO_BEGIN \
+ if(__check) { /* if first forward buddy.. */ \
+ if(__prev) { /* ..and if a backward buddy was found, verify link consistency */ \
+ assert(__p == (vm_page_t) VM_PAGE_UNPACK_PTR(__prev->next)); \
+ assert(__prev == (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev)); \
+ } \
+ __check=0; \
+ } \
+MACRO_END
+
+#define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __tmp; \
+ for(__i=0, __tmp=__first; __i<__n_free; __i++) \
+ __tmp=(vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__tmp->next); \
+ assert(__tmp == __last_next); \
+MACRO_END
+
+#define __DEBUG_STAT_INCREMENT_INRANGE vm_clump_inrange++
+#define __DEBUG_STAT_INCREMENT_INSERTS vm_clump_inserts++
+#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) vm_clump_promotes+=__n_free
+
+#else
+
+#define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) __check=1
+#define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next)
+#define __DEBUG_STAT_INCREMENT_INRANGE
+#define __DEBUG_STAT_INCREMENT_INSERTS
+#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)
+
+#endif /* if DEVELOPMENT || DEBUG */
+
+/*
+ * Macro: vm_page_queue_enter_clump
+ * Function:
+ * Insert a new element into the free queue, clumping pages within the same 16K boundary together.
+ *
+ * Header:
+ * void vm_page_queue_enter_clump(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#if defined(__x86_64__)
+#define vm_page_queue_enter_clump(head, elt, type, field) \
+MACRO_BEGIN \
+ ppnum_t __clump_num; \
+ unsigned int __i, __n, __n_free=1, __check=1; \
+ vm_page_queue_entry_t __prev=0, __next, __last, __last_next, __first, __first_prev, __head_next; \
+ vm_page_t __p; \
+ \
+ /* if elt is part of vm_pages[] */ \
+ if((elt) >= vm_page_array_beginning_addr && (elt) < vm_page_array_boundary) { \
+ __first = __last = (vm_page_queue_entry_t) (elt); \
+ __clump_num = VM_PAGE_GET_CLUMP(elt); \
+ __n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask; \
+ /* scan backward looking for a buddy page */ \
+ for(__i=0, __p=(elt)-1; __i<__n && __p>=vm_page_array_beginning_addr; __i++, __p--) { \
+ if(__p->vm_page_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \
+ if(__prev == 0) __prev = (vm_page_queue_entry_t) __p; \
+ __first = (vm_page_queue_entry_t) __p; \
+ __n_free++; \
+ } \
+ } \
+ /* scan forward looking for a buddy page */ \
+ for(__i=__n+1, __p=(elt)+1; __i<vm_clump_size && __p<vm_page_array_boundary; __i++, __p++) { \
+ if(__p->vm_page_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \
+ __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field); \
+ if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev); \
+ __last = (vm_page_queue_entry_t) __p; \
+ __n_free++; \
+ } \
+ } \
+ __DEBUG_STAT_INCREMENT_INRANGE; \
+ } \
+ /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */ \
+ if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->prev); \
+ \
+ /* insert the element */ \
+ __next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__prev->next); \
+ (elt)->field.next = __prev->next; \
+ (elt)->field.prev = __next->prev; \
+ __prev->next = __next->prev = VM_PAGE_PACK_PTR(elt); \
+ __DEBUG_STAT_INCREMENT_INSERTS; \
+ \
+ /* check if clump needs to be promoted to head */ \
+ if(__n_free >= vm_clump_promote_threshold && __n_free > 1) { \
+ __first_prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__first->prev); \
+ if(__first_prev != (head)) { /* if not at head already */ \
+ __last_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__last->next); \
+ /* verify that the links within the clump are consistent */ \
+ __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next); \
+ /* promote clump to head */ \
+ __first_prev->next = __last->next; \
+ __last_next->prev = __first->prev; \
+ __first->prev = VM_PAGE_PACK_PTR(head); \
+ __last->next = (head)->next; \
+ __head_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->next); \
+ __head_next->prev = VM_PAGE_PACK_PTR(__last); \
+ (head)->next = VM_PAGE_PACK_PTR(__first); \
+ __DEBUG_STAT_INCREMENT_PROMOTES(__n_free); \
+ } \
+ } \
+MACRO_END
+#endif /* defined(__x86_64__) */
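+
+/*
+ * Illustrative use (placeholder names: 'free_q' stands for a free-queue head
+ * and 'mem' for a vm_page_t chained through a field such as 'pageq'):
+ *
+ *	vm_page_queue_enter_clump(free_q, mem, vm_page_t, pageq);
+ *
+ * Pages from vm_pages[] that are already on the queue and fall within the same
+ * clump as 'mem' end up adjacent to it; once a clump accumulates
+ * vm_clump_promote_threshold free pages it is promoted to the head of the queue.
+ */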
+
+/*
+ * Macro: vm_page_queue_enter_first
+ * Function:
+ * Insert a new element at the head of the queue.
+ * Header:
+ * void vm_page_queue_enter_first(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_enter_first(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next; \
+ \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->next)); \
+ if ((head) == __next) { \
+ (head)->prev = VM_PAGE_PACK_PTR(elt); \
+ } \
+ else { \
+ ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(elt); \
+ } \
+ (elt)->field.next = VM_PAGE_PACK_PTR(__next); \
+ (elt)->field.prev = VM_PAGE_PACK_PTR(head); \
+ (head)->next = VM_PAGE_PACK_PTR(elt); \
+MACRO_END
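+
+/*
+ * Illustrative use (placeholder names as above):
+ *
+ *	vm_page_queue_enter_first(q, mem, vm_page_t, pageq);
+ *
+ * pushes 'mem' onto the head of the queue headed by 'q'.
+ */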
+
+
+/*
+ * Macro: vm_page_queue_remove
+ * Function:
+ * Remove an arbitrary item from the queue.
+ * Header:
+ * void vm_page_queue_remove(q, qe, type, field)
+ * arguments as in vm_page_queue_enter
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_remove(head, elt, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next, __prev; \
+ \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.next)); \
+ __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.prev)); \
+ \
+ if ((head) == __next) \
+ (head)->prev = VM_PAGE_PACK_PTR(__prev); \
+ else \
+ ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(__prev); \
+ \
+ if ((head) == __prev) \
+ (head)->next = VM_PAGE_PACK_PTR(__next); \
+ else \
+ ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(__next); \
+ \
+ (elt)->field.next = 0; \
+ (elt)->field.prev = 0; \
+MACRO_END
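+
+/*
+ * Illustrative use (placeholder names as above):
+ *
+ *	vm_page_queue_remove(q, mem, vm_page_t, pageq);
+ *
+ * unlinks 'mem' from wherever it sits in the queue headed by 'q' and
+ * zeroes its chain pointers.
+ */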
+
+
+/*
+ * Macro: vm_page_queue_remove_first
+ * Function:
+ * Remove and return the entry at the head of
+ * the queue.
+ * Header:
+ * vm_page_queue_remove_first(head, entry, type, field)
+ * entry is returned by reference
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_remove_first(head, entry, type, field) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next; \
+ \
+ (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \
+ \
+ if ((head) == __next) \
+ (head)->prev = VM_PAGE_PACK_PTR(head); \
+ else \
+ ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \
+ (head)->next = VM_PAGE_PACK_PTR(__next); \
+ \
+ (entry)->field.next = 0; \
+ (entry)->field.prev = 0; \
+MACRO_END
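+
+/*
+ * Illustrative use (placeholder names; the caller is assumed to have verified
+ * that the queue is not empty, e.g. with vm_page_queue_empty()):
+ *
+ *	vm_page_t mem;
+ *	vm_page_queue_remove_first(q, mem, vm_page_t, pageq);
+ *
+ * leaves the page that was at the head of the queue in 'mem'.
+ */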
+
+
+/*
+ * Macro: vm_page_queue_remove_first_with_clump
+ * Function:
+ * Remove and return the entry at the head of the free queue.
+ * 'end' is set to 1 to indicate that the page just returned was the last one in its clump.
+ *
+ * Header:
+ * vm_page_queue_remove_first_with_clump(head, entry, type, field, end)
+ * entry is returned by reference
+ * end is returned by reference
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#if defined(__x86_64__)
+#define vm_page_queue_remove_first_with_clump(head, entry, type, field, end) \
+MACRO_BEGIN \
+ vm_page_queue_entry_t __next; \
+ \
+ (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \
+ __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \
+ \
+ (end)=0; \
+ if ((head) == __next) { \
+ (head)->prev = VM_PAGE_PACK_PTR(head); \
+ (end)=1; \
+ } \
+ else { \
+ ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \
+ if(VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(((type)(void *)(__next)))) (end)=1; \
+ } \
+ (head)->next = VM_PAGE_PACK_PTR(__next); \
+ \
+ (entry)->field.next = 0; \
+ (entry)->field.prev = 0; \
+ \
+MACRO_END
+#endif /* defined(__x86_64__) */
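+
+/*
+ * Illustrative use (placeholder names; 'q' stands for a non-empty free-queue head):
+ *
+ *	vm_page_t mem;
+ *	unsigned int clump_end;
+ *	vm_page_queue_remove_first_with_clump(q, mem, vm_page_t, pageq, clump_end);
+ *
+ * leaves the former head of the queue in 'mem' and sets 'clump_end' to 1 when
+ * 'mem' was the last page of its clump (or the last page on the queue),
+ * 0 otherwise.
+ */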
+
+/*
+ * Macro: vm_page_queue_end
+ * Function:
+ * Tests whether an entry is the end of the queue.
+ * Header:
+ * boolean_t vm_page_queue_end(q, qe)
+ * vm_page_queue_t q;
+ * vm_page_queue_entry_t qe;
+ */
+#define vm_page_queue_end(q, qe) ((q) == (qe))
+
+
+/*
+ * Macro: vm_page_queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t vm_page_queue_empty(q)
+ * vm_page_queue_t q;
+ */
+#define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
+
+
+
+/*
+ * Macro: vm_page_queue_first
+ * Function:
+ * Returns the first entry in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_first(q)
+ * vm_page_queue_t q; \* IN *\
+ */
+#define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_last(q)
+ * vm_page_queue_t q; \* IN *\
+ */
+#define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_next(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ * uintptr_t vm_page_queue_prev(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_iterate
+ * Function:
+ * Iterate over each item in the queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * vm_page_queue_iterate(q, elt, type, field)
+ * vm_page_queue_t q;
+ * <type> elt;
+ * <type> is what's in our queue
+ * <field> is the chain field in (*<type>)
+ * Note:
+ * This should only be used with Method 2 queue iteration (element chains)
+ */
+#define vm_page_queue_iterate(head, elt, type, field) \
+ for ((elt) = (type)(void *) vm_page_queue_first(head); \
+ !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
+ (elt) = (type)(void *) vm_page_queue_next(&(elt)->field))
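+
+/*
+ * Illustrative use (placeholder names as above):
+ *
+ *	vm_page_t mem;
+ *	vm_page_queue_iterate(q, mem, vm_page_t, pageq) {
+ *		... examine 'mem'; do not unlink it while iterating ...
+ *	}
+ */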
+
+#else
+
+#define VM_VPLQ_ALIGNMENT 128
+#define VM_PACKED_POINTER_ALIGNMENT 4
+#define VM_PACKED_POINTER_SHIFT 0
+
+#define VM_PACKED_FROM_VM_PAGES_ARRAY 0
+
+#define VM_PAGE_PACK_PTR(p) (p)
+#define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p))
+
+#define VM_PAGE_OBJECT(p) (vm_object_t)(p->vm_page_object)
+#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
+
+
+#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
+MACRO_BEGIN \
+ (p)->pageq.next = 0; \
+ (p)->pageq.prev = 0; \
+MACRO_END
+
+#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p))
+
+#define vm_page_remque remque
+#define vm_page_enqueue_tail enqueue_tail
+#define vm_page_queue_init queue_init
+#define vm_page_queue_enter queue_enter
+#define vm_page_queue_enter_first queue_enter_first
+#define vm_page_queue_remove queue_remove
+#define vm_page_queue_remove_first queue_remove_first
+#define vm_page_queue_end queue_end
+#define vm_page_queue_empty queue_empty
+#define vm_page_queue_first queue_first
+#define vm_page_queue_last queue_last
+#define vm_page_queue_next queue_next
+#define vm_page_queue_prev queue_prev
+#define vm_page_queue_iterate queue_iterate
+
+#endif
+
+
+
+/*
+ * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
+ * represent a set of aging bins that are 'protected'.
+ *
+ * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
+ * not yet been 'claimed' but have been aged out of the protected bins.
+ * This occurs in vm_page_speculate when it advances to the next bin
+ * and discovers that it is still occupied; at that point, all of the
+ * pages in that bin are moved to VM_PAGE_SPECULATIVE_AGED_Q. The pages
+ * in that bin are all guaranteed to have reached at least the maximum age
+ * we allow for a protected page. They can be older if there is no
+ * memory pressure to pull them from the bin, or if there are no new
+ * speculative pages being generated to push them out.
+ * This list is the one that vm_pageout_scan will prefer when looking
+ * for pages to move to the underweight free list.
+ *
+ * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
+ * defines the amount of time a speculative page is normally
+ * allowed to live in the 'protected' state (i.e. not available
+ * to be stolen if vm_pageout_scan is running and looking for
+ * pages). However, if the total number of speculative pages
+ * in the protected state exceeds our limit (defined in vm_pageout.c)
+ * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
+ * vm_pageout_scan is allowed to steal pages from the protected
+ * bucket even if they are underage.
+ *
+ * vm_pageout_scan is also allowed to pull pages from a protected
+ * bin if the bin has reached the "age of consent" we've set.
+ */
+#define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10
+#define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1
+#define VM_PAGE_SPECULATIVE_AGED_Q 0
+
+#define VM_PAGE_SPECULATIVE_Q_AGE_MS 500
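+
+/*
+ * With the values above, a speculative page normally stays 'protected' for up
+ * to VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS =
+ * 10 * 500 ms = 5 seconds before aging into VM_PAGE_SPECULATIVE_AGED_Q.
+ */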
+
+struct vm_speculative_age_q {
+ /*
+ * memory queue for speculative pages via clustered pageins
+ */
+ vm_page_queue_head_t age_q;
+ mach_timespec_t age_ts;
+} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+
+
+
+extern
+struct vm_speculative_age_q vm_page_queue_speculative[];
+
+extern int speculative_steal_index;
+extern int speculative_age_index;
+extern unsigned int vm_page_speculative_q_age_ms;
+
+
+typedef struct vm_locks_array {
+ char pad __attribute__ ((aligned (64)));
+ lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned (64)));
+ lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
+ char pad2 __attribute__ ((aligned (64)));
+} vm_locks_array_t;
+
+
+#if CONFIG_BACKGROUND_QUEUE
+extern void vm_page_assign_background_state(vm_page_t mem);
+extern void vm_page_update_background_state(vm_page_t mem);
+extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first);
+extern void vm_page_remove_from_backgroundq(vm_page_t mem);
+#endif
+
+#define VM_PAGE_WIRED(m) ((m)->vm_page_q_state == VM_PAGE_IS_WIRED)
+#define NEXT_PAGE(m) ((m)->snext)
+#define NEXT_PAGE_PTR(m) (&(m)->snext)