+#define VM_PAGE_GET_CLUMP(m) ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift)
+#define VM_PAGE_GET_COLOR(m) ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask)
+
+#endif /* defined(__arm__) || defined(__arm64__) */
+
+
+
+#if defined(__LP64__)
+/*
+ * Parameters for pointer packing
+ *
+ *
+ * vm_page pointers may point to:
+ *
+ * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals,
+ *
+ * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages
+ *
+ * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED
+ * aligned).
+ *
+ *
+ * The current scheme uses 31 bits of storage and 6 bits of shift using the
+ * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the
+ * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY).
+ *
+ * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS.
+ */
+#define VM_VPLQ_ALIGNMENT 128
+#define VM_PAGE_PACKED_PTR_ALIGNMENT 64 /* must be a power of 2 */
+#define VM_PAGE_PACKED_ALIGNED __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT)))
+#define VM_PAGE_PACKED_PTR_BITS 31
+#define VM_PAGE_PACKED_PTR_SHIFT 6
+#define VM_PAGE_PACKED_PTR_BASE ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS)
+
+#define VM_PAGE_PACKED_FROM_ARRAY 0x80000000
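+/*
+ * For example, with the parameters above, a packed pointer for cases (1-2)
+ * stores 31 bits that are shifted by 6 on unpacking, giving a reach of
+ * 2^31 * 2^6 = 2^37 bytes = 128G above VM_PAGE_PACKED_PTR_BASE; the 6-bit
+ * shift is also why those pointers must be VM_PAGE_PACKED_PTR_ALIGNMENT
+ * (64-byte) aligned.  A packed value with VM_PAGE_PACKED_FROM_ARRAY set
+ * instead encodes an index into the vm_pages array.
+ */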
+
+static inline vm_page_packed_t
+vm_page_pack_ptr(uintptr_t p)
+{
+ if (p >= (uintptr_t)vm_page_array_beginning_addr &&
+ p < (uintptr_t)vm_page_array_ending_addr) {
+ ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr;
+ assert((vm_page_t)p == &vm_pages[diff]);
+ return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY);
+ }
+
+ VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR);
+ vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR);
+ return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed);
+}
+
+
+static inline uintptr_t
+vm_page_unpack_ptr(uintptr_t p)
+{
+ extern unsigned int vm_pages_count;
+
+ if (p >= VM_PAGE_PACKED_FROM_ARRAY) {
+ p &= ~VM_PAGE_PACKED_FROM_ARRAY;
+ assert(p < (uintptr_t)vm_pages_count);
+ return (uintptr_t)&vm_pages[p];
+ }
+
+ return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR);
+}
+
+
+#define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p))
+#define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p))
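+/*
+ * Hypothetical usage sketch (not compiled): packing is invertible for both
+ * representations, whether the page is an entry of vm_pages[] or a
+ * VM_PAGE_PACKED_ALIGNED allocation.  The function name is illustrative and
+ * not part of this interface.
+ */
+#if 0
+static void
+vm_page_pack_example(vm_page_t m)
+{
+	vm_page_packed_t packed = VM_PAGE_PACK_PTR(m);
+
+	/* unpacking always round-trips back to the original pointer */
+	assert((vm_page_t)VM_PAGE_UNPACK_PTR(packed) == m);
+
+	/* entries of vm_pages[] are encoded as an index with the top bit set */
+	if (packed & VM_PAGE_PACKED_FROM_ARRAY) {
+		assert((uintptr_t)m >= (uintptr_t)vm_page_array_beginning_addr &&
+		    (uintptr_t)m < (uintptr_t)vm_page_array_ending_addr);
+	}
+}
+#endif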
+
+#define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object)))
+#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
+
+
+#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
+MACRO_BEGIN \
+ (p)->vmp_snext = 0; \
+MACRO_END
+
+
+#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p)
+
+
+static __inline__ void
+vm_page_enqueue_tail(
+ vm_page_queue_t que,
+ vm_page_queue_entry_t elt)
+{
+ vm_page_queue_entry_t old_tail;
+
+ old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev);
+ elt->next = VM_PAGE_PACK_PTR(que);
+ elt->prev = que->prev;
+ que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt);
+}
+
+
+static __inline__ void
+vm_page_remque(
+ vm_page_queue_entry_t elt)
+{
+ vm_page_queue_entry_t next;
+ vm_page_queue_entry_t prev;
+ vm_page_packed_t next_pck = elt->next;
+ vm_page_packed_t prev_pck = elt->prev;
+
+ next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck);
+
+ /* next may equal prev (and the queue head) if elt was the only element */
+ prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck);
+
+ next->prev = prev_pck;
+ prev->next = next_pck;
+
+ elt->next = 0;
+ elt->prev = 0;
+}
+
+
+/*
+ * Macro: vm_page_queue_init
+ * Function:
+ * Initialize the given queue.
+ * Header:
+ * void vm_page_queue_init(q)
+ * vm_page_queue_t q; \* MODIFIED *\
+ */
+#define vm_page_queue_init(q) \
+MACRO_BEGIN \
+ VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \
+ (q)->next = VM_PAGE_PACK_PTR(q); \
+ (q)->prev = VM_PAGE_PACK_PTR(q); \
+MACRO_END
+
+
+/*
+ * Macro: vm_page_queue_enter
+ * Function:
+ * Insert a new element at the tail of the vm_page queue.
+ * Header:
+ * void vm_page_queue_enter(q, elt, field)
+ * queue_t q;
+ * vm_page_t elt;
+ * <field> is the list field in vm_page_t
+ *
+ * This macro's arguments have to match the generic "queue_enter()" macro which is
+ * what is used for this on 32 bit kernels.
+ */
+#define vm_page_queue_enter(head, elt, field) \
+MACRO_BEGIN \
+ vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \
+ vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
+ vm_page_packed_t __pck_prev = (head)->prev; \
+ \
+ if (__pck_head == __pck_prev) { \
+ (head)->next = __pck_elt; \
+ } else { \
+ vm_page_t __prev; \
+ __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
+ __prev->field.next = __pck_elt; \
+ } \
+ (elt)->field.prev = __pck_prev; \
+ (elt)->field.next = __pck_head; \
+ (head)->prev = __pck_elt; \
+MACRO_END
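+/*
+ * Hypothetical usage sketch (not compiled), assuming the vm_page_queue_head_t
+ * and vmp_pageq declarations from this header: a queue head must be packable,
+ * so a VM_PAGE_PACKED_ALIGNED kernel global (case 1 of the packing scheme
+ * above) works.  "example_queue" and the function name are illustrative.
+ */
+#if 0
+static vm_page_queue_head_t example_queue VM_PAGE_PACKED_ALIGNED;
+
+static void
+vm_page_queue_enter_example(vm_page_t m)
+{
+	vm_page_queue_init(&example_queue);
+
+	/* append m to the tail through its vmp_pageq linkage */
+	vm_page_queue_enter(&example_queue, m, vmp_pageq);
+}
+#endif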
+
+
+#if defined(__x86_64__)
+/*
+ * These are helper macros for vm_page_queue_enter_clump to assist
+ * with conditional compilation (release / debug / development)
+ */
+#if DEVELOPMENT || DEBUG
+
+#define __DEBUG_CHECK_BUDDIES(__prev, __p, field) \
+MACRO_BEGIN \
+ if (__prev != NULL) { \
+ assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next)); \
+ assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \
+ } \
+MACRO_END
+
+#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) \
+MACRO_BEGIN \
+ unsigned int __i; \
+ vm_page_queue_entry_t __tmp; \
+ for (__i = 0, __tmp = __first; __i < __n_free; __i++) { \
+ __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \
+ } \
+ assert(__tmp == __last_next); \
+MACRO_END
+
+#define __DEBUG_STAT_INCREMENT_INRANGE vm_clump_inrange++
+#define __DEBUG_STAT_INCREMENT_INSERTS vm_clump_inserts++
+#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) vm_clump_promotes+=__n_free
+
+#else
+
+#define __DEBUG_CHECK_BUDDIES(__prev, __p, field)
+#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next)
+#define __DEBUG_STAT_INCREMENT_INRANGE
+#define __DEBUG_STAT_INCREMENT_INSERTS
+#define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free)
+
+#endif /* if DEVELOPMENT || DEBUG */
+
+/*
+ * Insert a new page into a free queue and clump pages within the same 16K boundary together
+ */
+static inline void
+vm_page_queue_enter_clump(
+ vm_page_queue_t head,
+ vm_page_t elt)
+{
+ vm_page_queue_entry_t first = NULL; /* first page in the clump */
+ vm_page_queue_entry_t last = NULL; /* last page in the clump */
+ vm_page_queue_entry_t prev = NULL;
+ vm_page_queue_entry_t next;
+ uint_t n_free = 1;
+ extern unsigned int vm_pages_count;
+ extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
+ extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
+
+ /*
+ * If elt is part of the vm_pages[] array, find its neighboring buddies in the array.
+ */
+ if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) {
+ vm_page_t p;
+ uint_t i;
+ uint_t n;
+ ppnum_t clump_num;
+
+ first = last = (vm_page_queue_entry_t)elt;
+ clump_num = VM_PAGE_GET_CLUMP(elt);
+ n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask;
+
+ /*
+	 * Check for preceding vm_pages[] entries in the same clump
+ */
+ for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) {
+ if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
+ if (prev == NULL) {
+ prev = (vm_page_queue_entry_t)p;
+ }
+ first = (vm_page_queue_entry_t)p;
+ n_free++;
+ }
+ }
+
+ /*
+	 * Check the following vm_pages[] entries in the same clump
+ */
+ for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) {
+ if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) {
+ if (last == (vm_page_queue_entry_t)elt) { /* first one only */
+ __DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq);
+ }
+
+ if (prev == NULL) {
+ prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev);
+ }
+ last = (vm_page_queue_entry_t)p;
+ n_free++;
+ }
+ }
+ __DEBUG_STAT_INCREMENT_INRANGE;
+ }
+
+ /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */
+ if (prev == NULL) {
+ prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev);
+ }
+
+ /* insert the element */
+ next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next);
+ elt->vmp_pageq.next = prev->next;
+ elt->vmp_pageq.prev = next->prev;
+ prev->next = next->prev = VM_PAGE_PACK_PTR(elt);
+ __DEBUG_STAT_INCREMENT_INSERTS;
+
+ /*
+ * Check if clump needs to be promoted to head.
+ */
+ if (n_free >= vm_clump_promote_threshold && n_free > 1) {
+ vm_page_queue_entry_t first_prev;
+
+ first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev);
+
+ /* If not at head already */
+ if (first_prev != head) {
+ vm_page_queue_entry_t last_next;
+ vm_page_queue_entry_t head_next;
+
+ last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next);
+
+ /* verify that the links within the clump are consistent */
+ __DEBUG_VERIFY_LINKS(first, n_free, last_next);
+
+ /* promote clump to head */
+ first_prev->next = last->next;
+ last_next->prev = first->prev;
+ first->prev = VM_PAGE_PACK_PTR(head);
+ last->next = head->next;
+
+ head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next);
+ head_next->prev = VM_PAGE_PACK_PTR(last);
+ head->next = VM_PAGE_PACK_PTR(first);
+ __DEBUG_STAT_INCREMENT_PROMOTES(n_free);
+ }
+ }
+}
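+/*
+ * For example, with 16K clumps of 4K pages (four pages per clump,
+ * vm_clump_shift == 2), physical pages 0x1000 through 0x1003 all share
+ * clump number 0x400.  Freeing one of them while its buddies are already on
+ * the free queue raises n_free, and once n_free reaches
+ * vm_clump_promote_threshold the whole clump is moved to the head of the
+ * queue.  The clump geometry is established at boot; the numbers here are
+ * only illustrative.
+ */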
+#endif /* defined(__x86_64__) */
+
+/*
+ * Macro: vm_page_queue_enter_first
+ * Function:
+ * Insert a new element at the head of the vm_page queue.
+ * Header:
+ *		void vm_page_queue_enter_first(q, elt, field)
+ * queue_t q;
+ * vm_page_t elt;
+ *		<field> is the linkage field in vm_page_t
+ *
+ * This macro's arguments have to match the generic "queue_enter_first()" macro which is
+ * what is used for this on 32 bit kernels.
+ */
+#define vm_page_queue_enter_first(head, elt, field) \
+MACRO_BEGIN \
+ vm_page_packed_t __pck_next = (head)->next; \
+ vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
+ vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \
+ \
+ if (__pck_head == __pck_next) { \
+ (head)->prev = __pck_elt; \
+ } else { \
+ vm_page_t __next; \
+ __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
+ __next->field.prev = __pck_elt; \
+ } \
+ \
+ (elt)->field.next = __pck_next; \
+ (elt)->field.prev = __pck_head; \
+ (head)->next = __pck_elt; \
+MACRO_END
+
+
+/*
+ * Macro: vm_page_queue_remove
+ * Function:
+ * Remove an arbitrary page from a vm_page queue.
+ * Header:
+ * void vm_page_queue_remove(q, qe, field)
+ * arguments as in vm_page_queue_enter
+ *
+ * This macro's arguments have to match the generic "queue_remove()" macro which is
+ * what is used for this on 32 bit kernels.
+ */
+#define vm_page_queue_remove(head, elt, field) \
+MACRO_BEGIN \
+ vm_page_packed_t __pck_next = (elt)->field.next; \
+ vm_page_packed_t __pck_prev = (elt)->field.prev; \
+ vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
+ vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \
+ \
+ if ((void *)(head) == (void *)__next) { \
+ (head)->prev = __pck_prev; \
+ } else { \
+ __next->field.prev = __pck_prev; \
+ } \
+ \
+ if ((void *)(head) == (void *)__prev) { \
+ (head)->next = __pck_next; \
+ } else { \
+ __prev->field.next = __pck_next; \
+ } \
+ \
+ (elt)->field.next = 0; \
+ (elt)->field.prev = 0; \
+MACRO_END
+
+
+/*
+ * Macro: vm_page_queue_remove_first
+ *
+ * Function:
+ * Remove and return the entry at the head of a vm_page queue.
+ *
+ * Header:
+ * vm_page_queue_remove_first(head, entry, field)
+ * N.B. entry is returned by reference
+ *
+ * This macro's arguments have to match the generic "queue_remove_first()" macro which is
+ * what is used for this on 32 bit kernels.
+ */
+#define vm_page_queue_remove_first(head, entry, field) \
+MACRO_BEGIN \
+ vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
+ vm_page_packed_t __pck_next; \
+ vm_page_t __next; \
+ \
+ (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \
+ __pck_next = (entry)->field.next; \
+ __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
+ \
+ if (__pck_head == __pck_next) { \
+ (head)->prev = __pck_head; \
+ } else { \
+ __next->field.prev = __pck_head; \
+ } \
+ \
+ (head)->next = __pck_next; \
+ (entry)->field.next = 0; \
+ (entry)->field.prev = 0; \
+MACRO_END
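+/*
+ * Hypothetical usage sketch (not compiled): popping the head of a queue the
+ * caller already knows is non-empty.  "my_queue" and the function name are
+ * illustrative.
+ */
+#if 0
+static vm_page_t
+vm_page_queue_pop_example(vm_page_queue_t my_queue)
+{
+	vm_page_t m;
+
+	/* undefined on an empty queue; check vm_page_queue_empty() first */
+	vm_page_queue_remove_first(my_queue, m, vmp_pageq);
+	return m;
+}
+#endif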
+
+
+#if defined(__x86_64__)
+/*
+ * Macro: vm_page_queue_remove_first_with_clump
+ * Function:
+ * Remove and return the entry at the head of the free queue
+ * end is set to 1 to indicate that we just returned the last page in a clump
+ *
+ * Header:
+ * vm_page_queue_remove_first_with_clump(head, entry, end)
+ * entry is returned by reference
+ * end is returned by reference
+ */
+#define vm_page_queue_remove_first_with_clump(head, entry, end) \
+MACRO_BEGIN \
+ vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \
+ vm_page_packed_t __pck_next; \
+ vm_page_t __next; \
+ \
+ (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \
+ __pck_next = (entry)->vmp_pageq.next; \
+ __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \
+ \
+ (end) = 0; \
+ if (__pck_head == __pck_next) { \
+ (head)->prev = __pck_head; \
+ (end) = 1; \
+ } else { \
+ __next->vmp_pageq.prev = __pck_head; \
+ if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \
+ (end) = 1; \
+ } \
+ } \
+ \
+ (head)->next = __pck_next; \
+ (entry)->vmp_pageq.next = 0; \
+ (entry)->vmp_pageq.prev = 0; \
+MACRO_END
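+/*
+ * Hypothetical usage sketch (not compiled): pulling pages off a non-empty
+ * free queue until the end of the current clump ("end" becomes 1 when the
+ * page just removed was the last of its clump or the last in the queue).
+ * The names are illustrative.
+ */
+#if 0
+static void
+vm_page_grab_clump_example(vm_page_queue_t free_queue)
+{
+	vm_page_t m;
+	int end_of_clump = 0;
+
+	while (!end_of_clump) {
+		vm_page_queue_remove_first_with_clump(free_queue, m, end_of_clump);
+		/* m is part of the same clump as the page(s) removed before it */
+	}
+}
+#endif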
+#endif /* defined(__x86_64__) */
+
+/*
+ * Macro: vm_page_queue_end
+ * Function:
+ *	Tests whether an entry is the end of the queue
+ *	(i.e. the queue head itself).
+ * Header:
+ * boolean_t vm_page_queue_end(q, qe)
+ * vm_page_queue_t q;
+ * vm_page_queue_entry_t qe;
+ */
+#define vm_page_queue_end(q, qe) ((q) == (qe))
+
+
+/*
+ * Macro: vm_page_queue_empty
+ * Function:
+ * Tests whether a queue is empty.
+ * Header:
+ * boolean_t vm_page_queue_empty(q)
+ * vm_page_queue_t q;
+ */
+#define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q)))
+
+
+
+/*
+ * Macro: vm_page_queue_first
+ * Function:
+ *	Returns the first entry in the queue.
+ * Header:
+ *	uintptr_t vm_page_queue_first(q)
+ * vm_page_queue_t q; \* IN *\
+ */
+#define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_last
+ * Function:
+ * Returns the last entry in the queue.
+ * Header:
+ *	uintptr_t vm_page_queue_last(q)
+ *	vm_page_queue_t q;		\* IN *\
+ */
+#define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_next
+ * Function:
+ * Returns the entry after an item in the queue.
+ * Header:
+ *	uintptr_t vm_page_queue_next(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next))
+
+
+
+/*
+ * Macro: vm_page_queue_prev
+ * Function:
+ * Returns the entry before an item in the queue.
+ * Header:
+ *	uintptr_t vm_page_queue_prev(qc)
+ * vm_page_queue_t qc;
+ */
+#define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev))
+
+
+
+/*
+ * Macro: vm_page_queue_iterate
+ * Function:
+ *	Iterate over each item in a vm_page queue.
+ * Generates a 'for' loop, setting elt to
+ * each item in turn (by reference).
+ * Header:
+ * vm_page_queue_iterate(q, elt, field)
+ * queue_t q;
+ * vm_page_t elt;
+ * <field> is the chain field in vm_page_t
+ */
+#define vm_page_queue_iterate(head, elt, field) \
+ for ((elt) = (vm_page_t)vm_page_queue_first(head); \
+ !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \
+ (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field)) \
+
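+/*
+ * Hypothetical usage sketch (not compiled): the iterate macro expands to a
+ * 'for' header, so it takes a normal loop body.  "my_queue" and the function
+ * name are illustrative.
+ */
+#if 0
+static unsigned int
+vm_page_queue_count_example(vm_page_queue_t my_queue)
+{
+	vm_page_t m;
+	unsigned int count = 0;
+
+	vm_page_queue_iterate(my_queue, m, vmp_pageq) {
+		count++;
+	}
+	return count;
+}
+#endif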
+#else // __LP64__
+
+#define VM_VPLQ_ALIGNMENT 128
+#define VM_PAGE_PACKED_PTR_ALIGNMENT sizeof(vm_offset_t)
+#define VM_PAGE_PACKED_ALIGNED
+#define VM_PAGE_PACKED_PTR_BITS 32
+#define VM_PAGE_PACKED_PTR_SHIFT 0
+#define VM_PAGE_PACKED_PTR_BASE 0
+
+#define VM_PAGE_PACKED_FROM_ARRAY 0
+
+#define VM_PAGE_PACK_PTR(p) (p)
+#define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p))
+
+#define VM_PAGE_OBJECT(p) ((vm_object_t)((p)->vmp_object))
+#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o)))
+
+
+#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \
+MACRO_BEGIN \
+ (p)->vmp_pageq.next = 0; \
+ (p)->vmp_pageq.prev = 0; \
+MACRO_END
+
+#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p))
+
+#define vm_page_remque remque
+#define vm_page_enqueue_tail enqueue_tail
+#define vm_page_queue_init queue_init
+#define vm_page_queue_enter(h, e, f) queue_enter(h, e, vm_page_t, f)
+#define vm_page_queue_enter_first(h, e, f) queue_enter_first(h, e, vm_page_t, f)
+#define vm_page_queue_remove(h, e, f) queue_remove(h, e, vm_page_t, f)
+#define vm_page_queue_remove_first(h, e, f) queue_remove_first(h, e, vm_page_t, f)
+#define vm_page_queue_end queue_end
+#define vm_page_queue_empty queue_empty
+#define vm_page_queue_first queue_first
+#define vm_page_queue_last queue_last
+#define vm_page_queue_next queue_next
+#define vm_page_queue_prev queue_prev
+#define vm_page_queue_iterate(h, e, f) queue_iterate(h, e, vm_page_t, f)
+
+#endif // __LP64__
+
+
+
+/*
+ * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
+ * represent a set of aging bins that are 'protected'.
+ *
+ * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
+ * not yet been 'claimed' but have been aged out of the protected bins.
+ * This occurs in vm_page_speculate when it advances to the next bin
+ * and discovers that it is still occupied; at that point, all of the
+ * pages in that bin are moved to VM_PAGE_SPECULATIVE_AGED_Q.  The pages
+ * in that bin are all guaranteed to have reached at least the maximum age
+ * we allow for a protected page.  They can be older if there is no
+ * memory pressure to pull them from the bin, or no new speculative pages
+ * are being generated to push them out.
+ * This list is the one that vm_pageout_scan will prefer when looking
+ * for pages to move to the underweight free list.
+ *
+ * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
+ * defines the amount of time a speculative page is normally
+ * allowed to live in the 'protected' state (i.e. not available
+ * to be stolen if vm_pageout_scan is running and looking for
+ * pages).  However, if the total number of speculative pages
+ * in the protected state exceeds our limit (defined in vm_pageout.c)
+ * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
+ * vm_pageout_scan is allowed to steal pages from the protected
+ * bins even if they are underage.
+ *
+ * vm_pageout_scan is also allowed to pull pages from a protected
+ * bin if the bin has reached the "age of consent" we've set.
+ */
+#define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10
+#define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1
+#define VM_PAGE_SPECULATIVE_AGED_Q 0
+
+#define VM_PAGE_SPECULATIVE_Q_AGE_MS 500
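+/*
+ * With the values above, a speculative page is normally protected for
+ * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
+ * = 10 * 500 ms = 5 seconds before it ages into VM_PAGE_SPECULATIVE_AGED_Q.
+ */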
+
+struct vm_speculative_age_q {
+ /*
+ * memory queue for speculative pages via clustered pageins
+ */
+ vm_page_queue_head_t age_q;
+ mach_timespec_t age_ts;
+} VM_PAGE_PACKED_ALIGNED;
+
+
+
+extern
+struct vm_speculative_age_q vm_page_queue_speculative[];
+
+extern int speculative_steal_index;
+extern int speculative_age_index;
+extern unsigned int vm_page_speculative_q_age_ms;