+ was_pageable = TRUE; \
+ } \
+ \
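+	/* throttled pages are not counted as pageable, hence was_pageable = FALSE below */ \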
+ else if (mem->throttled) { \
+ assert(mem->object != compressor_object); \
+ assert(!mem->active && !mem->inactive); \
+ assert(!mem->speculative); \
+ assert(!mem->fictitious); \
+ queue_remove(&vm_page_queue_throttled, \
+ mem, vm_page_t, pageq); \
+ mem->throttled = FALSE; \
+ vm_page_throttled_count--; \
+ was_pageable = FALSE; \
+ } \
+ \
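+	/* speculative pages may sit on any of the aging sub-queues,	\
+	 * so remque() unlinks the element without naming a queue head */ \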
+ else if (mem->speculative) { \
+ assert(mem->object != compressor_object); \
+ assert(!mem->active && !mem->inactive); \
+ assert(!mem->throttled); \
+ assert(!mem->fictitious); \
+ remque(&mem->pageq); \
+ mem->speculative = FALSE; \
+ vm_page_speculative_count--; \
+ was_pageable = TRUE; \
+ } \
+ \
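+	/* live pageq linkage with no queue bit set means the queues are corrupt */ \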
+ else if (mem->pageq.next || mem->pageq.prev) { \
+ was_pageable = FALSE; \
+ panic("VM_PAGE_QUEUES_REMOVE: unmarked page on Q"); \
+ } else { \
+ was_pageable = FALSE; \
+ } \
+ \
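+	/* the page is off its queue: scrub the linkage, then settle the pageable accounting */ \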
+ mem->pageq.next = NULL; \
+ mem->pageq.prev = NULL; \
+ VM_PAGE_QUEUES_ASSERT(mem, 0); \
+ if (was_pageable) { \
+ if (mem->object->internal) { \
+ vm_page_pageable_internal_count--; \
+ } else { \
+ vm_page_pageable_external_count--; \
+ } \
+ } \
+ MACRO_END
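+
+/*
+ * Usage sketch (illustrative, not part of this change): the macro
+ * manipulates the global queues and counters without taking any lock
+ * itself, so the caller is expected to hold the page queues lock:
+ *
+ *	vm_page_lockspin_queues();
+ *	VM_PAGE_QUEUES_REMOVE(mem);
+ *	vm_page_unlock_queues();
+ */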
+
+
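+/*
+ * VM_PAGE_ENQUEUE_INACTIVE(mem, first)
+ *
+ * Queue "mem" at the tail (or head, when "first" is TRUE) of the
+ * appropriate inactive queue: vm_page_queue_anonymous for internal
+ * (anonymous) objects, vm_page_queue_inactive for external
+ * (file-backed) ones, bumping the matching counters.
+ */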
+#define VM_PAGE_ENQUEUE_INACTIVE(mem, first) \
+ MACRO_BEGIN \
+ VM_PAGE_QUEUES_ASSERT(mem, 0); \
+ assert(!mem->fictitious); \
+ assert(!mem->laundry); \
+ assert(!mem->pageout_queue); \
+ if (mem->object->internal) { \
+ if (first == TRUE) \
+ queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq); \
+ else \
+ queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq); \
+ vm_page_anonymous_count++; \
+ vm_page_pageable_internal_count++; \
+ } else { \
+ if (first == TRUE) \
+ queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq); \
+ else \
+ queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq); \
+ vm_page_pageable_external_count++; \
+ } \
+ mem->inactive = TRUE; \
+ vm_page_inactive_count++; \
+ token_new_pagecount++; \
+ MACRO_END
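+
+/*
+ * Example (illustrative only): requeueing a page at the head of the
+ * inactive list, with the page queues lock held by the caller.
+ *
+ *	vm_page_lockspin_queues();
+ *	VM_PAGE_QUEUES_REMOVE(m);
+ *	VM_PAGE_ENQUEUE_INACTIVE(m, TRUE);	-- TRUE selects head insertion
+ *	vm_page_unlock_queues();
+ */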
+
+
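+/*
+ * Debug-only bookkeeping: vm_page_speculative_used counts clustered
+ * (read-ahead) pages that end up being used (see
+ * VM_PAGE_CONSUME_CLUSTERED below).  On release kernels the macro
+ * compiles away to nothing.
+ */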
+#if DEVELOPMENT || DEBUG
+#define VM_PAGE_SPECULATIVE_USED_ADD() \
+ MACRO_BEGIN \
+ OSAddAtomic(1, &vm_page_speculative_used); \
+ MACRO_END
+#else
+#define VM_PAGE_SPECULATIVE_USED_ADD()
+#endif
+
+
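+/*
+ * VM_PAGE_CONSUME_CLUSTERED(mem)
+ *
+ * Mark a clustered (read-ahead) page as actually consumed: credit the
+ * owning object's pages_used, clear the clustered hint, and bump the
+ * debug-only speculative-used counter.  The per-physical-page pmap
+ * lock guards the clustered bit.
+ */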
+#define VM_PAGE_CONSUME_CLUSTERED(mem) \
+ MACRO_BEGIN \
+ pmap_lock_phys_page(mem->phys_page); \
+ if (mem->clustered) { \
+ assert(mem->object); \
+ mem->object->pages_used++; \
+ mem->clustered = FALSE; \
+ VM_PAGE_SPECULATIVE_USED_ADD(); \