+/*
+ * Atomically bump / drop the global zero-fill page counter.
+ * This branch of the conditional (the #else below is the !__ppc__ side)
+ * uses the 32-bit OSAddAtomic, matching the (SInt32 *) cast —
+ * NOTE(review): vm_zf_count is presumably declared 32-bit above this
+ * hunk for this branch; confirm against the full header.
+ *
+ * Fix: removed the stray trailing backslash after MACRO_END — it
+ * continued the macro onto the following blank line, so deleting that
+ * blank line would have silently absorbed the next line into the macro.
+ */
+#define VM_ZF_COUNT_INCR() \
+ MACRO_BEGIN \
+ OSAddAtomic(1, (SInt32 *) &vm_zf_count); \
+ MACRO_END
+
+#define VM_ZF_COUNT_DECR() \
+ MACRO_BEGIN \
+ OSAddAtomic(-1, (SInt32 *) &vm_zf_count); \
+ MACRO_END
+
+#else /* !(defined(__ppc__)) */
+
+extern uint64_t vm_zf_count;
+
+/*
+ * Atomically bump / drop the global zero-fill page counter.
+ * Non-ppc variant: vm_zf_count is the uint64_t declared above, so the
+ * 64-bit OSAddAtomic64 primitive is used with an (SInt64 *) cast.
+ *
+ * Fix: removed the stray trailing backslash after MACRO_END — it
+ * continued the macro onto the following blank line, so deleting that
+ * blank line would have silently absorbed the next line into the macro.
+ */
+#define VM_ZF_COUNT_INCR() \
+ MACRO_BEGIN \
+ OSAddAtomic64(1, (SInt64 *) &vm_zf_count); \
+ MACRO_END
+
+#define VM_ZF_COUNT_DECR() \
+ MACRO_BEGIN \
+ OSAddAtomic64(-1, (SInt64 *) &vm_zf_count); \
+ MACRO_END
+
+#endif /* !(defined(__ppc__)) */
+
+/*
+ * must hold the page queues lock to
+ * manipulate this structure
+ */
+struct vm_pageout_queue {
+ queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */
+ unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */
+ unsigned int pgo_maxlaundry; /* limit on pgo_laundry; queue is throttled at or above this (see VM_PAGE_Q_THROTTLED) */
+
+ unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */
+ pgo_busy:1, /* iothread is currently processing request from pgo_pending */
+ pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
+ pgo_draining:1, /* NOTE(review): queue appears to be in a drain state — confirm exact semantics against users of this flag */
+ :0; /* unnamed zero-width bit-field: forces alignment to the next storage unit */
+};
+
+/*
+ * True when the queue has reached its laundry limit
+ * (pgo_laundry >= pgo_maxlaundry) and further laundering
+ * should be throttled.
+ */
+#define VM_PAGE_Q_THROTTLED(q) \
+ ((q)->pgo_laundry >= (q)->pgo_maxlaundry)
+
+/* the two global pageout queues — NOTE(review): internal/external roles
+ * are inferred from the names only; confirm against their definitions */
+extern struct vm_pageout_queue vm_pageout_queue_internal;
+extern struct vm_pageout_queue vm_pageout_queue_external;
+