+/*
+ * Build-time contract check: VM_TAG_ACTIVE_UPDATE must already be
+ * defined (to 0 or 1) before this point, because the wiring macros
+ * below branch on it.  Fail the compile loudly if it is missing
+ * rather than silently taking the "undefined == 0" path.
+ */
+#ifndef VM_TAG_ACTIVE_UPDATE
+#error VM_TAG_ACTIVE_UPDATE
+#endif
+
+/*
+ * VM_OBJECT_WIRED(object, tag):
+ * Record that `object` now holds wired memory charged to kernel
+ * allocation tag `tag`.  The object must not already carry a wire
+ * tag, and `tag` must be a real tag (both asserted).  When tag
+ * accounting is not actively updated (VM_TAG_ACTIVE_UPDATE == 0) and
+ * the object is non-purgeable, the object is additionally linked onto
+ * the global vm_objects_wired queue under vm_objects_wired_lock so a
+ * later scan can enumerate all wired objects; the objq link asserts
+ * verify it is not already on a queue.
+ * NOTE(review): presumably the caller holds the object lock so that
+ * wire_tag cannot race -- confirm against call sites.
+ */
+#define VM_OBJECT_WIRED(object, tag) \
+ MACRO_BEGIN \
+ assert(VM_KERN_MEMORY_NONE != (tag)); \
+ assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \
+ (object)->wire_tag = (tag); \
+ if (!VM_TAG_ACTIVE_UPDATE \
+ && ((object)->purgable == VM_PURGABLE_DENY)) \
+ { \
+ lck_spin_lock(&vm_objects_wired_lock); \
+ assert(!(object)->objq.next); \
+ assert(!(object)->objq.prev); \
+ queue_enter(&vm_objects_wired, (object), vm_object_t, objq); \
+ lck_spin_unlock(&vm_objects_wired_lock); \
+ } \
+ MACRO_END
+
+/*
+ * VM_OBJECT_UNWIRED(object):
+ * Undo VM_OBJECT_WIRED.  If the object was placed on the global
+ * vm_objects_wired queue (non-active tag updates, non-purgeable
+ * object, objq.next set), remove it under vm_objects_wired_lock.
+ * Then, if the object still carries a wire tag, subtract its
+ * remaining wired_page_count (in bytes, via ptoa_64) from that tag's
+ * accounting and clear the tag back to VM_KERN_MEMORY_NONE.
+ * Unlike VM_OBJECT_WIRED this is tolerant of a partially-wired
+ * state: both the queue removal and the tag teardown are guarded.
+ */
+#define VM_OBJECT_UNWIRED(object) \
+ MACRO_BEGIN \
+ if (!VM_TAG_ACTIVE_UPDATE \
+ && ((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
+ { \
+ lck_spin_lock(&vm_objects_wired_lock); \
+ queue_remove(&vm_objects_wired, (object), vm_object_t, objq); \
+ lck_spin_unlock(&vm_objects_wired_lock); \
+ } \
+ if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \
+ vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
+ (object)->wire_tag = VM_KERN_MEMORY_NONE; \
+ } \
+ MACRO_END
+
+// VM_OBJECT_WIRED_PAGE_UPDATE_START/END deliberately bracket a C
+// block: START opens a scope declaring __wireddelta (net change in
+// wired page count) and __waswired (the object's wire tag on entry);
+// the PAGE_COUNT/ADD/REMOVE macros below may only be used inside that
+// scope, and END applies the accumulated delta and closes the scope.
+// START and END must therefore always be used as a matched pair.
+#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \
+ MACRO_BEGIN \
+ { \
+ int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;
+
+/*
+ * VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag):
+ * Close the block opened by ..._UPDATE_START.  If __wireddelta is
+ * non-zero, fold it into wired_page_count (overflow checked via
+ * os_add_overflow; asserted in debug builds only, per __assert_only).
+ * Then -- unless the object is a pageout object or has opted out via
+ * no_tag_update -- update per-tag wired-size accounting:
+ *   delta > 0: wire the object under `tag` if it had no tag on entry,
+ *              then credit the tag with the added bytes;
+ *   delta < 0 (and the object had a tag on entry): debit the tag, and
+ *              fully unwire the object once the count reaches zero.
+ */
+#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \
+ if (__wireddelta) { \
+ boolean_t __overflow __assert_only = \
+ os_add_overflow((object)->wired_page_count, __wireddelta, \
+ (unsigned int *)(uintptr_t)&(object)->wired_page_count); \
+ assert(!__overflow); \
+ if (!(object)->pageout && !(object)->no_tag_update) { \
+ if (__wireddelta > 0) { \
+ assert (VM_KERN_MEMORY_NONE != (tag)); \
+ if (VM_KERN_MEMORY_NONE == __waswired) { \
+ VM_OBJECT_WIRED((object), (tag)); \
+ } \
+ vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
+ } else if (VM_KERN_MEMORY_NONE != __waswired) { \
+ assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \
+ vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
+ if (!(object)->wired_page_count) { \
+ VM_OBJECT_UNWIRED((object)); \
+ } \
+ } \
+ } \
+ } \
+ } \
+ MACRO_END
+
+/*
+ * Accumulate an explicit page-count delta into __wireddelta.  Only
+ * valid between ..._UPDATE_START and ..._UPDATE_END.
+ * NOTE(review): the trailing line-continuation backslash makes the
+ * following blank line part of this macro -- harmless today, but a
+ * hazard if a line of code is ever added immediately after it.
+ */
+#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \
+ __wireddelta += delta; \
+
+/*
+ * Count page `m` as newly wired (bumps __wireddelta) unless it is a
+ * private or fictitious page, which are excluded from wired-memory
+ * accounting.  Only valid inside a ..._UPDATE_START/END block.
+ * NOTE(review): `m` is evaluated twice and the if-body is unbraced;
+ * callers must pass a side-effect-free expression.
+ */
+#define VM_OBJECT_WIRED_PAGE_ADD(object, m) \
+ if (!m->private && !m->fictitious) __wireddelta++;
+
+/*
+ * Count page `m` as unwired (decrements __wireddelta) under the same
+ * private/fictitious exclusion as VM_OBJECT_WIRED_PAGE_ADD.  Only
+ * valid inside a ..._UPDATE_START/END block; `m` is evaluated twice.
+ */
+#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m) \
+ if (!m->private && !m->fictitious) __wireddelta--;
+
+
+
+/* Lock-mode selectors for the vm_object lock routines below. */
+#define OBJECT_LOCK_SHARED 0
+#define OBJECT_LOCK_EXCLUSIVE 1
+
+/* Lock group and attributes used when initializing vm_object locks. */
+extern lck_grp_t vm_object_lck_grp;
+extern lck_grp_attr_t vm_object_lck_grp_attr;
+extern lck_attr_t vm_object_lck_attr;
+extern lck_attr_t kernel_object_lck_attr;
+extern lck_attr_t compressor_object_lck_attr;
+
+/*
+ * Object the pageout scan currently wants; published so other lockers
+ * can detect contention with the pageout daemon (see
+ * vm_object_lock_avoid below) -- semantics defined at the definition
+ * site, not here.
+ */
+extern vm_object_t vm_pageout_scan_wants_object;
+
+/*
+ * vm_object lock primitives: exclusive and shared acquire, their
+ * non-blocking _try variants, a shared-lock yield, unlock, and a
+ * shared-to-exclusive upgrade.  The _try/_yield/_upgrade routines
+ * return a boolean_t success indicator.
+ */
+extern void vm_object_lock(vm_object_t);
+extern boolean_t vm_object_lock_try(vm_object_t);
+extern boolean_t _vm_object_lock_try(vm_object_t);
+extern boolean_t vm_object_lock_avoid(vm_object_t);
+extern void vm_object_lock_shared(vm_object_t);
+extern boolean_t vm_object_lock_yield_shared(vm_object_t);
+extern boolean_t vm_object_lock_try_shared(vm_object_t);
+extern void vm_object_unlock(vm_object_t);
+extern boolean_t vm_object_lock_upgrade(vm_object_t);