+/* TRUE iff the address or offset 'x' falls on a page boundary. */
+#define page_aligned(x) (((x) & PAGE_MASK) == 0)
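+/*
+ * For example, assuming 4K pages (PAGE_MASK == 0xFFF):
+ * page_aligned(0x2000) is true, page_aligned(0x2010) is false.
+ */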
+
+extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */
+extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */
+
+/*
+ * The default pager does not handle 64-bit offsets inside its objects,
+ * so this limits the size of anonymous memory objects to 4GB minus 1 page.
+ * When we need to allocate a chunk of anonymous memory over that size,
+ * we have to allocate more than one chunk.
+ */
+#define ANON_MAX_SIZE 0xFFFFF000ULL
+/*
+ * Work-around for <rdar://problem/6626493>
+ * Break large anonymous memory areas into 128MB chunks to alleviate
+ * the cost of copying when copy-on-write is not possible because a small
+ * portion of the area is wired.
+ */
+#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */
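+/*
+ * A minimal sketch (not part of this header) of how a caller might carve an
+ * allocation larger than ANON_MAX_SIZE into ANON_CHUNK_SIZE pieces, each
+ * backed by its own object. `anon_alloc_one_chunk`, `map`, `base` and
+ * `total_size` are hypothetical names, not real kernel routines:
+ *
+ *	uint64_t off, chunk;
+ *	for (off = 0; off < total_size; off += chunk) {
+ *		chunk = MIN(ANON_CHUNK_SIZE, total_size - off);
+ *		anon_alloc_one_chunk(map, base + off, chunk);
+ *	}
+ */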
+
+/*
+ * The 'medium' malloc allocator would like its regions
+ * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
+ * and backed by different objects. This avoids contention
+ * on a single large object and showed solid improvements on
+ * high-core-count machines running workloads that involve
+ * video and graphics processing.
+ */
+#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024) /* 8 MB */
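+/*
+ * For example, a 64MB medium-malloc region would be backed by
+ * 64MB / MALLOC_MEDIUM_CHUNK_SIZE == 8 distinct objects rather
+ * than a single 64MB one.
+ */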
+
+#ifdef XNU_KERNEL_PRIVATE
+
+#include <kern/debug.h>
+
+extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */
+extern uint64_t sane_size; /* Memory size to use for defaults calculations */
+extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */
+
+extern const vm_offset_t vm_min_kernel_address;
+extern const vm_offset_t vm_max_kernel_address;
+
+extern vm_offset_t vm_kernel_stext;
+extern vm_offset_t vm_kernel_etext;
+extern vm_offset_t vm_kernel_slid_base;
+extern vm_offset_t vm_kernel_slid_top;
+extern vm_offset_t vm_kernel_slide;
+extern vm_offset_t vm_kernel_addrperm;
+extern vm_offset_t vm_kext_base;
+extern vm_offset_t vm_kext_top;
+extern vm_offset_t vm_kernel_base;
+extern vm_offset_t vm_kernel_top;
+extern vm_offset_t vm_hib_base;
+
+extern vm_offset_t vm_kernel_builtinkmod_text;
+extern vm_offset_t vm_kernel_builtinkmod_text_end;
+
+#define VM_KERNEL_IS_SLID(_o) \
+ (((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
+ ((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top))
+
+#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)
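+/*
+ * Sketch of the intended round trip between the two macros above (`u` is
+ * a hypothetical link-time kernel text address whose slid counterpart
+ * falls inside the slid region):
+ *
+ *	vm_offset_t a = VM_KERNEL_SLIDE(u);	// u + vm_kernel_slide
+ *	assert(VM_KERNEL_IS_SLID(a));		// a is now in the slid range
+ */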
+
+/*
+ * The following macros are to be used when exposing kernel addresses to
+ * userspace via any of the various debug or info facilities that might exist
+ * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
+ * the goal of each macro and choose the right one depending on what you are
+ * trying to do. Misuse of these macros can result in critical data leaks,
+ * which in turn can lead to all sorts of system vulnerabilities. It is invalid to
+ * call these macros on a non-kernel address (NULL is allowed).
+ *
+ * VM_KERNEL_UNSLIDE:
+ * Use this macro when you are exposing an address to userspace which is
+ * *guaranteed* to be a "static" kernel or kext address (i.e. coming from text
+ * or data sections). These are the addresses which get "slid" via ASLR on
+ * kernel or kext load, and it's precisely the slide value we are trying to
+ * protect from userspace.
+ *
+ * VM_KERNEL_ADDRHIDE:
+ * Use when exposing an address for internal purposes: debugging, tracing,
+ * etc. The address will be unslid if necessary. Other addresses will be
+ * hidden on customer builds, and unmodified on internal builds.
+ *
+ * VM_KERNEL_ADDRHASH:
+ * Use this macro when exposing a kernel address to userspace on customer
+ * builds. The address can be from the static kernel or kext regions, or the
+ * kernel heap. The address will be unslid or hashed as appropriate.
+ *
+ *
+ * ** SECURITY WARNING: The following macros can leak kernel secrets.
+ * Use *only* in performance *critical* code.
+ *
+ * VM_KERNEL_ADDRPERM:
+ * VM_KERNEL_UNSLIDE_OR_PERM:
+ * Use these macros when exposing a kernel address to userspace on customer
+ * builds. The address can be from the static kernel or kext regions, or the
+ * kernel heap. The address will be unslid or permuted as appropriate.
+ *
+ * Nesting of these macros should be considered invalid.
+ */
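+/*
+ * Illustrative usage (a sketch only; `info`, `handler`, `object`, `vaddr`
+ * and `trace_record` are hypothetical names, not kernel APIs):
+ *
+ *	info->fn_addr  = VM_KERNEL_UNSLIDE(handler);	// static text/data only
+ *	info->obj_addr = VM_KERNEL_ADDRHASH(object);	// heap or static, customer builds
+ *	trace_record(VM_KERNEL_ADDRHIDE(vaddr));	// debugging/tracing facilities
+ */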
+
+__BEGIN_DECLS
+extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
+__END_DECLS
+
+#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)
+
+#if DEBUG || DEVELOPMENT
+#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
+#else
+#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
+#endif /* DEBUG || DEVELOPMENT */
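+/*
+ * Behavior sketch for VM_KERNEL_ADDRHIDE (`text_ptr` is a hypothetical slid
+ * kernel-text pointer, `heap_ptr` a hypothetical kernel-heap pointer; results
+ * shown ignore any bits removed by VM_KERNEL_STRIP_PTR):
+ *
+ *	VM_KERNEL_ADDRHIDE(text_ptr)	-> text_ptr - vm_kernel_slide	(all builds)
+ *	VM_KERNEL_ADDRHIDE(heap_ptr)	-> heap_ptr			(DEBUG || DEVELOPMENT)
+ *	VM_KERNEL_ADDRHIDE(heap_ptr)	-> 0				(release builds)
+ */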
+
+#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))