extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_kernel_slide;
+extern vm_offset_t vm_hib_base;
extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
+extern vm_offset_t vm_prelink_stext;
+extern vm_offset_t vm_prelink_etext;
+extern vm_offset_t vm_prelink_sinfo;
+extern vm_offset_t vm_prelink_einfo;
+extern vm_offset_t vm_slinkedit;
+extern vm_offset_t vm_elinkedit;
#define VM_KERNEL_IS_SLID(_o) \
(((vm_offset_t)(_o) >= vm_kernel_base) && \
- ((vm_offset_t)(_o) < vm_kernel_top))
-#define VM_KERNEL_IS_KEXT(_o) \
- (((vm_offset_t)(_o) >= vm_kext_base) && \
+ ((vm_offset_t)(_o) <= vm_kernel_top))
+#define VM_KERNEL_IS_KEXT(_o) \
+ (((vm_offset_t)(_o) >= vm_kext_base) && \
((vm_offset_t)(_o) < vm_kext_top))
+
+#define VM_KERNEL_IS_PRELINKTEXT(_o) \
+ (((vm_offset_t)(_o) >= vm_prelink_stext) && \
+ ((vm_offset_t)(_o) < vm_prelink_etext))
+
+#define VM_KERNEL_IS_PRELINKINFO(_o) \
+ (((vm_offset_t)(_o) >= vm_prelink_sinfo) && \
+ ((vm_offset_t)(_o) < vm_prelink_einfo))
+
+#define VM_KERNEL_IS_KEXT_LINKEDIT(_o) \
+ (((vm_offset_t)(_o) >= vm_slinkedit) && \
+ ((vm_offset_t)(_o) < vm_elinkedit))
+
+#define VM_KERNEL_SLIDE(_u) \
+ ((vm_offset_t)(_u) + vm_kernel_slide)
+
+/*
+ * The following macros are to be used when exposing kernel addresses to
+ * userspace via any of the various debug or info facilities that might exist
+ * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
+ * the goal of each macro and choose the right one depending on what you are
+ * trying to do. Misuse of these macros can result in critical data leaks
+ * which in turn lead to all sorts of system vulnerabilities.
+ *
+ * Note that in general the ideal goal is to protect addresses from userspace
+ * in a way that is reversible assuming you know the permutation and/or slide.
+ *
+ * The macros are as follows:
+ *
+ * VM_KERNEL_UNSLIDE:
+ * Use this macro when you are exposing an address to userspace which is
+ * a "static" kernel or kext address (i.e. coming from text or data
+ * sections). These are the addresses which get "slid" via ASLR on kernel
+ * or kext load, and it's precisely the slide value we are trying to
+ * protect from userspace.
+ *
+ * VM_KERNEL_ADDRPERM:
+ * Use this macro when you are exposing an address to userspace which is
+ *    coming from the kernel's "heap". Since these addresses are not "loaded"
+ * from anywhere, there is no slide applied and we instead apply the
+ * permutation value to obscure the address.
+ *
+ * VM_KERNEL_UNSLIDE_OR_PERM:
+ * Use this macro when you are exposing an address to userspace that could
+ * come from either kernel text/data *or* the heap. This is a rare case,
+ *    but one that does come up and must be handled correctly. If the argument
+ *    is below the minimum kernel/kext address, it is passed through unchanged:
+ *    permuting a value an attacker already knows would leak the permutation.
+ *
+ * Nesting of these macros should be considered invalid.
+ */
#define VM_KERNEL_UNSLIDE(_v) \
((VM_KERNEL_IS_SLID(_v) || \
- VM_KERNEL_IS_KEXT(_v)) ? \
- (vm_offset_t)(_v) - vm_kernel_slide : \
+ VM_KERNEL_IS_KEXT(_v) || \
+ VM_KERNEL_IS_PRELINKTEXT(_v) || \
+ VM_KERNEL_IS_PRELINKINFO(_v) || \
+ VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ? \
+ (vm_offset_t)(_v) - vm_kernel_slide : \
(vm_offset_t)(_v))
-#define VM_KERNEL_SLIDE(_u) \
- ((vm_offset_t)(_u) + vm_kernel_slide)
-#define VM_KERNEL_ADDRPERM(_v) \
- (((vm_offset_t)(_v) == 0) ? \
- (vm_offset_t)(0) : \
+#define VM_KERNEL_ADDRPERM(_v) \
+ (((vm_offset_t)(_v) == 0) ? \
+ (vm_offset_t)(0) : \
(vm_offset_t)(_v) + vm_kernel_addrperm)
+#define VM_KERNEL_UNSLIDE_OR_PERM(_v) \
+ ((VM_KERNEL_IS_SLID(_v) || \
+ VM_KERNEL_IS_KEXT(_v) || \
+ VM_KERNEL_IS_PRELINKTEXT(_v) || \
+ VM_KERNEL_IS_PRELINKINFO(_v) || \
+ VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ? \
+ (vm_offset_t)(_v) - vm_kernel_slide : \
+ ((vm_offset_t)(_v) >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ? VM_KERNEL_ADDRPERM(_v) : (vm_offset_t)(_v)))
+
+
#endif /* XNU_KERNEL_PRIVATE */
extern vm_size_t page_size;