X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..813fb2f63a553c957e917ede5f119b021d6ce391:/osfmk/mach/vm_param.h

diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h
index 861d5501b..b76a10b21 100644
--- a/osfmk/mach/vm_param.h
+++ b/osfmk/mach/vm_param.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -75,6 +75,9 @@
 #include <mach/vm_types.h>
 #endif	/* ASSEMBLER */
 
+#include <os/base.h>
+#include <os/overflow.h>
+
 /*
  * The machine independent pages are referred to as PAGES. A page
  * is some number of hardware pages, depending on the target machine.
@@ -96,6 +99,9 @@
 #define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
 #define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)
 
+#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
+#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)
+
 /*
  * While the following block is enabled, the legacy atop and ptoa
  * macros will behave correctly. If not, they will generate
@@ -103,8 +109,8 @@
  */
 #if 1
-#define atop(x) ((uint32_t)(x) >> PAGE_SHIFT)
-#define ptoa(x) ((uint32_t)(x) << PAGE_SHIFT)
+#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
+#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
 #else
 #define atop(x) (0UL = 0)
 #define ptoa(x) (0UL = 0)
@@ -116,6 +122,18 @@
 #define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))
 
+#define round_page_overflow(in, out) __os_warn_unused(({ \
+		bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
+		*out &= ~((__typeof__(*out))PAGE_MASK); \
+		__ovr; \
+	}))
+
+static inline int OS_WARN_RESULT
+mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
+{
+	return round_page_overflow(in, out);
+}
+
 #define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))
 
@@ -124,8 +142,8 @@
  * address space size) VM types.
  */
 
-#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
-#define trunc_page(x) ((vm_offset_t)(x) & ~((signed)PAGE_MASK))
+#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
+#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))
 
 /*
  * Round off or truncate to the nearest page. These will work
@@ -139,11 +157,10 @@
  * associated with the specific VM type should be used.
  */
 
-#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
-#define trunc_page_32(x) ((uint32_t)(x) & ~((signed)PAGE_MASK))
-#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
-#define trunc_page_64(x) ((uint64_t)(x) & ~((signed)PAGE_MASK))
-
+#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
+#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
+#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
+#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))
 
 /*
  * Enable the following block to find uses of xxx_32 macros that should
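
The round_page_overflow()/mach_vm_round_page_overflow() additions above exist because the plain rounding macros silently wrap to 0 whenever the input lies within PAGE_MASK of the type's maximum value. The following stand-alone sketch mirrors that semantics in user space; it is illustrative only: the 4 KB page geometry is assumed, the helper name round_page_overflow_sketch is made up, and the compiler's __builtin_add_overflow stands in for the kernel's os_add_overflow.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical page geometry; the kernel gets PAGE_SHIFT/PAGE_MASK from
 * mach/machine/vm_param.h. */
#define PAGE_SHIFT 12
#define PAGE_MASK  ((uint64_t)((1ULL << PAGE_SHIFT) - 1))

/* Mirrors round_page_overflow(): round `in` up to a page boundary into
 * *out, and report whether adding PAGE_MASK wrapped around. */
static bool round_page_overflow_sketch(uint64_t in, uint64_t *out)
{
	bool ovr = __builtin_add_overflow(in, PAGE_MASK, out);
	*out &= ~PAGE_MASK;
	return ovr;
}

int main(void)
{
	uint64_t rounded;

	/* Normal case: 0x1001 rounds up to the next 4 KB boundary, 0x2000. */
	if (!round_page_overflow_sketch(0x1001, &rounded))
		printf("0x1001 -> 0x%llx\n", (unsigned long long)rounded);

	/* Near the top of the address space an unchecked round_page()
	 * would silently produce 0; the checked form reports it. */
	if (round_page_overflow_sketch(UINT64_MAX - 1, &rounded))
		printf("overflow detected, result untrusted\n");

	return 0;
}

The __os_warn_unused/OS_WARN_RESULT annotations in the patch serve the same purpose as checking the return value here: a caller that ignores the overflow flag gets a compiler warning instead of a wrapped, page-aligned zero.
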
@@ -209,34 +226,125 @@
  * an exact page multiple.
  */
-#define page_aligned(x) ((((vm_object_offset_t) (x)) & PAGE_MASK) == 0)
+#define page_aligned(x) (((x) & PAGE_MASK) == 0)
 
 extern vm_size_t mem_size;  /* 32-bit size of memory - limited by maxmem - deprecated */
 extern uint64_t  max_mem;   /* 64-bit size of memory - limited by maxmem */
 
+/*
+ * The default pager does not handle 64-bit offsets inside its objects,
+ * so this limits the size of anonymous memory objects to 4GB minus 1 page.
+ * When we need to allocate a chunk of anonymous memory over that size,
+ * we have to allocate more than one chunk.
+ */
+#define ANON_MAX_SIZE 0xFFFFF000ULL
+/*
+ * Work-around for
+ * Break large anonymous memory areas into 128MB chunks to alleviate
+ * the cost of copying when copy-on-write is not possible because a small
+ * portion of it is wired.
+ */
+#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */
+
 #ifdef XNU_KERNEL_PRIVATE
 
 extern uint64_t mem_actual;   /* 64-bit size of memory - not limited by maxmem */
 extern uint64_t sane_size;    /* Memory size to use for defaults calculations */
 extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */
 
+extern const vm_offset_t vm_min_kernel_address;
+extern const vm_offset_t vm_max_kernel_address;
+
+extern vm_offset_t vm_kernel_stext;
+extern vm_offset_t vm_kernel_etext;
+extern vm_offset_t vm_kernel_slid_base;
+extern vm_offset_t vm_kernel_slid_top;
+extern vm_offset_t vm_kernel_slide;
+extern vm_offset_t vm_kernel_addrperm;
+extern vm_offset_t vm_kext_base;
+extern vm_offset_t vm_kext_top;
+extern vm_offset_t vm_kernel_base;
+extern vm_offset_t vm_kernel_top;
+extern vm_offset_t vm_hib_base;
+
+#define VM_KERNEL_IS_SLID(_o) \
+		(((vm_offset_t)(_o) >= vm_kernel_slid_base) && \
+		 ((vm_offset_t)(_o) < vm_kernel_slid_top))
+
+#define VM_KERNEL_SLIDE(_u) \
+		((vm_offset_t)(_u) + vm_kernel_slide)
+
+/*
+ * The following macros are to be used when exposing kernel addresses to
+ * userspace via any of the various debug or info facilities that might exist
+ * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
+ * the goal of each macro and choose the right one depending on what you are
+ * trying to do. Misuse of these macros can result in critical data leaks,
+ * which in turn lead to all sorts of system vulnerabilities.
+ *
+ * Note that in general the ideal goal is to protect addresses from userspace
+ * in a way that is reversible assuming you know the permutation and/or slide.
+ *
+ * The macros are as follows:
+ *
+ * VM_KERNEL_UNSLIDE:
+ *     Use this macro when you are exposing an address to userspace which is
+ *     a "static" kernel or kext address (i.e. coming from text or data
+ *     sections). These are the addresses which get "slid" via ASLR on kernel
+ *     or kext load, and it's precisely the slide value we are trying to
+ *     protect from userspace.
+ *
+ * VM_KERNEL_ADDRPERM:
+ *     Use this macro when you are exposing an address to userspace which is
+ *     coming from the kernel's "heap". Since these addresses are not "loaded"
+ *     from anywhere, there is no slide applied and we instead apply the
+ *     permutation value to obscure the address.
+ *
+ * VM_KERNEL_UNSLIDE_OR_PERM:
+ *     Use this macro when you are exposing an address to userspace that could
+ *     come from either kernel text/data *or* the heap. This is a rare case,
+ *     but one that does come up and must be handled correctly. If the argument
+ *     is known to be lower than any potential heap address, no transformation
+ *     is applied, to avoid revealing the operation on a constant.
+ *
+ * Nesting of these macros should be considered invalid.
+ */
+#define VM_KERNEL_UNSLIDE(_v) \
+		((VM_KERNEL_IS_SLID(_v)) ? \
+		 (vm_offset_t)(_v) - vm_kernel_slide : \
+		 (vm_offset_t)(_v))
+
+#define VM_KERNEL_ADDRPERM(_v) \
+		(((vm_offset_t)(_v) == 0) ? \
+		 (vm_offset_t)(0) : \
+		 (vm_offset_t)(_v) + vm_kernel_addrperm)
+
+#define VM_KERNEL_UNSLIDE_OR_PERM(_v) \
+		((VM_KERNEL_IS_SLID(_v)) ? \
+		 (vm_offset_t)(_v) - vm_kernel_slide : \
+		 ((vm_offset_t)(_v) >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ? VM_KERNEL_ADDRPERM(_v) : (vm_offset_t)(_v)))
+
+
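
To make the unslide/permutation split just defined concrete, here is a minimal user-space model of the two transformations. Everything in it is made up for illustration: the constants stand in for globals the kernel randomizes at boot, and the helper names unslide and addrperm are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's boot-time randomized globals. */
static const uint64_t vm_kernel_slide     = 0x0000000004320000ULL;
static const uint64_t vm_kernel_slid_base = 0xffffff8004320000ULL;
static const uint64_t vm_kernel_slid_top  = 0xffffff8084320000ULL;
static const uint64_t vm_kernel_addrperm  = 0x00000000a5a50000ULL;

/* Mirrors VM_KERNEL_UNSLIDE: only addresses inside the slid text/data
 * range have the slide subtracted; anything else passes through. */
static uint64_t unslide(uint64_t v)
{
	return (v >= vm_kernel_slid_base && v < vm_kernel_slid_top)
	    ? v - vm_kernel_slide : v;
}

/* Mirrors VM_KERNEL_ADDRPERM: heap addresses are offset by the
 * permutation value; 0 stays 0 so userspace can still test for NULL. */
static uint64_t addrperm(uint64_t v)
{
	return v == 0 ? 0 : v + vm_kernel_addrperm;
}

int main(void)
{
	uint64_t text_addr = vm_kernel_slid_base + 0x1000; /* "static" symbol  */
	uint64_t heap_addr = 0xffffff9000001000ULL;        /* "heap" allocation */

	/* The exposed values reveal neither the slide nor the heap layout,
	 * yet remain reversible for someone who knows the secrets. */
	printf("text 0x%llx -> 0x%llx\n", (unsigned long long)text_addr,
	    (unsigned long long)unslide(text_addr));
	printf("heap 0x%llx -> 0x%llx\n", (unsigned long long)heap_addr,
	    (unsigned long long)addrperm(heap_addr));
	return 0;
}

Note how the range check in unslide() reproduces VM_KERNEL_IS_SLID: a value outside the slid range is returned untouched, which is exactly why VM_KERNEL_UNSLIDE_OR_PERM can fall through to the permutation case for heap pointers.
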
 #endif /* XNU_KERNEL_PRIVATE */
 
 extern vm_size_t page_size;
 extern vm_size_t page_mask;
-extern int			page_shift;
+extern int		page_shift;
 
 /* We need a way to get rid of compiler warnings when we cast from   */
-/* a 64 bit value to an address that is 32 bits.                     */
-/* We know at this point the cast is harmless but sometime in        */
-/* the future it may not be.                                         */
-/* When size of an int is no longer equal to size of uintptr_t then  */
-/* the compile will fail and we know we need to fix our cast.        */
+/* a 64 bit value to an address (which may be 32 bits or 64 bits).   */
+/* A uintptr_t is used to convert the value to the right precision,  */
+/* and then to an address. This macro is also used to convert        */
+/* addresses to 32-bit integers, which is a hard failure for a       */
+/* 64-bit kernel.                                                    */
 #include <stdint.h>
 #ifndef __CAST_DOWN_CHECK
 #define __CAST_DOWN_CHECK
-typedef char __NEED_TO_CHANGE_CAST_DOWN[ sizeof(uintptr_t) == sizeof(int) ? 0 : -1 ];
-#define CAST_DOWN( type, addr ) ( ((type)((uintptr_t) (addr))) )
+
+#define CAST_DOWN( type, addr ) \
+	( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )
+
+#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )
+
 #endif /* __CAST_DOWN_CHECK */
 
 #endif /* ASSEMBLER */
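
The new CAST_DOWN turns a silently truncating cast into a compile-time diagnostic: when the destination type is narrower than uintptr_t, the ternary selects a constant divisor of 0, and the compiler flags the division by zero (an error in builds that treat the warning as fatal). Here is a small sketch of both the guard and the explicit escape hatch; it assumes an LP64 target, and the variable names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same guard as the kernel macro: for a `type` narrower than a pointer,
 * the divisor folds to the constant 0 and the compiler diagnoses it. */
#define CAST_DOWN(type, addr) \
	((type)((uintptr_t)(addr) / (sizeof(type) < sizeof(uintptr_t) ? 0 : 1)))

/* Deliberate, visible truncation for the cases where it is intended. */
#define CAST_DOWN_EXPLICIT(type, addr) ((type)((uintptr_t)(addr)))

int main(void)
{
	uint64_t wide = 0x12345678;

	/* Fine on LP64: the destination is as wide as uintptr_t, so the
	 * divisor is 1 and the expression compiles cleanly. */
	uintptr_t same = CAST_DOWN(uintptr_t, wide);

	/* This would trip the guard on a 64-bit target:
	 *     uint32_t narrow = CAST_DOWN(uint32_t, wide);
	 * When the truncation is intentional, the explicit form is used. */
	uint32_t narrow = CAST_DOWN_EXPLICIT(uint32_t, wide);

	printf("same=0x%llx narrow=0x%x\n", (unsigned long long)same, narrow);
	return 0;
}

On a 32-bit target sizeof(uint32_t) equals sizeof(uintptr_t), so CAST_DOWN(uint32_t, ...) still compiles there; the guard fires only where the cast would actually discard address bits, which is the behavior the old __NEED_TO_CHANGE_CAST_DOWN typedef approximated much more coarsely.
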