X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..0a7de7458d150b5d4dffc935ba399be265ef0a1a:/osfmk/mach/vm_param.h

diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h
index b76a10b21..2bb038e21 100644
--- a/osfmk/mach/vm_param.h
+++ b/osfmk/mach/vm_param.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,34 +22,34 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -64,16 +64,16 @@
  *
  */

-#ifndef _MACH_VM_PARAM_H_
+#ifndef _MACH_VM_PARAM_H_
 #define _MACH_VM_PARAM_H_

 #include <mach/machine/vm_param.h>

-#ifdef KERNEL
+#ifdef  KERNEL

-#ifndef ASSEMBLER
+#ifndef ASSEMBLER
 #include <mach/vm_types.h>
-#endif /* ASSEMBLER */
+#endif  /* ASSEMBLER */

 #include <os/base.h>
 #include <os/overflow.h>
@@ -83,10 +83,10 @@
  * is some number of hardware pages, depending on the target machine.
  */

-#ifndef ASSEMBLER
+#ifndef ASSEMBLER

-#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE  /* pagesize in addr units */
-#define PAGE_MASK_64 (unsigned long long)PAGE_MASK  /* mask for off in page */
+#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE      /* pagesize in addr units */
+#define PAGE_MASK_64 (unsigned long long)PAGE_MASK      /* mask for off in page */

 /*
  * Convert addresses to pages and vice versa.  No rounding is used.
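
For illustration, here is a minimal user-space sketch of the conversion described above and performed by the atop()/ptoa() macros in the next hunk, assuming a hypothetical 4 KB page (PAGE_SHIFT == 12); the sketch_* names are invented for this example and are not part of the header:

#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12 /* hypothetical 4 KB page, not the kernel's value */

/* address -> page number: the offset-in-page bits are simply dropped */
#define sketch_atop(x) ((uint64_t)(x) >> SKETCH_PAGE_SHIFT)
/* page number -> address of the start of that page */
#define sketch_ptoa(x) ((uint64_t)(x) << SKETCH_PAGE_SHIFT)

int
main(void)
{
    assert(sketch_atop(0x1FFF) == 1);  /* truncates: no rounding is used */
    assert(sketch_ptoa(3) == 0x3000);  /* start of page 3 */
    return 0;
}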
@@ -109,8 +109,8 @@
  */

 #if 1
-#define atop(x)	((vm_address_t)(x) >> PAGE_SHIFT)
-#define ptoa(x)	((vm_address_t)(x) << PAGE_SHIFT)
+#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
+#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
 #else
 #define atop(x) (0UL = 0)
 #define ptoa(x) (0UL = 0)
@@ -123,9 +123,9 @@
 #define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

 #define round_page_overflow(in, out) __os_warn_unused(({ \
-		bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
-		*out &= ~((__typeof__(*out))PAGE_MASK); \
-		__ovr; \
+	bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
+	*out &= ~((__typeof__(*out))PAGE_MASK); \
+	__ovr; \
 }))

 static inline int OS_WARN_RESULT
@@ -188,23 +188,23 @@ mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)

 #define atop_32(x) \
     (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
-	(*(long *)0), \
-	(0UL)) = 0)
+    (*(long *)0), \
+    (0UL)) = 0)

 #define ptoa_32(x) \
     (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
-	(*(long *)0), \
-	(0UL)) = 0)
+    (*(long *)0), \
+    (0UL)) = 0)

 #define round_page_32(x) \
     (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
-	(*(long *)0), \
-	(0UL)) = 0)
+    (*(long *)0), \
+    (0UL)) = 0)

 #define trunc_page_32(x) \
     (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
-	(*(long *)0), \
-	(0UL)) = 0)
+    (*(long *)0), \
+    (0UL)) = 0)

 #else

 #define atop_32(x) (0)
@@ -226,10 +226,10 @@ mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
  * an exact page multiple.
  */

-#define page_aligned(x) (((x) & PAGE_MASK) == 0)
+#define page_aligned(x)         (((x) & PAGE_MASK) == 0)

-extern vm_size_t mem_size;  /* 32-bit size of memory - limited by maxmem - deprecated */
-extern uint64_t max_mem;    /* 64-bit size of memory - limited by maxmem */
+extern vm_size_t        mem_size;       /* 32-bit size of memory - limited by maxmem - deprecated */
+extern uint64_t         max_mem;        /* 64-bit size of memory - limited by maxmem */

 /*
  * The default pager does not handle 64-bit offsets inside its objects,
@@ -237,42 +237,55 @@ extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */
  * When we need to allocate a chunk of anonymous memory over that size,
  * we have to allocate more than one chunk.
  */
-#define ANON_MAX_SIZE 0xFFFFF000ULL
+#define ANON_MAX_SIZE   0xFFFFF000ULL
 /*
  * Work-around:
  * Break large anonymous memory areas into 128MB chunks to alleviate
  * the cost of copying when copy-on-write is not possible because a small
  * portion of it is wired.
  */
-#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */
+#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */

-#ifdef XNU_KERNEL_PRIVATE
+/*
+ * The 'medium' malloc allocator would like its regions
+ * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
+ * and backed by different objects. This avoids contention
+ * on a single large object and showed solid improvements on high
+ * core machines with workloads involving video and graphics processing.
+ */
+#define MALLOC_MEDIUM_CHUNK_SIZE        (8ULL * 1024 * 1024) /* 8 MB */
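
For illustration, a user-space analogue of the round_page_overflow() pattern above, built on __builtin_add_overflow (the compiler builtin underneath os_add_overflow()): add PAGE_MASK with an overflow check, then clear the offset bits. The sketch_* names and the 4 KB page are invented for this example:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_MASK 0xFFFull /* hypothetical 4 KB page */

/* Round up to a page boundary; report (rather than silently wrap on) overflow. */
static bool
sketch_round_page_overflow(uint64_t in, uint64_t *out)
{
    bool ovr = __builtin_add_overflow(in, SKETCH_PAGE_MASK, out);
    *out &= ~SKETCH_PAGE_MASK;
    return ovr;
}

int
main(void)
{
    uint64_t r;
    assert(!sketch_round_page_overflow(1, &r) && r == 0x1000);
    assert(sketch_round_page_overflow(UINT64_MAX, &r)); /* would wrap to 0 */
    return 0;
}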

-extern uint64_t mem_actual;   /* 64-bit size of memory - not limited by maxmem */
-extern uint64_t sane_size;    /* Memory size to use for defaults calculations */
-extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */
+#ifdef XNU_KERNEL_PRIVATE

-extern const vm_offset_t vm_min_kernel_address;
-extern const vm_offset_t vm_max_kernel_address;
+#include <kern/debug.h>
+
+extern uint64_t         mem_actual;     /* 64-bit size of memory - not limited by maxmem */
+extern uint64_t         sane_size;      /* Memory size to use for defaults calculations */
+extern addr64_t         vm_last_addr;   /* Highest kernel virtual address known to the VM system */
+
+extern const vm_offset_t        vm_min_kernel_address;
+extern const vm_offset_t        vm_max_kernel_address;

 extern vm_offset_t vm_kernel_stext;
 extern vm_offset_t vm_kernel_etext;
-extern vm_offset_t vm_kernel_slid_base;
-extern vm_offset_t vm_kernel_slid_top;
-extern vm_offset_t vm_kernel_slide;
-extern vm_offset_t vm_kernel_addrperm;
-extern vm_offset_t vm_kext_base;
-extern vm_offset_t vm_kext_top;
-extern vm_offset_t vm_kernel_base;
-extern vm_offset_t vm_kernel_top;
-extern vm_offset_t vm_hib_base;
-
-#define VM_KERNEL_IS_SLID(_o) \
-	(((vm_offset_t)(_o) >= vm_kernel_slid_base) && \
-	 ((vm_offset_t)(_o) < vm_kernel_slid_top))
-
-#define VM_KERNEL_SLIDE(_u) \
-	((vm_offset_t)(_u) + vm_kernel_slide)
+extern vm_offset_t vm_kernel_slid_base;
+extern vm_offset_t vm_kernel_slid_top;
+extern vm_offset_t vm_kernel_slide;
+extern vm_offset_t vm_kernel_addrperm;
+extern vm_offset_t vm_kext_base;
+extern vm_offset_t vm_kext_top;
+extern vm_offset_t vm_kernel_base;
+extern vm_offset_t vm_kernel_top;
+extern vm_offset_t vm_hib_base;
+
+extern vm_offset_t vm_kernel_builtinkmod_text;
+extern vm_offset_t vm_kernel_builtinkmod_text_end;
+
+#define VM_KERNEL_IS_SLID(_o) \
+	(((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
+	((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top))
+
+#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)

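For illustration, a minimal user-space sketch of the slide arithmetic behind VM_KERNEL_IS_SLID() and VM_KERNEL_SLIDE(): a slid address lies in [vm_kernel_slid_base, vm_kernel_slid_top) and is unslid by subtracting vm_kernel_slide. All constants below are invented stand-ins, not real kernel layout values:

#include <assert.h>
#include <stdint.h>

static const uint64_t slid_base = 0xFFFFFF8000200000ull; /* hypothetical */
static const uint64_t slid_top  = 0xFFFFFF8000C00000ull; /* hypothetical */
static const uint64_t slide     = 0x0000000000200000ull; /* hypothetical */

static int
sketch_is_slid(uint64_t a) /* analogue of VM_KERNEL_IS_SLID() */
{
    return a >= slid_base && a < slid_top;
}

static uint64_t
sketch_unslide(uint64_t a) /* analogue of __DO_UNSLIDE() below */
{
    return a - slide;
}

int
main(void)
{
    uint64_t text_sym = slid_base + 0x1000;
    assert(sketch_is_slid(text_sym));
    /* Userspace sees the pre-slide ("static") address. */
    assert(sketch_unslide(text_sym) == text_sym - slide);
    return 0;
}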
 /*
  * The following macros are to be used when exposing kernel addresses to
@@ -280,56 +293,111 @@ extern vm_offset_t vm_hib_base;
  * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
  * the goal of each macro and choose the right one depending on what you are
  * trying to do. Misuse of these macros can result in critical data leaks
- * which in turn lead to all sorts of system vulnerabilities.
- *
- * Note that in general the ideal goal is to protect addresses from userspace
- * in a way that is reversible assuming you know the permutation and/or slide.
+ * which in turn lead to all sorts of system vulnerabilities. It is invalid to
+ * call these macros on a non-kernel address (NULL is allowed).
  *
- * The macros are as follows:
- *
  * VM_KERNEL_UNSLIDE:
  * Use this macro when you are exposing an address to userspace which is
- * a "static" kernel or kext address (i.e. coming from text or data
- * sections). These are the addresses which get "slid" via ASLR on kernel
- * or kext load, and it's precisely the slide value we are trying to
+ * *guaranteed* to be a "static" kernel or kext address (i.e. coming from text
+ * or data sections). These are the addresses which get "slid" via ASLR on
+ * kernel or kext load, and it's precisely the slide value we are trying to
  * protect from userspace.
  *
- * VM_KERNEL_ADDRPERM:
- * Use this macro when you are exposing an address to userspace which is
- * coming from the kernel's "heap". Since these adresses are not "loaded"
- * from anywhere, there is no slide applied and we instead apply the
- * permutation value to obscure the address.
+ * VM_KERNEL_ADDRHIDE:
+ * Use when exposing an address for internal purposes: debugging, tracing,
+ * etc. The address will be unslid if necessary. Other addresses will be
+ * hidden on customer builds, and unmodified on internal builds.
+ *
+ * VM_KERNEL_ADDRHASH:
+ * Use this macro when exposing a kernel address to userspace on customer
+ * builds. The address can be from the static kernel or kext regions, or the
+ * kernel heap. The address will be unslid or hashed as appropriate.
  *
- * VM_KERNEL_UNSLIDE_OR_ADDRPERM:
- * Use this macro when you are exposing an address to userspace that could
- * come from either kernel text/data *or* the heap. This is a rare case,
- * but one that does come up and must be handled correctly. If the argument
- * is known to be lower than any potential heap address, no transformation
- * is applied, to avoid revealing the operation on a constant.
+ *
+ * ** SECURITY WARNING: The following macros can leak kernel secrets.
+ *    Use *only* in performance *critical* code.
+ *
+ * VM_KERNEL_ADDRPERM:
+ * VM_KERNEL_UNSLIDE_OR_PERM:
+ * Use these macros when exposing a kernel address to userspace on customer
+ * builds. The address can be from the static kernel or kext regions, or the
+ * kernel heap. The address will be unslid or permuted as appropriate.
  *
  * Nesting of these macros should be considered invalid.
  */

-#define VM_KERNEL_UNSLIDE(_v) \
-	((VM_KERNEL_IS_SLID(_v)) ? \
-		(vm_offset_t)(_v) - vm_kernel_slide : \
-		(vm_offset_t)(_v))
-
-#define VM_KERNEL_ADDRPERM(_v) \
-	(((vm_offset_t)(_v) == 0) ? \
-		(vm_offset_t)(0) : \
-		(vm_offset_t)(_v) + vm_kernel_addrperm)
+__BEGIN_DECLS
+extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
+__END_DECLS
+
+#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)
+
+#if DEBUG || DEVELOPMENT
+#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
+#else
+#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
+#endif /* DEBUG || DEVELOPMENT */
+
+#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))
+
+#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
+	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
+	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
+	(vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
+	})
+
+#define VM_KERNEL_UNSLIDE(_v) ({ \
+	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
+	})
+
+#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)
+
+#undef mach_vm_round_page
+#undef round_page
+#undef round_page_32
+#undef round_page_64
+
+static inline mach_vm_offset_t
+mach_vm_round_page(mach_vm_offset_t x)
+{
+	if (round_page_overflow(x, &x)) {
+		panic("overflow detected");
+	}
+	return x;
+}
+
+static inline vm_offset_t
+round_page(vm_offset_t x)
+{
+	if (round_page_overflow(x, &x)) {
+		panic("overflow detected");
+	}
+	return x;
+}
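
For illustration, a user-space sketch of the decision chain that VM_KERNEL_UNSLIDE_OR_PERM() implements: unslide static text/data addresses, permute heap addresses, and pass other values through unchanged. It follows the address-range test of the removed definition just below (the new macro tests VM_KERNEL_ADDRESS() instead), and every constant is an invented stand-in (SKETCH_KERNEL_MIN for VM_MIN_KERNEL_AND_KEXT_ADDRESS, SKETCH_PERM for vm_kernel_addrperm):

#include <assert.h>
#include <stdint.h>

#define SKETCH_SLID_BASE  0xFFFFFF8000200000ull /* hypothetical */
#define SKETCH_SLID_TOP   0xFFFFFF8000C00000ull /* hypothetical */
#define SKETCH_SLIDE      0x0000000000200000ull /* hypothetical */
#define SKETCH_KERNEL_MIN 0xFFFFFF8000000000ull /* hypothetical */
#define SKETCH_PERM       0x00000000DEAD0000ull /* hypothetical */

static uint64_t
sketch_unslide_or_perm(uint64_t a)
{
    if (a >= SKETCH_SLID_BASE && a < SKETCH_SLID_TOP) {
        return a - SKETCH_SLIDE;  /* static text/data: remove the slide */
    }
    if (a >= SKETCH_KERNEL_MIN) {
        return a + SKETCH_PERM;   /* heap: obscure with the permutation */
    }
    return a;                     /* not a kernel pointer: unchanged */
}

int
main(void)
{
    assert(sketch_unslide_or_perm(SKETCH_SLID_BASE + 0x40)
        == SKETCH_SLID_BASE + 0x40 - SKETCH_SLIDE);
    assert(sketch_unslide_or_perm(SKETCH_KERNEL_MIN + 0x40)
        == SKETCH_KERNEL_MIN + 0x40 + SKETCH_PERM);
    assert(sketch_unslide_or_perm(0x1000) == 0x1000);
    return 0;
}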

-#define VM_KERNEL_UNSLIDE_OR_PERM(_v) \
-	((VM_KERNEL_IS_SLID(_v)) ? \
-		(vm_offset_t)(_v) - vm_kernel_slide : \
-		((vm_offset_t)(_v) >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ? VM_KERNEL_ADDRPERM(_v) : (vm_offset_t)(_v)))
-
+static inline mach_vm_offset_t
+round_page_64(mach_vm_offset_t x)
+{
+	if (round_page_overflow(x, &x)) {
+		panic("overflow detected");
+	}
+	return x;
+}
+
+static inline uint32_t
+round_page_32(uint32_t x)
+{
+	if (round_page_overflow(x, &x)) {
+		panic("overflow detected");
+	}
+	return x;
+}

-#endif /* XNU_KERNEL_PRIVATE */
+#endif  /* XNU_KERNEL_PRIVATE */

-extern vm_size_t page_size;
-extern vm_size_t page_mask;
-extern int page_shift;
+extern vm_size_t        page_size;
+extern vm_size_t        page_mask;
+extern int              page_shift;

 /* We need a way to get rid of compiler warnings when we cast from   */
 /* a 64 bit value to an address (which may be 32 bits or 64-bits).   */
@@ -343,12 +411,12 @@ extern int page_shift;

 #define CAST_DOWN( type, addr ) \
     ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

-#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )
+#define CAST_DOWN_EXPLICIT( type, addr )  ( ((type)((uintptr_t) (addr))) )

 #endif /* __CAST_DOWN_CHECK */

-#endif /* ASSEMBLER */
+#endif  /* ASSEMBLER */

-#endif /* KERNEL */
+#endif  /* KERNEL */

-#endif /* _MACH_VM_PARAM_H_ */
+#endif  /* _MACH_VM_PARAM_H_ */
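
For illustration, a user-space analogue of the CAST_DOWN() check above: when the destination type is narrower than uintptr_t, the divisor constant-folds to zero and the compiler can flag the division at build time (the exact diagnostic depends on compiler and warning flags); sketch_cast_down is an invented name:

#include <assert.h>
#include <stdint.h>

/* Divide by 1 when the cast cannot truncate, by 0 (diagnosed) when it can. */
#define sketch_cast_down(type, addr) \
    ((type)((uintptr_t)(addr) / (sizeof(type) < sizeof(uintptr_t) ? 0 : 1)))

int
main(void)
{
    uint64_t wide = 0x1234;
    uintptr_t p = sketch_cast_down(uintptr_t, wide); /* OK: no truncation possible */
    assert(p == 0x1234);
    /*
     * uint16_t q = sketch_cast_down(uint16_t, wide);
     * would divide by zero and draw a compile-time diagnostic, as intended.
     */
    return 0;
}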