/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	mach/vm_param.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine independent virtual memory parameters.
 *
 */

#ifndef	_MACH_VM_PARAM_H_
#define	_MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>

#ifdef	KERNEL

#ifndef	ASSEMBLER
#include <mach/vm_types.h>
#endif	/* ASSEMBLER */

#include <os/base.h>
#include <os/overflow.h>

/*
 * The machine independent pages are referred to as PAGES.  A page
 * is some number of hardware pages, depending on the target machine.
 */

#ifndef	ASSEMBLER

#define PAGE_SIZE_64	(unsigned long long)PAGE_SIZE	/* pagesize in addr units */
#define PAGE_MASK_64	(unsigned long long)PAGE_MASK	/* mask for off in page */

/*
 * Convert addresses to pages and vice versa.  No rounding is used.
 * The atop_32 and ptoa_32 macros should not be used on 64-bit types.
 * The atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x)	((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x)	((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x)	((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x)	((uint64_t)(x) << PAGE_SHIFT)

#define atop_kernel(x)	((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x)	((vm_address_t)(x) << PAGE_SHIFT)

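/*
 * Illustrative sketch (not part of the original header): assuming a
 * hypothetical 4 KB page (PAGE_SHIFT == 12), the conversions behave as:
 *
 *	atop_64(0x3000ULL) -> 3		(byte address to page number)
 *	ptoa_64(3ULL)      -> 0x3000	(page number to byte address)
 *	atop_32(0x1fff)    -> 1		(no rounding: the partial page is
 *					 truncated, never rounded up)
 */
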
/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros will behave correctly.  If not, they will generate
 * invalid lvalue errors.
 */

#if 1
#define atop(x)	((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x)	((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x)	(0UL = 0)
#define ptoa(x)	(0UL = 0)
#endif

/*
 * Page-size rounding macros for the Public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

#define round_page_overflow(in, out) __os_warn_unused(({ \
		bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
		*out &= ~((__typeof__(*out))PAGE_MASK); \
		__ovr; \
	}))

static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}

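/*
 * Usage sketch (illustrative only; the caller below is hypothetical and
 * not part of xnu): the overflow-checked variants round a length up to a
 * page boundary through *out and return non-zero when adding PAGE_MASK
 * wrapped, so callers can reject lengths too close to the type's maximum.
 */
#if 0
static int
example_round_length(mach_vm_offset_t len, mach_vm_offset_t *rounded)
{
	if (mach_vm_round_page_overflow(len, rounded)) {
		/* len was within PAGE_MASK of the maximum value; rounding wrapped */
		return -1;
	}
	return 0;
}
#endif
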
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Rounding macros for the legacy (scalable with the current task's
 * address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 * Round off or truncate to the nearest page.  These will work
 * for either addresses or counts (i.e. 1 byte rounds to 1 page of
 * PAGE_SIZE bytes).  The round_page_32 and trunc_page_32 macros
 * should not be used on 64-bit types.  The round_page_64 and
 * trunc_page_64 macros should be used instead.
 *
 * These should only be used in the rare case the size of the address
 * or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 * associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

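/*
 * Worked example (illustrative only, assuming a hypothetical 4 KB page,
 * i.e. PAGE_MASK == 0xfff): rounding moves up to the next page boundary,
 * truncation moves down to the previous one.
 *
 *	round_page_32(1)              -> 0x1000
 *	round_page_32(0x1000)         -> 0x1000	(already aligned)
 *	trunc_page_32(0x1001)         -> 0x1000
 *	round_page_64(0x180000001ULL) -> 0x180001000
 */
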
/*
 * Enable the following block to find uses of xxx_32 macros that should
 * be xxx_64.  These macros only work in C code, not C++.  The resulting
 * binaries are not functional.  Look for invalid lvalue errors in
 * the compiler output.
 *
 * Enabling the following block will also find use of the xxx_64 macros
 * that have been passed pointers.  The parameters should be cast to an
 * unsigned long type first.  Look for "invalid operands to binary +"
 * errors in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

#define atop_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif

/*
 * Determine whether an address is page-aligned, or a count is
 * an exact page multiple.
 */

#define	page_aligned(x)	(((x) & PAGE_MASK) == 0)

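/*
 * Example (illustrative only, assuming a hypothetical 4 KB page):
 * page_aligned(0x2000) is true, page_aligned(0x2001) is false, and a
 * count of 3 * PAGE_SIZE is likewise an exact page multiple.
 */
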
extern vm_size_t	mem_size;	/* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t		max_mem;	/* 64-bit size of memory - limited by maxmem */

/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE	0xFFFFF000ULL
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of it is wired.
 */
#define ANON_CHUNK_SIZE	(128ULL * 1024 * 1024)	/* 128MB */

#ifdef	XNU_KERNEL_PRIVATE

extern uint64_t		mem_actual;	/* 64-bit size of memory - not limited by maxmem */
extern uint64_t		sane_size;	/* Memory size to use for defaults calculations */
extern addr64_t		vm_last_addr;	/* Highest kernel virtual address known to the VM system */

extern const vm_offset_t	vm_min_kernel_address;
extern const vm_offset_t	vm_max_kernel_address;

extern vm_offset_t		vm_kernel_stext;
extern vm_offset_t		vm_kernel_etext;
extern vm_offset_t		vm_kernel_slid_base;
extern vm_offset_t		vm_kernel_slid_top;
extern vm_offset_t		vm_kernel_slide;
extern vm_offset_t		vm_kernel_addrperm;
extern vm_offset_t		vm_kext_base;
extern vm_offset_t		vm_kext_top;
extern vm_offset_t		vm_kernel_base;
extern vm_offset_t		vm_kernel_top;
extern vm_offset_t		vm_hib_base;

#define VM_KERNEL_IS_SLID(_o)						\
		(((vm_offset_t)(_o) >= vm_kernel_slid_base) &&		\
		 ((vm_offset_t)(_o) <  vm_kernel_slid_top))

#define VM_KERNEL_SLIDE(_u)						\
		((vm_offset_t)(_u) + vm_kernel_slide)

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.).  It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do.  Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities.
 *
 * Note that in general the ideal goal is to protect addresses from userspace
 * in a way that is reversible assuming you know the permutation and/or slide.
 *
 * The macros are as follows:
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     a "static" kernel or kext address (i.e. coming from text or data
 *     sections).  These are the addresses which get "slid" via ASLR on kernel
 *     or kext load, and it's precisely the slide value we are trying to
 *     protect from userspace.
 *
 * VM_KERNEL_ADDRPERM:
 *     Use this macro when you are exposing an address to userspace which is
 *     coming from the kernel's "heap".  Since these addresses are not "loaded"
 *     from anywhere, there is no slide applied and we instead apply the
 *     permutation value to obscure the address.
 *
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use this macro when you are exposing an address to userspace that could
 *     come from either kernel text/data *or* the heap.  This is a rare case,
 *     but one that does come up and must be handled correctly.  If the argument
 *     is known to be lower than any potential heap address, no transformation
 *     is applied, to avoid revealing the operation on a constant.
 *
 * Nesting of these macros should be considered invalid.
 */
#define VM_KERNEL_UNSLIDE(_v)						\
		((VM_KERNEL_IS_SLID(_v)) ?				\
			(vm_offset_t)(_v) - vm_kernel_slide :		\
			(vm_offset_t)(_v))

#define VM_KERNEL_ADDRPERM(_v)						\
		(((vm_offset_t)(_v) == 0) ?				\
			(vm_offset_t)(0) :				\
			(vm_offset_t)(_v) + vm_kernel_addrperm)

#define VM_KERNEL_UNSLIDE_OR_PERM(_v)					\
		((VM_KERNEL_IS_SLID(_v)) ?				\
			(vm_offset_t)(_v) - vm_kernel_slide :		\
			((vm_offset_t)(_v) >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ? VM_KERNEL_ADDRPERM(_v) : (vm_offset_t)(_v)))

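/*
 * Usage sketch (illustrative only; the function and variables below are
 * hypothetical and not part of xnu): a code address harvested from a
 * backtrace lies in kernel/kext text, so the ASLR slide is removed before
 * it is exposed; a heap object pointer is obscured with the permutation
 * value instead.
 */
#if 0
static void
example_export_addresses(vm_offset_t return_address, void *heap_object,
    uint64_t *out_pc, uint64_t *out_obj)
{
	/* static text/data address: strip the slide */
	*out_pc  = (uint64_t)VM_KERNEL_UNSLIDE(return_address);

	/* heap address: apply the address permutation */
	*out_obj = (uint64_t)VM_KERNEL_ADDRPERM(heap_object);
}
#endif
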
#endif	/* XNU_KERNEL_PRIVATE */

extern vm_size_t	page_size;
extern vm_size_t	page_mask;
extern int		page_shift;

/* We need a way to get rid of compiler warnings when we cast from */
/* a 64-bit value to an address (which may be 32 bits or 64 bits). */
/* An intptr_t is used to convert the value to the right precision, and */
/* then to an address.  This macro is also used to convert addresses */
/* to 32-bit integers, which is a hard failure for a 64-bit kernel. */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
	( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
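
/*
 * Illustrative sketch (not part of the original header): when the
 * destination type is narrower than uintptr_t, the divisor in CAST_DOWN
 * becomes 0 and the compiler flags a division by zero instead of silently
 * truncating; same-width casts divide by 1 and compile cleanly.
 * CAST_DOWN_EXPLICIT performs the conversion without that check.
 */
#if 0
static uintptr_t
example_cast_down(uint64_t wide_value)
{
	/* same width as uintptr_t on a 64-bit kernel: divisor is 1, no diagnostic */
	uintptr_t ok = CAST_DOWN(uintptr_t, wide_value);

	/* narrower than uintptr_t on a 64-bit kernel: divisor is 0, flagged */
	/* uint32_t bad = CAST_DOWN(uint32_t, wide_value); */

	/* deliberate, unchecked truncation when that is really intended */
	uint32_t forced = CAST_DOWN_EXPLICIT(uint32_t, wide_value);

	return ok + forced;
}
#endif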

#endif	/* ASSEMBLER */

#endif	/* KERNEL */

#endif	/* _MACH_VM_PARAM_H_ */