]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
2d21ac55 | 2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: mach/vm_param.h | |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
61 | * Date: 1985 | |
62 | * | |
63 | * Machine independent virtual memory parameters. | |
64 | * | |
65 | */ | |
66 | ||
67 | #ifndef _MACH_VM_PARAM_H_ | |
68 | #define _MACH_VM_PARAM_H_ | |
69 | ||
1c79356b | 70 | #include <mach/machine/vm_param.h> |
90556fb8 | 71 | |
91447636 A |
72 | #ifdef KERNEL |
73 | ||
55e303ae | 74 | #ifndef ASSEMBLER |
90556fb8 | 75 | #include <mach/vm_types.h> |
55e303ae | 76 | #endif /* ASSEMBLER */ |
1c79356b | 77 | |
39037602 A |
78 | #include <os/base.h> |
79 | #include <os/overflow.h> | |
80 | ||
1c79356b A |
/*
 * The machine independent pages are referred to as PAGES.  A page
 * is some number of hardware pages, depending on the target machine.
 */
85 | ||
55e303ae A |
#ifndef ASSEMBLER

#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK /* mask for off in page */

/*
 * Convert addresses to pages and vice versa.  No rounding is used.
 * The atop_32 and ptoa_32 macros should not be used on 64 bit types.
 * The atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

/* Kernel-address flavors: operate at the native vm_address_t width. */
#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros will behave correctly.  If not, they will generate
 * invalid lvalue errors ((0UL = 0) is not assignable), which is a
 * deliberate trick for flushing out callers that should instead be
 * using one of the explicitly sized variants above.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif
118 | ||
91447636 A |
/*
 * Page-size rounding macros for the Public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Round `in` up to a page boundary, storing the result through `out`
 * and evaluating to true when adding PAGE_MASK overflowed the type of
 * `*out`.  `*out` is always written, and is truncated to a page
 * boundary even on overflow.  NOTE: `out` is expanded more than once,
 * so do not pass an expression with side effects.
 */
#define round_page_overflow(in, out) __os_warn_unused(({ \
	bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	*out &= ~((__typeof__(*out))PAGE_MASK); \
	__ovr; \
}))
130 | ||
/*
 * Typed, warn-on-unused-result wrapper around round_page_overflow()
 * for mach_vm_offset_t.  Returns nonzero when rounding `in` up to a
 * page boundary overflowed; the page-aligned (possibly truncated)
 * result is stored in *out either way.
 */
static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}
136 | ||
91447636 A |
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Rounding macros for the legacy (scalable with the current task's
 * address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 * Round off or truncate to the nearest page.  These will work
 * for either addresses or counts (i.e. 1 byte rounds to 1 page's
 * worth of bytes).  The round_page_32 and trunc_page_32 macros
 * should not be used on 64 bit types.  The round_page_64 and
 * trunc_page_64 macros should be used instead.
 *
 * These should only be used in the rare case the size of the address
 * or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 * associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

/*
 * Enable the following block to find uses of xxx_32 macros that should
 * be xxx_64.  These macros only work in C code, not C++.  The resulting
 * binaries are not functional.  Look for invalid lvalue errors in
 * the compiler output.
 *
 * Enabling the following block will also find use of the xxx_64 macros
 * that have been passed pointers.  The parameters should be cast to an
 * unsigned long type first.  Look for invalid operands to binary +
 * errors in the compiler output.
 */
1c79356b | 176 | |
90556fb8 A |
#if 0
/*
 * Debug-only redefinitions (see the comment above).  When this block
 * is enabled, the _32 macros deliberately fail to compile for 64-bit
 * arguments, and the _64 macros fail for pointer arguments.
 */
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

/*
 * For a 64-bit argument, __builtin_choose_expr selects (0UL), and
 * (0UL) = 0 is an invalid lvalue assignment -- a compile-time error.
 * For any other size it selects a writable lvalue, so compilation
 * succeeds (though the resulting code is non-functional).
 */
#define atop_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

/* __builtin_choose_expr is unavailable in C++; stub the macros out. */
#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

/*
 * Adding (uint8_t *)0 is valid pointer arithmetic for an integer
 * argument, but pointer + pointer is ill-formed -- so a pointer
 * argument triggers an "invalid operands to binary +" error.
 */
#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif
1c79356b A |
223 | |
/*
 * Determine whether an address is page-aligned, or a count is
 * an exact page multiple.  The argument is evaluated exactly once.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)
1c79356b | 230 | |
extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */

/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE 0xFFFFF000ULL
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a
 * small portion of it is being wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */

#ifdef XNU_KERNEL_PRIVATE

#include <kern/debug.h>

extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */
extern uint64_t sane_size; /* Memory size to use for defaults calculations */
extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */

extern const vm_offset_t vm_min_kernel_address;
extern const vm_offset_t vm_max_kernel_address;

/*
 * Kernel layout landmarks used by the address-exposure macros below.
 * NOTE(review): names suggest text start/end, slid-region bounds,
 * kext-region bounds and hibernation base -- confirm against the
 * definitions in the osfmk/vm sources before relying on specifics.
 */
extern vm_offset_t vm_kernel_stext;
extern vm_offset_t vm_kernel_etext;
extern vm_offset_t vm_kernel_slid_base;
extern vm_offset_t vm_kernel_slid_top;
extern vm_offset_t vm_kernel_slide;
extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_hib_base;
39236c6e | 271 | |
/* True when _o lies within [vm_kernel_slid_base, vm_kernel_slid_top). */
#define VM_KERNEL_IS_SLID(_o) \
	(((vm_offset_t)(_o) >= vm_kernel_slid_base) && \
	((vm_offset_t)(_o) < vm_kernel_slid_top))

/* Re-apply the ASLR slide to an unslid (static) kernel address. */
#define VM_KERNEL_SLIDE(_u) \
	((vm_offset_t)(_u) + vm_kernel_slide)

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do. Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities. It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     *guaranteed* to be a "static" kernel or kext address (i.e. coming from
 *     text or data sections). These are the addresses which get "slid" via
 *     ASLR on kernel or kext load, and it's precisely the slide value we are
 *     trying to protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 *     Use when exposing an address for internal purposes: debugging, tracing,
 *     etc. The address will be unslid if necessary. Other addresses will be
 *     hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 *     Use this macro when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 * Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use these macros when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */

__BEGIN_DECLS
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
__END_DECLS

/* Remove the ASLR slide from an address known to be slid. */
#define __DO_UNSLIDE(_v) ((vm_offset_t)(_v) - vm_kernel_slide)

#if DEBUG || DEVELOPMENT
# define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)(_v))
#else
/* Release builds fully hide (zero) addresses outside the slid region. */
# define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)(_v) + vm_kernel_addrperm) : \
	(vm_offset_t)(_v); \
	})

#define VM_KERNEL_UNSLIDE(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
	})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)

/*
 * Inside XNU proper, replace the macro rounding forms defined earlier
 * with the overflow-checking inline functions that follow.
 */
#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64
348 | ||
349 | static inline mach_vm_offset_t | |
350 | mach_vm_round_page(mach_vm_offset_t x) | |
351 | { | |
352 | if (round_page_overflow(x, &x)) { | |
353 | panic("overflow detected"); | |
354 | } | |
355 | return x; | |
356 | } | |
357 | ||
358 | static inline vm_offset_t | |
359 | round_page(vm_offset_t x) | |
360 | { | |
361 | if (round_page_overflow(x, &x)) { | |
362 | panic("overflow detected"); | |
363 | } | |
364 | return x; | |
365 | } | |
366 | ||
367 | static inline mach_vm_offset_t | |
368 | round_page_64(mach_vm_offset_t x) | |
369 | { | |
370 | if (round_page_overflow(x, &x)) { | |
371 | panic("overflow detected"); | |
372 | } | |
373 | return x; | |
374 | } | |
375 | ||
376 | static inline uint32_t | |
377 | round_page_32(uint32_t x) | |
378 | { | |
379 | if (round_page_overflow(x, &x)) { | |
380 | panic("overflow detected"); | |
381 | } | |
382 | return x; | |
383 | } | |
fe8ab488 | 384 | |
91447636 A |
385 | #endif /* XNU_KERNEL_PRIVATE */ |
386 | ||
/*
 * Current VM page geometry.  NOTE(review): presumably initialized
 * during VM bootstrap -- confirm against the vm_init path.
 */
extern vm_size_t page_size;
extern vm_size_t page_mask;
extern int page_shift;
91447636 | 390 | |
/* We need a way to get rid of compiler warnings when we cast from */
/* a 64 bit value to an address (which may be 32 bits or 64-bits). */
/* An intptr_t is used to convert the value to the right precision, and */
/* then to an address. This macro is also used to convert addresses */
/* to 32-bit integers, which is a hard failure for a 64-bit kernel. */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

/*
 * When `type` is narrower than uintptr_t, the conditional below folds
 * to a constant division by zero, turning the lossy truncation into a
 * compile-time diagnostic instead of a silent runtime bug.  Otherwise
 * the division by 1 is a no-op.
 */
#define CAST_DOWN( type, addr ) \
    ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

/* Unchecked variant: use only where the truncation is known safe. */
#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
1c79356b A |
406 | |
407 | #endif /* ASSEMBLER */ | |
91447636 A |
408 | |
409 | #endif /* KERNEL */ | |
410 | ||
1c79356b | 411 | #endif /* _MACH_VM_PARAM_H_ */ |