/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	mach/vm_param.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine independent virtual memory parameters.
 *
 */

#ifndef _MACH_VM_PARAM_H_
#define _MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>

#ifdef KERNEL

#ifndef ASSEMBLER
#include <mach/vm_types.h>
#endif  /* ASSEMBLER */

#include <os/base.h>
#include <os/overflow.h>

/*
 *	The machine-independent pages are referred to as PAGES.  A page
 *	is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER

#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE      /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK      /* mask for off in page */

/*
 *	Convert addresses to pages and vice versa.  No rounding is used.
 *	The atop_32 and ptoa_32 macros should not be used on 64-bit types.
 *	The atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

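/*
 * Illustrative sketch (editor's addition, not part of the original
 * interface): how the conversion macros relate byte counts/addresses to
 * page numbers.  atop_* truncates (no rounding), so a length that is not
 * page-aligned loses its partial page; ptoa_* converts back to bytes.
 */
#if 0
static inline void
vm_param_conversion_example(void)
{
	uint64_t len   = 3 * PAGE_SIZE_64 + 1;  /* 3 pages plus one byte        */
	uint64_t pages = atop_64(len);          /* truncates: 3                 */
	uint64_t bytes = ptoa_64(pages);        /* back to bytes: 3 whole pages */
	(void)bytes;
}
#endif
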
/*
 *	While the following block is enabled, the legacy atop and ptoa
 *	macros will behave correctly.  If not, they will generate
 *	invalid lvalue errors.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif

/*
 * Page-size rounding macros for the Public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

#define round_page_overflow(in, out) __os_warn_unused(({ \
	        bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	        *out &= ~((__typeof__(*out))PAGE_MASK); \
	        __ovr; \
	}))

static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}

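/*
 * Illustrative sketch (editor's addition): round_page_overflow() reports,
 * rather than silently wraps, the case where rounding up cannot be
 * represented in the destination type.  Callers are expected to check the
 * returned flag.
 */
#if 0
static inline int
checked_round_example(mach_vm_offset_t len, mach_vm_offset_t *rounded)
{
	if (mach_vm_round_page_overflow(len, rounded)) {
		/* e.g. len == UINT64_MAX: the rounded size is not representable */
		return -1;
	}
	return 0;
}
#endif
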
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 *	Rounding macros for the legacy (scalable with the current task's
 *	address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 *	Round off or truncate to the nearest page.  These will work
 *	for either addresses or counts (i.e. 1 byte rounds to 1 page's
 *	worth of bytes).  The round_page_32 and trunc_page_32 macros
 *	should not be used on 64-bit types.  The round_page_64 and
 *	trunc_page_64 macros should be used instead.
 *
 *	These should only be used in the rare case the size of the address
 *	or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 *	associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

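/*
 * Illustrative sketch (editor's addition): why the width-specific variants
 * matter.  round_page_32() casts its argument to uint32_t, so a 64-bit
 * length is silently truncated before rounding; round_page_64() keeps the
 * full width.  The numeric comments assume a hypothetical 4 KB page size.
 */
#if 0
static inline void
width_specific_rounding_example(void)
{
	uint64_t big = 0x100000001ULL;          /* just over 4 GB */
	/* truncates to 0x1 before rounding: yields 0x1000, which is wrong */
	uint32_t r32 = round_page_32(big);
	/* keeps the full width: yields 0x100001000 */
	uint64_t r64 = round_page_64(big);
	(void)r32; (void)r64;
}
#endif
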
/*
 *	Enable the following block to find uses of xxx_32 macros that should
 *	be xxx_64.  These macros only work in C code, not C++.  The resulting
 *	binaries are not functional.  Look for invalid lvalue errors in
 *	the compiler output.
 *
 *	Enabling the following block will also find uses of the xxx_64 macros
 *	that have been passed pointers.  The parameters should be cast to an
 *	unsigned long type first.  Look for "invalid operands to binary +"
 *	errors in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

#define atop_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
	(__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif
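
/*
 * Illustrative sketch (editor's addition): what the debug block above
 * catches when enabled.  Each debug xxx_32 macro expands to an assignment
 * whose left-hand side is an lvalue only when the argument is narrower than
 * 64 bits, so a 64-bit argument produces an "invalid lvalue" error at
 * exactly the call site that should have used xxx_64 (correct 32-bit uses
 * still compile, but into non-functional code, as noted above).  The debug
 * xxx_64 macros add the argument to (uint8_t *)0, so passing a pointer
 * produces an "invalid operands to binary +" error.
 */
#if 0
static inline void
misuse_detection_example(void)
{
	uint64_t len64 = 0;
	void *ptr = (void *)0;
	(void)atop_32(len64);   /* flagged: 64-bit value passed to a _32 macro */
	(void)atop_64(ptr);     /* flagged: pointer passed to a _64 macro      */
}
#endif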

/*
 *	Determine whether an address is page-aligned, or a count is
 *	an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)

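/*
 * Illustrative sketch (editor's addition): page_aligned() works on both
 * addresses and sizes.  Numeric comments assume a hypothetical 4 KB page.
 */
#if 0
static inline void
page_aligned_example(void)
{
	(void)page_aligned(0x3000);              /* true: exact page multiple   */
	(void)page_aligned(0x3001);              /* false: one byte past a page */
	(void)page_aligned(trunc_page(0x3001));  /* always true after trunc     */
}
#endif
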
extern vm_size_t mem_size;              /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t  max_mem;               /* 64-bit size of memory - limited by maxmem */

/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE   0xFFFFF000ULL
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of it is wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */

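/*
 * Illustrative sketch (editor's addition): the chunk-count arithmetic
 * implied by the limits above.  A hypothetical 1 GB request is carved into
 * 1 GB / 128 MB = 8 ANON_CHUNK_SIZE-sized pieces; a 6 GB request also
 * exceeds ANON_MAX_SIZE (4 GB minus 1 page) and so must span several
 * anonymous objects.
 */
#if 0
static inline uint64_t
anon_chunk_count_example(uint64_t size)
{
	/* number of ANON_CHUNK_SIZE chunks needed to cover `size' bytes */
	return (size + ANON_CHUNK_SIZE - 1) / ANON_CHUNK_SIZE;
}
#endif
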
/*
 * The 'medium' malloc allocator would like its regions
 * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
 * and backed by different objects.  This avoids contention
 * on a single large object and showed solid improvements on
 * high-core-count machines with workloads involving video and
 * graphics processing.
 */
#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024) /* 8 MB */

#ifdef XNU_KERNEL_PRIVATE

#include <kern/debug.h>

extern uint64_t mem_actual;             /* 64-bit size of memory - not limited by maxmem */
extern uint64_t sane_size;              /* Memory size to use for defaults calculations */
extern addr64_t vm_last_addr;           /* Highest kernel virtual address known to the VM system */

extern const vm_offset_t vm_min_kernel_address;
extern const vm_offset_t vm_max_kernel_address;

extern vm_offset_t vm_kernel_stext;
extern vm_offset_t vm_kernel_etext;
extern vm_offset_t vm_kernel_slid_base;
extern vm_offset_t vm_kernel_slid_top;
extern vm_offset_t vm_kernel_slide;
extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_hib_base;

extern vm_offset_t vm_kernel_builtinkmod_text;
extern vm_offset_t vm_kernel_builtinkmod_text_end;

#define VM_KERNEL_IS_SLID(_o) \
	(((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
	((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top))

#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do. Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities. It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     *guaranteed* to be a "static" kernel or kext address (i.e. coming from text
 *     or data sections). These are the addresses which get "slid" via ASLR on
 *     kernel or kext load, and it's precisely the slide value we are trying to
 *     protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 *     Use when exposing an address for internal purposes: debugging, tracing,
 *     etc. The address will be unslid if necessary. Other addresses will be
 *     hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 *     Use this macro when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or the
 *     kernel heap. The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 * Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use these macros when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or the
 *     kernel heap. The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */

__BEGIN_DECLS
#if XNU_KERNEL_PRIVATE
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr)
__XNU_INTERNAL(vm_kernel_addrhash);
#else
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
#endif
__END_DECLS

#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)

#if DEBUG || DEVELOPMENT
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
	(vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
})

#define VM_KERNEL_UNSLIDE(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)

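/*
 * Illustrative sketch (editor's addition): choosing among the macros above
 * when reporting addresses to userspace from a debug/info interface.  The
 * function pointer stands in for a static (slid) kernel address and the
 * heap pointer for a non-static one; both parameter names are hypothetical.
 */
#if 0
static inline void
expose_addresses_example(void (*static_kernel_func)(void), void *heap_obj)
{
	/* static kernel/kext text: remove only the ASLR slide */
	vm_offset_t a = VM_KERNEL_UNSLIDE(static_kernel_func);

	/* internal-only debugging/tracing output */
	vm_address_t b = VM_KERNEL_ADDRHIDE(heap_obj);

	/* general-purpose exposure on customer builds: unslide or hash */
	vm_offset_t c = VM_KERNEL_ADDRHASH(heap_obj);

	(void)a; (void)b; (void)c;
}
#endif
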
#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64

static inline mach_vm_offset_t
mach_vm_round_page(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline vm_offset_t
round_page(vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline mach_vm_offset_t
round_page_64(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline uint32_t
round_page_32(uint32_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

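/*
 * Illustrative note (editor's addition): inside XNU itself the rounding
 * macros are replaced by the overflow-checked inline functions above, so
 * rounding a value within PAGE_MASK of the type's maximum panics instead of
 * silently wrapping to 0 as the plain mask-and-add macros would.
 */
#if 0
static inline vm_offset_t
checked_vs_unchecked_example(void)
{
	vm_offset_t almost_max = (vm_offset_t)-1;  /* within PAGE_MASK of max      */
	return round_page(almost_max);             /* panics: "overflow detected"  */
}
#endif
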
#endif  /* XNU_KERNEL_PRIVATE */

extern vm_size_t page_size;
extern vm_size_t page_mask;
extern int       page_shift;

/* We need a way to get rid of compiler warnings when we cast from      */
/* a 64-bit value to an address (which may be 32 bits or 64 bits).      */
/* An intptr_t is used to convert the value to the right precision, and */
/* then to an address. This macro is also used to convert addresses     */
/* to 32-bit integers, which is a hard failure for a 64-bit kernel.     */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
	( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
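
/*
 * Illustrative sketch (editor's addition): CAST_DOWN() only narrows to a
 * type at least as wide as a pointer; narrowing to a smaller type divides
 * by zero inside the macro, so the mistake is flagged at build time instead
 * of silently truncating an address.  CAST_DOWN_EXPLICIT() skips that check
 * for the rare cases where truncation is intended.
 */
#if 0
static inline void
cast_down_example(uint64_t value_from_wire)
{
	void *p = CAST_DOWN(void *, value_from_wire);                /* OK */
	/* uint32_t bad = CAST_DOWN(uint32_t, value_from_wire); */   /* flagged
	                                                                on a
	                                                                64-bit
	                                                                kernel */
	uint32_t ok = CAST_DOWN_EXPLICIT(uint32_t, value_from_wire); /* explicit
	                                                                truncation */
	(void)p; (void)ok;
}
#endif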

#endif  /* ASSEMBLER */

#endif  /* KERNEL */

#endif  /* _MACH_VM_PARAM_H_ */