/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	mach/vm_param.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine independent virtual memory parameters.
 *
 */

#ifndef _MACH_VM_PARAM_H_
#define _MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>

#ifdef KERNEL

#ifndef ASSEMBLER
#include <mach/vm_types.h>
#endif /* ASSEMBLER */

#include <os/base.h>
#include <os/overflow.h>

/*
 * The machine independent pages are referred to as PAGES.  A page
 * is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER

#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE      /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK      /* mask for off in page */

/*
 * Convert addresses to pages and vice versa.  No rounding is used.
 * The atop_32 and ptoa_32 macros should not be used on 64-bit types;
 * the atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

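/*
 * Illustrative sketch (not part of the original header): assuming a
 * hypothetical 4 KB page size (PAGE_SHIFT == 12), the conversions behave
 * as follows.
 *
 *     uint64_t addr = 0x12345678ULL;
 *     uint64_t page = atop_64(addr);   // 0x12345    -- page number containing addr
 *     uint64_t base = ptoa_64(page);   // 0x12345000 -- first byte of that page
 *
 * Note that ptoa_64(atop_64(addr)) truncates addr down to a page boundary;
 * these macros never round up.
 */
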
/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros will behave correctly.  If not, they will generate
 * invalid lvalue errors.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif

/*
 * Page-size rounding macros for the Public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

#define round_page_overflow(in, out) __os_warn_unused(({ \
        bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
        *out &= ~((__typeof__(*out))PAGE_MASK); \
        __ovr; \
}))

static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
        return round_page_overflow(in, out);
}

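/*
 * Illustrative sketch (not part of the original header): rounding with an
 * explicit overflow check.  An offset within PAGE_MASK bytes of the top of
 * the 64-bit range cannot be rounded up, so the overflow variant reports
 * failure instead of silently wrapping.  `len` and the KERN_INVALID_ARGUMENT
 * handling are hypothetical caller details used only for illustration.
 *
 *     mach_vm_offset_t rounded;
 *
 *     if (mach_vm_round_page_overflow(len, &rounded)) {
 *             // len + PAGE_MASK wrapped around; reject the request
 *             return KERN_INVALID_ARGUMENT;
 *     }
 *     // rounded now holds len rounded up to a page boundary
 */
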
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 * Rounding macros for the legacy (scalable with the current task's
 * address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 * Round off or truncate to the nearest page.  These will work
 * for either addresses or counts (i.e. 1 byte rounds up to 1 page of
 * bytes).  The round_page_32 and trunc_page_32 macros should not be
 * used on 64-bit types; the round_page_64 and trunc_page_64 macros
 * should be used instead.
 *
 * These should only be used in the rare case the size of the address
 * or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 * associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

#define round_page_mask_32(x, mask) (((uint32_t)(x) + (mask)) & ~((uint32_t)(mask)))
#define trunc_page_mask_32(x, mask) ((uint32_t)(x) & ~((uint32_t)(mask)))
#define round_page_mask_64(x, mask) (((uint64_t)(x) + (mask)) & ~((uint64_t)(mask)))
#define trunc_page_mask_64(x, mask) ((uint64_t)(x) & ~((uint64_t)(mask)))

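/*
 * Illustrative sketch (not part of the original header): assuming a
 * hypothetical 4 KB page (PAGE_MASK_64 == 0xFFF), the rounding macros
 * behave as follows.
 *
 *     round_page_64(0x1001ULL) == 0x2000   // 1 byte past a boundary rounds up a full page
 *     round_page_64(0x2000ULL) == 0x2000   // an exact multiple is unchanged
 *     trunc_page_64(0x1FFFULL) == 0x1000   // truncation always rounds down
 *
 * Note that round_page_64(x) silently wraps around (modulo 2^64) when x is
 * within PAGE_MASK_64 bytes of the top of the 64-bit range; use
 * round_page_overflow() when that case must be detected.
 */
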
/*
 * Enable the following block to find uses of xxx_32 macros that should
 * be xxx_64.  These macros only work in C code, not C++.  The resulting
 * binaries are not functional.  Look for invalid lvalue errors in
 * the compiler output.
 *
 * Enabling the following block will also find uses of the xxx_64 macros
 * that have been passed pointers.  The parameters should be cast to an
 * unsigned long type first.  Look for "invalid operands to binary +"
 * errors in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

#define atop_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
        (*(long *)0), \
        (0UL)) = 0)

#define ptoa_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
        (*(long *)0), \
        (0UL)) = 0)

#define round_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
        (*(long *)0), \
        (0UL)) = 0)

#define trunc_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
        (*(long *)0), \
        (0UL)) = 0)
#else

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif

/*
 * Determine whether an address is page-aligned, or a count is
 * an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)

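/*
 * Illustrative sketch (not part of the original header): with a hypothetical
 * 4 KB page size, page_aligned(0x3000) is true while page_aligned(0x3004) is
 * false; the same test also tells whether a byte count is an exact multiple
 * of the page size.
 */
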
extern vm_size_t mem_size;      /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t  max_mem;       /* 64-bit size of memory - limited by maxmem */

/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE 0xFFFFF000ULL
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of the area is wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024)  /* 128MB */

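/*
 * Illustrative sketch (not part of the original header): a hypothetical 1 GB
 * anonymous mapping is backed by 1 GB / ANON_CHUNK_SIZE = 8 separate 128 MB
 * chunks, so wiring a few pages in one chunk forces a copy of at most 128 MB
 * rather than of the whole mapping.
 */
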
/*
 * The 'medium' malloc allocator would like its regions
 * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
 * and backed by different objects.  This avoids contention
 * on a single large object and showed solid improvements on machines
 * with high core counts running video and graphics processing workloads.
 */
#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024)   /* 8 MB */

#ifdef XNU_KERNEL_PRIVATE

#include <kern/debug.h>

extern uint64_t mem_actual;     /* 64-bit size of memory - not limited by maxmem */
extern uint64_t max_mem_actual; /* Size of physical memory adjusted by maxmem */
extern uint64_t sane_size;      /* Memory size to use for defaults calculations */
extern addr64_t vm_last_addr;   /* Highest kernel virtual address known to the VM system */

extern const vm_offset_t vm_min_kernel_address;
extern const vm_offset_t vm_max_kernel_address;

extern vm_offset_t vm_kernel_stext;
extern vm_offset_t vm_kernel_etext;
extern vm_offset_t vm_kernel_slid_base;
extern vm_offset_t vm_kernel_slid_top;
extern vm_offset_t vm_kernel_slide;
extern vm_offset_t vm_kernel_addrperm;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
extern vm_offset_t vm_kernel_base;
extern vm_offset_t vm_kernel_top;
extern vm_offset_t vm_hib_base;

extern vm_offset_t vm_kernel_builtinkmod_text;
extern vm_offset_t vm_kernel_builtinkmod_text_end;

#define VM_KERNEL_IS_SLID(_o) \
        (((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
        ((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top))

#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.).  It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do.  Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities.  It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 * Use this macro when you are exposing an address to userspace which is
 * *guaranteed* to be a "static" kernel or kext address (i.e. coming from text
 * or data sections).  These are the addresses which get "slid" via ASLR on
 * kernel or kext load, and it's precisely the slide value we are trying to
 * protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 * Use when exposing an address for internal purposes: debugging, tracing,
 * etc.  The address will be unslid if necessary.  Other addresses will be
 * hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 * Use this macro when exposing a kernel address to userspace on customer
 * builds.  The address can be from the static kernel or kext regions, or the
 * kernel heap.  The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 * Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 * Use these macros when exposing a kernel address to userspace on customer
 * builds.  The address can be from the static kernel or kext regions, or the
 * kernel heap.  The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */

__BEGIN_DECLS
#if XNU_KERNEL_PRIVATE
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr)
__XNU_INTERNAL(vm_kernel_addrhash);
#else
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
#endif
__END_DECLS

#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)

#if DEBUG || DEVELOPMENT
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
        VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
        VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
        (vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
})

#define VM_KERNEL_UNSLIDE(_v) ({ \
        VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)
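
/*
 * Illustrative sketch (not part of the original header): typical choices when
 * reporting addresses to userspace.  `handler`, `obj`, and the destination
 * fields are hypothetical values used only for illustration.
 *
 *     // A function pointer into kernel/kext text: remove the ASLR slide.
 *     info->fn = VM_KERNEL_UNSLIDE(handler);
 *
 *     // A heap object exposed on customer builds: hash it instead.
 *     info->id = VM_KERNEL_ADDRHASH(obj);
 *
 *     // Tracing/debugging output: hidden on customer builds only.
 *     trace_arg = VM_KERNEL_ADDRHIDE(obj);
 */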

#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64

static inline mach_vm_offset_t
mach_vm_round_page(mach_vm_offset_t x)
{
        if (round_page_overflow(x, &x)) {
                panic("overflow detected");
        }
        return x;
}

static inline vm_offset_t
round_page(vm_offset_t x)
{
        if (round_page_overflow(x, &x)) {
                panic("overflow detected");
        }
        return x;
}

static inline mach_vm_offset_t
round_page_64(mach_vm_offset_t x)
{
        if (round_page_overflow(x, &x)) {
                panic("overflow detected");
        }
        return x;
}

static inline uint32_t
round_page_32(uint32_t x)
{
        if (round_page_overflow(x, &x)) {
                panic("overflow detected");
        }
        return x;
}

/*!
 * @typedef vm_packing_params_t
 *
 * @brief
 * Data structure representing the packing parameters for a given packed pointer
 * encoding.
 *
 * @discussion
 * Several data structures want to pack their pointers into fewer than 64 bits
 * on LP64 in order to save memory.
 *
 * Adopters are supposed to define 3 macros:
 * - @c *_BITS: number of storage bits used for the packing,
 * - @c *_SHIFT: number of non-significant low bits (expected to be 0),
 * - @c *_BASE: the base against which to encode.
 *
 * The encoding is a no-op when @c *_BITS is equal to @c __WORDSIZE and
 * @c *_SHIFT is 0.
 *
 *
 * The convenience macro @c VM_PACKING_PARAMS can be used to create
 * a @c vm_packing_params_t structure out of those definitions.
 *
 * It is customary to declare a constant global per scheme so that debuggers
 * can decide dynamically how to unpack the various schemes.
 *
 *
 * This uses 2 possible schemes (both of which preserve @c NULL):
 *
 * 1. When the storage bits and shift are sufficiently large (strictly more than
 *    VM_KERNEL_POINTER_SIGNIFICANT_BITS), a sign-extension scheme can be used.
 *
 *    This allows any kernel pointer to be represented.
 *
 * 2. Else, a base-relative scheme can be used; typical bases are:
 *
 *    - @c KERNEL_PMAP_HEAP_RANGE_START when only pointers to heap (zone)
 *      allocated objects need to be packed,
 *
 *    - @c VM_MIN_KERNEL_AND_KEXT_ADDRESS when pointers to kernel globals also
 *      need this.
 *
 *    When such an encoding is used, @c zone_restricted_va_max() must be taught
 *    about it.
 */
typedef struct vm_packing_params {
        vm_offset_t vmpp_base;
        uint8_t     vmpp_bits;
        uint8_t     vmpp_shift;
        bool        vmpp_base_relative;
} vm_packing_params_t;

/*!
 * @macro VM_PACKING_IS_BASE_RELATIVE
 *
 * @brief
 * Whether the packing scheme with those parameters will be base-relative.
 */
#define VM_PACKING_IS_BASE_RELATIVE(ns) \
        (ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS)

/*!
 * @macro VM_PACKING_PARAMS
 *
 * @brief
 * Constructs a @c vm_packing_params_t structure based on the convention that
 * macros with the @c _BASE, @c _BITS and @c _SHIFT suffixes have been defined
 * to the proper values.
 */
#define VM_PACKING_PARAMS(ns) \
        (vm_packing_params_t){ \
            .vmpp_base  = ns##_BASE, \
            .vmpp_bits  = ns##_BITS, \
            .vmpp_shift = ns##_SHIFT, \
            .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \
        }
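
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * base-relative scheme, FOO_PACKED, that stores heap pointers in 32 bits.
 * The FOO_PACKED_* names are assumptions made up for this example.
 *
 *     #define FOO_PACKED_BASE   KERNEL_PMAP_HEAP_RANGE_START
 *     #define FOO_PACKED_BITS   32
 *     #define FOO_PACKED_SHIFT  6   // pointees are 64-byte aligned, low bits are 0
 *
 *     // For debuggers: a global copy of the parameters of this scheme.
 *     const vm_packing_params_t foo_packing_params = VM_PACKING_PARAMS(FOO_PACKED);
 */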

/**
 * @function vm_pack_pointer
 *
 * @brief
 * Packs a pointer according to the specified parameters.
 *
 * @discussion
 * The convenience @c VM_PACK_POINTER macro allows the @c params argument
 * to be synthesized.
 *
 * @param ptr           The pointer to pack.
 * @param params        The encoding parameters.
 * @returns             The packed pointer.
 */
static inline vm_offset_t
vm_pack_pointer(vm_offset_t ptr, vm_packing_params_t params)
{
        if (!params.vmpp_base_relative) {
                return ptr >> params.vmpp_shift;
        }
        if (ptr) {
                return (ptr - params.vmpp_base) >> params.vmpp_shift;
        }
        return (vm_offset_t)0;
}
#define VM_PACK_POINTER(ptr, ns) \
        vm_pack_pointer(ptr, VM_PACKING_PARAMS(ns))

/**
 * @function vm_unpack_pointer
 *
 * @brief
 * Unpacks a pointer packed with @c vm_pack_pointer().
 *
 * @discussion
 * The convenience @c VM_UNPACK_POINTER macro allows the @c params argument
 * to be synthesized.
 *
 * @param packed        The packed value to decode.
 * @param params        The encoding parameters.
 * @returns             The unpacked pointer.
 */
static inline vm_offset_t
vm_unpack_pointer(vm_offset_t packed, vm_packing_params_t params)
{
        if (!params.vmpp_base_relative) {
                intptr_t addr = (intptr_t)packed;
                addr <<= __WORDSIZE - params.vmpp_bits;
                addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift;
                return (vm_offset_t)addr;
        }
        if (packed) {
                return (packed << params.vmpp_shift) + params.vmpp_base;
        }
        return (vm_offset_t)0;
}
#define VM_UNPACK_POINTER(packed, ns) \
        vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns))
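
/*
 * Illustrative sketch (not part of the original header): packing and unpacking
 * with a scheme named FOO_PACKED (a hypothetical name whose _BASE/_BITS/_SHIFT
 * macros are assumed to be defined as in the earlier example).
 *
 *     struct foo *obj    = ...;
 *     uint32_t    packed = (uint32_t)VM_PACK_POINTER((vm_offset_t)obj, FOO_PACKED);
 *
 *     // Later: recover the original pointer.  For any address within the
 *     // scheme's packable range and alignment, the round trip is lossless,
 *     // and NULL maps to 0 in both directions.
 *     struct foo *back = (struct foo *)VM_UNPACK_POINTER(packed, FOO_PACKED);
 */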

/**
 * @function vm_packing_max_packable
 *
 * @brief
 * Returns the largest packable address for the given parameters.
 *
 * @discussion
 * The convenience @c VM_PACKING_MAX_PACKABLE macro allows the @c params
 * argument to be synthesized.
 *
 * @param params        The encoding parameters.
 * @returns             The largest packable pointer.
 */
static inline vm_offset_t
vm_packing_max_packable(vm_packing_params_t params)
{
        if (!params.vmpp_base_relative) {
                return VM_MAX_KERNEL_ADDRESS;
        }

        vm_offset_t ptr = params.vmpp_base +
            (((1ul << params.vmpp_bits) - 1) << params.vmpp_shift);

        return ptr >= params.vmpp_base ? ptr : VM_MAX_KERNEL_ADDRESS;
}
#define VM_PACKING_MAX_PACKABLE(ns) \
        vm_packing_max_packable(VM_PACKING_PARAMS(ns))
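
/*
 * Illustrative sketch (not part of the original header): for a hypothetical
 * base-relative scheme with 32 storage bits and a shift of 6, the largest
 * packable address is
 *
 *     base + (((1ul << 32) - 1) << 6)  ==  base + 0x3FFFFFFFC0
 *
 * i.e. roughly base + 256 GB.  Addresses beyond that cannot be encoded and
 * must be rejected (see vm_verify_pointer_packable() below).
 */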

__abortlike
extern void
vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params);

/**
 * @function vm_verify_pointer_packable
 *
 * @brief
 * Panics if the specified pointer cannot be packed with the specified
 * parameters.
 *
 * @discussion
 * The convenience @c VM_VERIFY_POINTER_PACKABLE macro allows the @c params
 * argument to be synthesized.
 *
 * The convenience @c VM_ASSERT_POINTER_PACKABLE macro also synthesizes
 * the @c params argument, and is erased when assertions are disabled.
 *
 * @param ptr           The pointer to verify.
 * @param params        The encoding parameters.
 */
static inline void
vm_verify_pointer_packable(vm_offset_t ptr, vm_packing_params_t params)
{
        if (ptr & ((1ul << params.vmpp_shift) - 1)) {
                vm_packing_pointer_invalid(ptr, params);
        }
        if (!params.vmpp_base_relative || ptr == 0) {
                return;
        }
        if (ptr <= params.vmpp_base || ptr > vm_packing_max_packable(params)) {
                vm_packing_pointer_invalid(ptr, params);
        }
}
#define VM_VERIFY_POINTER_PACKABLE(ptr, ns) \
        vm_verify_pointer_packable(ptr, VM_PACKING_PARAMS(ns))

#if DEBUG || DEVELOPMENT
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) \
        VM_VERIFY_POINTER_PACKABLE(ptr, ns)
#else
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) ((void)(ptr))
#endif

/**
 * @function vm_packing_verify_range
 *
 * @brief
 * Panics if some pointers in the specified range can't be packed with the
 * specified parameters.
 *
 * @param subsystem     The subsystem requiring the packing.
 * @param min_address   The smallest address of the range.
 * @param max_address   The largest address of the range.
 * @param params        The encoding parameters.
 */
extern void
vm_packing_verify_range(
        const char             *subsystem,
        vm_offset_t             min_address,
        vm_offset_t             max_address,
        vm_packing_params_t     params);

#endif /* XNU_KERNEL_PRIVATE */

extern vm_size_t page_size;
extern vm_size_t page_mask;
extern int       page_shift;

/* We need a way to get rid of compiler warnings when we cast from */
/* a 64-bit value to an address (which may be 32 bits or 64 bits). */
/* A uintptr_t is used to convert the value to the right precision, */
/* and then to an address.  This macro is also used to convert */
/* addresses to 32-bit integers, which is a hard failure for a 64-bit kernel. */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
        ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
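
/*
 * Illustrative sketch (not part of the original header): `off` and
 * get_some_offset() are hypothetical names used only for illustration.
 *
 *     uint64_t    off  = get_some_offset();
 *     vm_offset_t addr = CAST_DOWN(vm_offset_t, off);
 *
 * If the destination type is narrower than uintptr_t (e.g. casting an address
 * down to uint32_t in a 64-bit kernel), the divisor in CAST_DOWN() becomes a
 * compile-time constant zero and the compiler emits a division-by-zero
 * diagnostic.  CAST_DOWN_EXPLICIT() skips that check for cases where the
 * truncation is intentional.
 */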

#endif /* ASSEMBLER */

#endif /* KERNEL */

#endif /* _MACH_VM_PARAM_H_ */