/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	mach/vm_param.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Machine independent virtual memory parameters.
 */
#ifndef _MACH_VM_PARAM_H_
#define _MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>
#ifndef ASSEMBLER
#include <mach/vm_types.h>
#endif  /* ASSEMBLER */
#include <os/overflow.h>
/*
 *	The machine independent pages are referred to as PAGES.  A page
 *	is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER
#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE      /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK      /* mask for off in page */
/*
 *	Convert addresses to pages and vice versa.  No rounding is used.
 *	The atop_32 and ptoa_32 macros should not be used on 64-bit types.
 *	The atop_64 and ptoa_64 macros should be used instead.
 */
#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)
#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)
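/*
 * Example (illustrative only; assumes 4KB pages, i.e. PAGE_SHIFT == 12):
 * atop_64() maps a byte address to its page number and ptoa_64() maps a
 * page number back to a byte address, both by plain shifting:
 *
 *	atop_64(0x3000) == 3       // address 0x3000 lies in page 3
 *	ptoa_64(3)      == 0x3000  // page 3 starts at address 0x3000
 *	atop_64(0x3fff) == 3       // no rounding: the page offset is discarded
 */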
/*
 * While the following block is enabled, the legacy atop and ptoa
 * macros will behave correctly.  If not, they will generate
 * invalid lvalue errors.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif
/*
 *	Page-size rounding macros for the public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))
#define round_page_overflow(in, out) __os_warn_unused(({ \
	bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	*out &= ~((__typeof__(*out))PAGE_MASK); \
	__ovr; \
}))
static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}
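/*
 * Example (illustrative only; user_size is a hypothetical caller-supplied
 * value): checked rounding of an untrusted size.  Unlike mach_vm_round_page(),
 * the _overflow variant reports a wrap past the top of the address space
 * instead of silently producing 0:
 *
 *	mach_vm_offset_t size;
 *	if (mach_vm_round_page_overflow(user_size, &size)) {
 *	        return KERN_INVALID_ARGUMENT;  // size too close to UINT64_MAX
 *	}
 */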
#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))
/*
 *	Rounding macros for the legacy (scalable with the current task's
 *	address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))
/*
 *	Round off or truncate to the nearest page.  These will work
 *	for either addresses or counts (i.e. 1 byte rounds up to 1 page's
 *	worth of bytes).  The round_page_32 and trunc_page_32 macros should
 *	not be used on 64-bit types.  The round_page_64 and trunc_page_64
 *	macros should be used instead.
 *
 *	These should only be used in the rare case the size of the address
 *	or length is hard-coded as 32 or 64 bit.  Otherwise, the macros
 *	associated with the specific VM type should be used.
 */
#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))
#define round_page_mask_32(x, mask) (((uint32_t)(x) + (mask)) & ~((uint32_t)(mask)))
#define trunc_page_mask_32(x, mask) ((uint32_t)(x) & ~((uint32_t)(mask)))
#define round_page_mask_64(x, mask) (((uint64_t)(x) + (mask)) & ~((uint64_t)(mask)))
#define trunc_page_mask_64(x, mask) ((uint64_t)(x) & ~((uint64_t)(mask)))
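/*
 * Example (illustrative only; assumes 4KB pages, PAGE_MASK == 0xfff):
 *
 *	round_page_64(0x4001) == 0x5000  // any partial page rounds up
 *	round_page_64(0x4000) == 0x4000  // exact multiples are unchanged
 *	trunc_page_64(0x4fff) == 0x4000  // truncation discards the offset
 */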
/*
 *	Enable the following block to find uses of xxx_32 macros that should
 *	be xxx_64.  These macros only work in C code, not C++.  The resulting
 *	binaries are not functional.  Look for invalid lvalue errors in
 *	the compiler output.
 *
 *	Enabling the following block will also find uses of the xxx_64 macros
 *	that have been passed pointers.  The parameters should be cast to an
 *	unsigned long type first.  Look for "invalid operands to binary +"
 *	errors in the compiler output.
 */
#if 0
#ifndef __cplusplus

#define atop_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), (0UL)) = 0)
#define ptoa_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), (0UL)) = 0)
#define round_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), (0UL)) = 0)
#define trunc_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), (0UL)) = 0)

#else /* __cplusplus */

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif /* 0 */
/*
 *	Determine whether an address is page-aligned, or a count is
 *	an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)
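/*
 * Example (illustrative only; assumes 4KB pages):
 *
 *	page_aligned(0x2000)  // true:  0x2000 & 0xfff == 0
 *	page_aligned(0x2010)  // false: low bits are set
 */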
extern vm_size_t        mem_size;       /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t         max_mem;        /* 64-bit size of memory - limited by maxmem */
/*
 * The default pager does not handle 64-bit offsets inside its objects,
 * so this limits the size of anonymous memory objects to 4GB minus 1 page.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_SIZE   ((1ULL << 32) - PAGE_SIZE)
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of the area is wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */
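/*
 * Example (illustrative only): a 1GB anonymous region is carved into
 * 1GB / ANON_CHUNK_SIZE == 8 separate 128MB VM objects, so a wired page
 * only defeats copy-on-write for the one chunk that contains it.
 */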
/*
 * The 'medium' malloc allocator would like its regions
 * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
 * and backed by different objects. This avoids contention
 * on a single large object and showed solid improvements on
 * high-core-count machines with workloads involving video and
 * graphics processing.
 */
#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024) /* 8 MB */
#ifdef  XNU_KERNEL_PRIVATE

#include <kern/debug.h>
extern uint64_t         mem_actual;             /* 64-bit size of memory - not limited by maxmem */
extern uint64_t         max_mem_actual;         /* Size of physical memory adjusted by maxmem */
extern uint64_t         sane_size;              /* Memory size to use for defaults calculations */
extern addr64_t         vm_last_addr;           /* Highest kernel virtual address known to the VM system */

extern const vm_offset_t        vm_min_kernel_address;
extern const vm_offset_t        vm_max_kernel_address;

extern vm_offset_t              vm_kernel_stext;
extern vm_offset_t              vm_kernel_etext;
extern vm_offset_t              vm_kernel_slid_base;
extern vm_offset_t              vm_kernel_slid_top;
extern vm_offset_t              vm_kernel_slide;
extern vm_offset_t              vm_kernel_addrperm;
extern vm_offset_t              vm_kext_base;
extern vm_offset_t              vm_kext_top;
extern vm_offset_t              vm_kernel_base;
extern vm_offset_t              vm_kernel_top;
extern vm_offset_t              vm_hib_base;

extern vm_offset_t              vm_kernel_builtinkmod_text;
extern vm_offset_t              vm_kernel_builtinkmod_text_end;
#define VM_KERNEL_IS_SLID(_o) \
	(((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \
	 ((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) <  vm_kernel_slid_top))

#define VM_KERNEL_SLIDE(_u) ((vm_offset_t)(_u) + vm_kernel_slide)
/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do. Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities. It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     *guaranteed* to be a "static" kernel or kext address (i.e. coming from
 *     text or data sections). These are the addresses which get "slid" via
 *     ASLR on kernel or kext load, and it's precisely the slide value we are
 *     trying to protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 *     Use when exposing an address for internal purposes: debugging, tracing,
 *     etc. The address will be unslid if necessary. Other addresses will be
 *     hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 *     Use this macro when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 *                      Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use these macros when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or
 *     the kernel heap. The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */
#if XNU_KERNEL_PRIVATE
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr)
__XNU_INTERNAL(vm_kernel_addrhash);
#else
extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
#endif
#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)
#if DEBUG || DEVELOPMENT
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */
#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \
	(vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
})

#define VM_KERNEL_UNSLIDE(_v) ({ \
	VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)
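/*
 * Example (illustrative only; the handler and info fields are hypothetical):
 * a debug facility reporting a kext function pointer and a heap object
 * address would expose them as:
 *
 *	info->fn_offset  = VM_KERNEL_UNSLIDE(handler);     // static text: unslide
 *	info->obj_cookie = VM_KERNEL_ADDRHASH(object_ptr); // heap: hash, don't leak
 */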
#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64
static inline mach_vm_offset_t
mach_vm_round_page(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline vm_offset_t
round_page(vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline mach_vm_offset_t
round_page_64(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline uint32_t
round_page_32(uint32_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}
/*!
 * @typedef vm_packing_params_t
 *
 * @brief
 * Data structure representing the packing parameters for a given packed
 * pointer encoding.
 *
 * @discussion
 * Several data structures wish to pack their pointers in fewer than 64 bits
 * on LP64 in order to save memory.
 *
 * Adopters are supposed to define 3 macros:
 * - @c *_BITS:  number of storage bits used for the packing,
 * - @c *_SHIFT: number of non-significant low bits (expected to be 0),
 * - @c *_BASE:  the base against which to encode.
 *
 * The encoding is a no-op when @c *_BITS is equal to @c __WORDSIZE and
 * @c *_SHIFT is 0.
 *
 * The convenience macro @c VM_PACKING_PARAMS can be used to create
 * a @c vm_packing_params_t structure out of those definitions.
 *
 * It is customary to declare a constant global per scheme, so that debuggers
 * can dynamically decide how to unpack the various schemes.
 *
 * This uses 2 possible schemes (both of which preserve @c NULL):
 *
 * 1. When the storage bits and shift are sufficiently large (strictly more
 *    than VM_KERNEL_POINTER_SIGNIFICANT_BITS), a sign-extension scheme can
 *    be used.
 *
 *    This allows representing any kernel pointer.
 *
 * 2. Else, a base-relative scheme can be used; typical bases are:
 *
 *    - @c KERNEL_PMAP_HEAP_RANGE_START when only pointers to heap (zone)
 *      allocated objects need to be packed,
 *
 *    - @c VM_MIN_KERNEL_AND_KEXT_ADDRESS when pointers to kernel globals also
 *      need this.
 *
 *    When such an encoding is used, @c zone_restricted_va_max() must be
 *    taught about it.
 */
typedef struct vm_packing_params {
	vm_offset_t vmpp_base;
	uint8_t     vmpp_bits;
	uint8_t     vmpp_shift;
	bool        vmpp_base_relative;
} vm_packing_params_t;
/*!
 * @macro VM_PACKING_IS_BASE_RELATIVE
 *
 * @brief
 * Whether the packing scheme with those parameters will be base-relative.
 */
#define VM_PACKING_IS_BASE_RELATIVE(ns) \
	(ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS)
/*!
 * @macro VM_PACKING_PARAMS
 *
 * @brief
 * Constructs a @c vm_packing_params_t structure based on the convention that
 * macros with the @c _BASE, @c _BITS and @c _SHIFT suffixes have been defined
 * to the proper values.
 */
#define VM_PACKING_PARAMS(ns) \
	(vm_packing_params_t){ \
	    .vmpp_base  = ns##_BASE, \
	    .vmpp_bits  = ns##_BITS, \
	    .vmpp_shift = ns##_SHIFT, \
	    .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \
	}
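/*
 * Example adopter (the FOO_* names are hypothetical): packing pointers to
 * 64-byte aligned heap objects into 31 bits, relative to the heap base, and
 * publishing the parameters so debuggers can unpack the scheme:
 *
 *	#define FOO_PACKED_BITS   31
 *	#define FOO_PACKED_SHIFT  6
 *	#define FOO_PACKED_BASE   KERNEL_PMAP_HEAP_RANGE_START
 *
 *	const vm_packing_params_t foo_packing_params =
 *	    VM_PACKING_PARAMS(FOO_PACKED);
 */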
/**
 * @function vm_pack_pointer
 *
 * @brief
 * Packs a pointer according to the specified parameters.
 *
 * @discussion
 * The convenience @c VM_PACK_POINTER macro can be used to synthesize
 * the @c params argument.
 *
 * @param ptr           The pointer to pack.
 * @param params        The encoding parameters.
 * @returns             The packed pointer.
 */
static inline vm_offset_t
vm_pack_pointer(vm_offset_t ptr, vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		return ptr >> params.vmpp_shift;
	}
	if (ptr) {
		return (ptr - params.vmpp_base) >> params.vmpp_shift;
	}
	return (vm_offset_t)0;
}
#define VM_PACK_POINTER(ptr, ns) \
	vm_pack_pointer(ptr, VM_PACKING_PARAMS(ns))
/**
 * @function vm_unpack_pointer
 *
 * @brief
 * Unpacks a pointer packed with @c vm_pack_pointer().
 *
 * @discussion
 * The convenience @c VM_UNPACK_POINTER macro can be used to synthesize
 * the @c params argument.
 *
 * @param packed        The packed value to decode.
 * @param params        The encoding parameters.
 * @returns             The unpacked pointer.
 */
static inline vm_offset_t
vm_unpack_pointer(vm_offset_t packed, vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		intptr_t addr = (intptr_t)packed;
		addr <<= __WORDSIZE - params.vmpp_bits;
		addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift;
		return (vm_offset_t)addr;
	}
	if (packed) {
		return (packed << params.vmpp_shift) + params.vmpp_base;
	}
	return (vm_offset_t)0;
}
#define VM_UNPACK_POINTER(packed, ns) \
	vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns))
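/*
 * Example (illustrative only; FOO_PACKED is the hypothetical scheme from
 * above): pack/unpack round-trip, including the preserved NULL case:
 *
 *	vm_offset_t packed = VM_PACK_POINTER((vm_offset_t)obj, FOO_PACKED);
 *	assert(VM_UNPACK_POINTER(packed, FOO_PACKED) == (vm_offset_t)obj);
 *	assert(VM_PACK_POINTER(0, FOO_PACKED) == 0);  // NULL packs to 0
 */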
/**
 * @function vm_packing_max_packable
 *
 * @brief
 * Returns the largest packable address for the given parameters.
 *
 * @discussion
 * The convenience @c VM_PACKING_MAX_PACKABLE macro can be used to synthesize
 * the @c params argument.
 *
 * @param params        The encoding parameters.
 * @returns             The largest packable pointer.
 */
static inline vm_offset_t
vm_packing_max_packable(vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		return VM_MAX_KERNEL_ADDRESS;
	}

	vm_offset_t ptr = params.vmpp_base +
	    (((1ul << params.vmpp_bits) - 1) << params.vmpp_shift);

	return ptr >= params.vmpp_base ? ptr : VM_MAX_KERNEL_ADDRESS;
}
#define VM_PACKING_MAX_PACKABLE(ns) \
	vm_packing_max_packable(VM_PACKING_PARAMS(ns))
__abortlike
extern void
vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params);
/**
 * @function vm_verify_pointer_packable
 *
 * @brief
 * Panics if the specified pointer cannot be packed with the specified
 * parameters.
 *
 * @discussion
 * The convenience @c VM_VERIFY_POINTER_PACKABLE macro can be used to
 * synthesize the @c params argument.
 *
 * The convenience @c VM_ASSERT_POINTER_PACKABLE macro can also be used to
 * synthesize the @c params argument; it is erased when assertions are
 * disabled.
 *
 * @param ptr           The pointer to verify.
 * @param params        The encoding parameters.
 */
static inline void
vm_verify_pointer_packable(vm_offset_t ptr, vm_packing_params_t params)
{
	if (ptr & ((1ul << params.vmpp_shift) - 1)) {
		vm_packing_pointer_invalid(ptr, params);
	}
	if (!params.vmpp_base_relative || ptr == 0) {
		return;
	}
	if (ptr <= params.vmpp_base || ptr > vm_packing_max_packable(params)) {
		vm_packing_pointer_invalid(ptr, params);
	}
}
#define VM_VERIFY_POINTER_PACKABLE(ptr, ns) \
	vm_verify_pointer_packable(ptr, VM_PACKING_PARAMS(ns))

#if DEBUG || DEVELOPMENT
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) \
	VM_VERIFY_POINTER_PACKABLE(ptr, ns)
#else
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) ((void)(ptr))
#endif
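/*
 * Example (illustrative only; elem, node and FOO_PACKED are hypothetical):
 * validating a pointer before storing its packed form in a space-constrained
 * field; the check disappears on release builds:
 *
 *	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)elem, FOO_PACKED);
 *	node->packed_elem = VM_PACK_POINTER((vm_offset_t)elem, FOO_PACKED);
 */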
/**
 * @function vm_packing_verify_range
 *
 * @brief
 * Panics if some pointers in the specified range can't be packed with the
 * specified parameters.
 *
 * @param subsystem     The subsystem requiring the packing.
 * @param min_address   The smallest address of the range.
 * @param max_address   The largest address of the range.
 * @param params        The encoding parameters.
 */
extern void
vm_packing_verify_range(
	const char         *subsystem,
	vm_offset_t         min_address,
	vm_offset_t         max_address,
	vm_packing_params_t params);
#endif  /* XNU_KERNEL_PRIVATE */
extern vm_size_t        page_size;
extern vm_size_t        page_mask;
extern int              page_shift;
/* We need a way to get rid of compiler warnings when we cast from   */
/* a 64-bit value to an address (which may be 32 bits or 64 bits).   */
/* An intptr_t is used to convert the value to the right precision,  */
/* and then to an address. This macro is also used to convert        */
/* addresses to 32-bit integers, which is a hard failure for a       */
/* 64-bit kernel.                                                    */
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
    ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
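/*
 * Example (illustrative only; uaddr is a hypothetical 64-bit value):
 * CAST_DOWN() is designed to fail the build (division by a constant 0)
 * when the destination type is narrower than a pointer, while
 * CAST_DOWN_EXPLICIT() always performs the cast:
 *
 *	vm_offset_t off = CAST_DOWN(vm_offset_t, uaddr);        // OK on LP64
 *	uint32_t    u32 = CAST_DOWN_EXPLICIT(uint32_t, uaddr);  // deliberate
 */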
#endif  /* ASSEMBLER */

#endif  /* _MACH_VM_PARAM_H_ */