/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#include <kern/trustcache.h>

#ifdef  KERNEL_PRIVATE
/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
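/*
 * Illustrative sketch of the typical pmap lifecycle.  This is not a call
 * sequence taken from this header -- several prototypes below are elided in
 * this excerpt, so the argument lists here are assumptions:
 *
 *	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
 *	if (pmap != NULL) {
 *		(void) pmap_enter(pmap, vaddr, pn,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
 *		pmap_remove(pmap, vaddr, vaddr + PAGE_SIZE);
 *		pmap_destroy(pmap);            (drop the creation reference)
 *	}
 */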
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	/* ... */);
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
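/*
 * Sketch of a copypv() call: copy one page of kernel virtual data to a
 * physical page without marking the source referenced.  The copypv()
 * argument list and the direction flags (cppvPsrc/cppvPsnk) are elided in
 * this excerpt, so the argument order shown here is an assumption:
 *
 *	kern_return_t kr = copypv(kernel_va, (addr64_t)ptoa_64(pn),
 *	    PAGE_SIZE, cppvKmap | cppvNoRefSrc);
 */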
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
#include <vm/memory_types.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size);                 /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);        /* Early memory allocation */

extern uint_t pmap_free_pages(void);            /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern uint_t pmap_free_pages_span(void);       /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp);       /* allocate vm_page structs */

extern void pmap_init(void);            /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */
#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the functions declared below,
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */
/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */
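/*
 * Illustrative sketch of how a MACHINE_PAGES-less startup path consumes the
 * interfaces above (an assumption about usage, not code from this file): the
 * machine-independent code asks for the kernel virtual range, then pulls
 * unused physical pages one at a time until the pmap reports none are left.
 *
 *	vm_offset_t vstart, vend;
 *	ppnum_t pn;
 *
 *	pmap_virtual_space(&vstart, &vend);
 *	while (pmap_next_page(&pn)) {
 *		(initialize a vm_page struct for physical page pn)
 *	}
 */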
/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	/* ... */);
extern pmap_t(pmap_kernel)(void);       /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);  /* Release a reference. */
extern void pmap_switch(pmap_t);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
	/* ... */);
#endif /* MACH_ASSERT */
extern kern_return_t pmap_enter(        /* Enter a mapping */
	/* ... */
	vm_prot_t fault_type
	/* ... */);

extern kern_return_t pmap_enter_options(
	/* ... */
	vm_prot_t fault_type,
	/* ... */
	unsigned int options
	/* ... */);

extern kern_return_t pmap_enter_options_addr(
	/* ... */
	vm_prot_t fault_type,
	/* ... */
	unsigned int options
	/* ... */);

extern void pmap_remove_some_phys(
	/* ... */);

extern void pmap_lock_phys_page(
	/* ... */);

extern void pmap_unlock_phys_page(
	/* ... */);
/*
 * Routines that operate on physical addresses.
 */

extern void pmap_page_protect(  /* Restrict access to page. */
	/* ... */);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	/* ... */
	unsigned int options
	/* ... */);

extern void(pmap_zero_page)(
	/* ... */);

extern void(pmap_zero_part_page)(
	/* ... */);

extern void(pmap_copy_page)(
	/* ... */);

extern void(pmap_copy_part_page)(
	/* ... */
	vm_offset_t src_offset,
	/* ... */
	vm_offset_t dst_offset
	/* ... */);

extern void(pmap_copy_part_lpage)(
	/* ... */
	vm_offset_t dst_offset
	/* ... */);

extern void(pmap_copy_part_rpage)(
	/* ... */
	vm_offset_t src_offset
	/* ... */);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	/* ... */);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	/* ... */
	unsigned int options
	/* ... */);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate cache based on page number sent */
	/* ... */
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);

extern unsigned int(pmap_cache_attributes)(
	/* ... */);
/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	/* ... */);

extern void *pmap_map_compressor_page(
	/* ... */);

extern void pmap_unmap_compressor_page(
	/* ... */);

#if defined(__arm__) || defined(__arm64__)
extern boolean_t pmap_batch_set_cache_attributes(
	/* ... */);
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);
extern void pmap_assert_free(ppnum_t pn);

/*
 * Statistics routines
 */
extern int(pmap_compressed)(pmap_t pmap);
extern int(pmap_resident_count)(pmap_t pmap);
extern int(pmap_resident_max)(pmap_t pmap);
/*
 * Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage
                                         * collection, if any */
#endif /* CURRENTLY_UNUSED_AND_UNTESTED */

extern void(pmap_copy)(                 /* Copy range of mappings, if desired. */
	/* ... */
	vm_map_offset_t dest_va,
	/* ... */
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory attributes */
	/* ... */
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);
/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t  pmap;                                   \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t  pmap;                                   \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */
#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                               \
	MACRO_BEGIN                                             \
	pmap_t          __pmap = (pmap);                        \
	vm_page_t       __page = (page);                        \
	int             __options = 0;                          \
	vm_object_t     __obj;                                  \
	                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                        \
	__obj = VM_PAGE_OBJECT(__page);                         \
	if (__obj->internal) {                                  \
	        __options |= PMAP_OPTIONS_INTERNAL;             \
	}                                                       \
	if (__page->vmp_reusable || __obj->all_reusable) {      \
	        __options |= PMAP_OPTIONS_REUSABLE;             \
	}                                                       \
	result = pmap_enter_options(__pmap,                     \
	        /* ... */                                       \
	        VM_PAGE_GET_PHYS_PAGE(__page),                  \
	        /* ... */);                                     \
	MACRO_END
#endif  /* !PMAP_ENTER */
#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,    \
	    page, /* ... */                                      \
	    fault_type, flags, wired, options, result)           \
	MACRO_BEGIN                                              \
	pmap_t          __pmap = (pmap);                         \
	vm_page_t       __page = (page);                         \
	int             __extra_options = 0;                     \
	vm_object_t     __obj;                                   \
	                                                         \
	PMAP_ENTER_CHECK(__pmap, __page)                         \
	__obj = VM_PAGE_OBJECT(__page);                          \
	if (__obj->internal) {                                   \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;        \
	}                                                        \
	if (__page->vmp_reusable || __obj->all_reusable) {       \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;        \
	}                                                        \
	result = pmap_enter_options_addr(__pmap,                 \
	        /* ... */                                        \
	        (/* ... */ VM_PAGE_GET_PHYS_PAGE(__page)         \
	        /* ... */ + fault_phys_offset),                  \
	        /* ... */                                        \
	        (options) | __extra_options,                     \
	        /* ... */);                                      \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */
#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	if (!batch_pmap_op) {                                                   \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	        object->set_cache_attr = TRUE;                                  \
	}                                                                       \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                \
	    cache_attr, num_pages, batch_pmap_op)                        \
	MACRO_BEGIN                                                      \
	if ((batch_pmap_op)) {                                           \
	        unsigned int __page_idx = 0;                             \
	        unsigned int res = 0;                                    \
	        boolean_t batch = TRUE;                                  \
	        while (__page_idx < (num_pages)) {                       \
	                if (!pmap_batch_set_cache_attributes(            \
	                            user_page_list[__page_idx].phys_addr,\
	                            /* ... */))                          \
	                        /* ... */;                               \
	                /* ... */                                        \
	        }                                                        \
	        __page_idx = 0;                                          \
	        while (__page_idx < (num_pages)) {                       \
	                /* ... */                                        \
	                (void)pmap_batch_set_cache_attributes(           \
	                    user_page_list[__page_idx].phys_addr,        \
	                    /* ... */);                                  \
	                /* ... */                                        \
	                pmap_set_cache_attributes(                       \
	                    user_page_list[__page_idx].phys_addr,        \
	                    /* ... */);                                  \
	                /* ... */                                        \
	        }                                                        \
	        (object)->set_cache_attr = TRUE;                         \
	}                                                                \
	MACRO_END
#else /* defined(__arm__) || defined(__arm64__) */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                \
	    cache_attr, num_pages, batch_pmap_op)                        \
	MACRO_BEGIN                                                      \
	if ((batch_pmap_op)) {                                           \
	        unsigned int __page_idx = 0;                             \
	        while (__page_idx < (num_pages)) {                       \
	                pmap_set_cache_attributes(                       \
	                    user_page_list[__page_idx].phys_addr,        \
	                    /* ... */);                                  \
	                /* ... */                                        \
	        }                                                        \
	        (object)->set_cache_attr = TRUE;                         \
	}                                                                \
	MACRO_END
#endif /* defined(__arm__) || defined(__arm64__) */
#endif /* PMAP_BATCH_SET_CACHE_ATTR */
#define PMAP_ENTER_CHECK(pmap, page)                            \
{                                                               \
	if ((page)->vmp_error) {                                \
	        panic("VM page %p should not have an error\n",  \
	            (page));                                    \
	}                                                       \
}
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	/* ... */
	long    pfc_invalid_global;
};

typedef struct pfc      pmap_flush_context;
/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */

extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
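/*
 * Illustrative sketch (assumed usage, not code from this header): inspect
 * and then clear the ref/mod state of one physical page.
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *	if (refmod & VM_MEM_MODIFIED) {
 *		(the page is dirty and must be cleaned before reuse)
 *	}
 *	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 */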
/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	/* ... */
	vm_map_address_t start,
	vm_map_address_t end,
	/* ... */
	unsigned int options);
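/*
 * Because the range-based clear is not supported everywhere, callers are
 * expected to fall back to the per-page primitive.  A sketch (assumed
 * argument lists, with a hypothetical loop over the resident pages):
 *
 *	if (!pmap_clear_refmod_range_options(pmap, start, end,
 *	        VM_MEM_REFERENCED, options)) {
 *		for (each resident page pn in [start, end)) {
 *			pmap_clear_refmod_options(pn, VM_MEM_REFERENCED,
 *			    options, NULL);
 *		}
 *	}
 */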
extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
/*
 * Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	/* ... */);

extern void pmap_protect_options(       /* Change protections. */
	/* ... */
	unsigned int options
	/* ... */);

extern void(pmap_pageable)(
	/* ... */
	vm_map_offset_t start
	/* ... */);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

/* TODO: <rdar://problem/65247502> Completely remove pmap_nesting_size_max() */
extern uint64_t pmap_nesting_size_max(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
	/* ... */);
extern kern_return_t pmap_unnest(pmap_t,
	/* ... */);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_unnest_options(pmap_t,
	/* ... */);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t   kernel_pmap;    /* The kernel's map */
#define         pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200
/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else /* __x86_64__ */

#define PMAP_CREATE_STAGE2      0

/* ... */
#define PMAP_CREATE_DISABLE_JOP 0x4
/* ... */
#define PMAP_CREATE_DISABLE_JOP 0
/* ... */

#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */

/* ... */
#define PMAP_CREATE_X86_64      0
/* ... */
#define PMAP_CREATE_X86_64      0
/* ... */

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS \
	(PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64)

#endif /* __x86_64__ */
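/*
 * Callers are expected to pass only known creation flags; a sketch of the
 * kind of validation implied by PMAP_CREATE_KNOWN_FLAGS (an assumption, not
 * code from this header):
 *
 *	if (flags & ~PMAP_CREATE_KNOWN_FLAGS) {
 *		return PMAP_NULL;       (reject unknown creation flags)
 *	}
 */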
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
	                                         * KERN_RESOURCE_SHORTAGE instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
	                                         * but don't enter mapping */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE       0x200   /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE     0x400   /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
	                                            * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to be
	                                         * changed immediately */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif /* defined(__arm__) || defined(__arm64__) */
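/*
 * Sketch of a non-blocking mapping attempt using the options above.  The
 * pmap_enter_options() argument list is elided earlier in this excerpt, so
 * the exact parameters shown here are an assumption:
 *
 *	kr = pmap_enter_options(pmap, vaddr, pn, VM_PROT_READ,
 *	    VM_PROT_NONE, 0, FALSE, PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		(retry later from a context that is allowed to block)
 *	}
 */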
#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
	/* ... */);
#endif

extern void pmap_change_wiring( /* Specify pageability */
	/* ... */);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	/* ... */);

extern void pmap_remove_options(        /* Remove mappings. */
	/* ... */);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

extern mach_vm_size_t pmap_query_resident(pmap_t pmap,
	/* ... */
	mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);
/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end,
    unsigned int level_mask, size_t *bytes_copied);
/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
extern bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
extern uint64_t pmap_release_pages_fast(void);
#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	/* ... */);
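/*
 * Sketch of querying a page's disposition.  The pmap_query_page_info()
 * argument list is elided above, so the pmap/va/&disp arguments shown here
 * are an assumption:
 *
 *	int disp = 0;
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS &&
 *	    (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
 *		(the page currently lives in the compressor)
 *	}
 */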
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#ifdef PLATFORM_BridgeOS
struct pmap_legacy_trust_cache {
	struct pmap_legacy_trust_cache *next;
	/* ... */
	uint8_t hashes[][CS_CDHASH_LEN];
};
#else
struct pmap_legacy_trust_cache;
#endif

extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
    const vm_size_t trust_cache_len);
enum {
	PMAP_TC_TYPE_PERSONALIZED,
	/* ... */
	PMAP_TC_TYPE_CRYPTEX,
	PMAP_TC_TYPE_ENGINEERING,
	PMAP_TC_TYPE_GLOBAL_FF00,
	PMAP_TC_TYPE_GLOBAL_FF01,
	/* ... */
};

#define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1
struct pmap_image4_trust_cache {
	// Filled by pmap layer.
	struct pmap_image4_trust_cache const *next;     // linked list linkage
	struct trust_cache_module1 const *module;       // pointer into module (within data below)

	/* ... */

	// data is either an image4,
	// or just the trust cache payload itself if the image4 manifest is external.
	/* ... */
	uint8_t const bnch[48];
	/* ... */
	uint8_t const data[];
};

typedef enum {
	/* ... */
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
	PMAP_TC_MANIFEST_VIOLATION = -9,
	PMAP_TC_PAYLOAD_VIOLATION = -10,
	PMAP_TC_EXPIRED = -11,
	PMAP_TC_CRYPTO_WRONG = -12,
	PMAP_TC_OBJECT_WRONG = -13,
	PMAP_TC_UNKNOWN_CALLER = -14,
	PMAP_TC_UNKNOWN_FAILURE = -15,
} pmap_tc_ret_t;
#define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1
extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);
extern pmap_tc_ret_t pmap_load_image4_trust_cache(
	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
	uint8_t const *img4_manifest,
	vm_size_t img4_manifest_buffer_len,
	vm_size_t img4_manifest_actual_len,
	/* ... */);

extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

extern void pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);

extern bool pmap_in_ppl(void);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);
extern void pmap_retire_page(ppnum_t ppn);
extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);
#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */