/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <kern/trustcache.h>

#ifdef KERNEL_PRIVATE
/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
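
/*
 * Illustrative sketch (not part of the interface): one plausible lifecycle
 * of a pmap as seen by machine-independent code -- create, enter a mapping,
 * remove it, then drop the reference. All routines and constants used here
 * are declared later in this header; the ledger, address, and page number
 * are caller-supplied placeholders.
 */
#if 0   /* example only */
static kern_return_t
example_pmap_lifecycle(ledger_t ledger, vm_map_offset_t va, ppnum_t pn)
{
	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
	kern_return_t kr;

	if (pmap == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	kr = pmap_enter(pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_NONE, 0, FALSE);
	if (kr == KERN_SUCCESS) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	pmap_destroy(pmap);             /* release our reference */
	return kr;
}
#endif  /* example only */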
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
#define cppvPsnk        1
#define cppvPsnkb      31
#define cppvPsrc        2
#define cppvPsrcb      30
#define cppvFsnk        4
#define cppvFsnkb      29
#define cppvFsrc        8
#define cppvFsrcb      28
#define cppvNoModSnk   16
#define cppvNoModSnkb  27
#define cppvNoRefSrc   32
#define cppvNoRefSrcb  26
#define cppvKmap       64       /* Use the kernel's vm_map */
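
/*
 * Illustrative sketch (not part of the interface): copying one page of
 * physical memory to a kernel virtual address with copypv(). The flag
 * combination below marks the source as physical and routes the virtual
 * sink through the kernel's vm_map; the addresses are placeholders.
 */
#if 0   /* example only */
static kern_return_t
example_copy_phys_to_kva(addr64_t src_phys, addr64_t dst_kva)
{
	return copypv(src_phys, dst_kva, PAGE_SIZE, cppvPsrc | cppvKmap);
}
#endif  /* example only */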
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);
#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
#include <vm/memory_types.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */
extern void *pmap_steal_memory(vm_size_t size);           /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);  /* Early memory allocation */

extern uint_t pmap_free_pages(void);    /* report remaining unused physical pages */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */

extern void pmap_init(void);            /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */
#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_next_page
 *		pmap_virtual_space
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */
/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
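
/*
 * Illustrative sketch (not part of the interface): how a startup loop in
 * vm_resident.c-style code might drain the remaining unused pages.
 * example_consume_page() is hypothetical.
 */
#if 0   /* example only */
static void
example_drain_unused_pages(void)
{
	ppnum_t pnum;

	/* pmap_next_page() returns FALSE once no unused pages remain. */
	while (pmap_next_page(&pnum)) {
		example_consume_page(pnum);     /* hypothetical consumer */
	}
}
#endif  /* example only */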
/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */
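
/*
 * Illustrative sketch (not part of the interface): querying the kernel's
 * available virtual range during bootstrap.
 */
#if 0   /* example only */
static void
example_report_virtual_space(void)
{
	vm_offset_t vstart, vend;

	pmap_virtual_space(&vstart, &vend);
	printf("kernel virtual space: 0x%lx-0x%lx\n",
	    (unsigned long)vstart, (unsigned long)vend);
}
#endif  /* example only */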
/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	ledger_t        ledger,
	vm_map_size_t   size,
	unsigned int    flags);

extern pmap_t(pmap_kernel)(void);       /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);  /* Release a reference. */
extern void pmap_switch(pmap_t);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */
extern kern_return_t pmap_enter(        /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

extern kern_return_t pmap_enter_options(
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);

extern kern_return_t pmap_enter_options_addr(
	pmap_t          pmap,
	vm_map_offset_t v,
	pmap_paddr_t    pa,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);
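
/*
 * Illustrative sketch (not part of the interface): a non-blocking mapping
 * attempt. With PMAP_OPTIONS_NOWAIT (defined later in this header),
 * pmap_enter_options() returns KERN_RESOURCE_SHORTAGE instead of blocking.
 */
#if 0   /* example only */
static kern_return_t
example_enter_nowait(pmap_t pmap, vm_map_offset_t va, ppnum_t pn)
{
	kern_return_t kr;

	kr = pmap_enter_options(pmap, va, pn, VM_PROT_READ, VM_PROT_NONE,
	    0, FALSE, PMAP_OPTIONS_NOWAIT, NULL);
	if (kr == KERN_RESOURCE_SHORTAGE) {
		/* Caller may block, then retry without PMAP_OPTIONS_NOWAIT. */
	}
	return kr;
}
#endif  /* example only */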
extern void pmap_remove_some_phys(
	pmap_t          pmap,
	ppnum_t         pn);

extern void pmap_lock_phys_page(
	ppnum_t         pn);

extern void pmap_unlock_phys_page(
	ppnum_t         pn);
/*
 *	Routines that operate on physical addresses.
 */

extern void pmap_page_protect(  /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_zero_page)(
	ppnum_t         pn);

extern void(pmap_zero_part_page)(
	ppnum_t         pn,
	vm_offset_t     offset,
	vm_size_t       len);

extern void(pmap_copy_page)(
	ppnum_t         src,
	ppnum_t         dest);

extern void(pmap_copy_part_page)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t     src,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_rpage)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	vm_offset_t     dst,
	vm_size_t       len);
extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys,
	unsigned int    options,
	void            *arg);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate
                                                         * cache based on
                                                         * page number sent */
	ppnum_t         pn,
	vm_size_t       size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);
extern unsigned int(pmap_cache_attributes)(
	ppnum_t         pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	ppnum_t         pn,
	unsigned int    cacheattr);

extern void *pmap_map_compressor_page(ppnum_t pn);
extern void pmap_unmap_compressor_page(ppnum_t pn, void *kva);
#if defined(__arm__) || defined(__arm64__)
extern boolean_t pmap_batch_set_cache_attributes(
	ppnum_t         pn,
	unsigned int    cacheattr,
	unsigned int    page_cnt,
	unsigned int    page_index,
	boolean_t       doit,
	unsigned int    *res);
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);
extern void pmap_assert_free(ppnum_t pn);
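
/*
 * Illustrative sketch (not part of the interface): checking that a page is
 * unmapped before recycling it.
 */
#if 0   /* example only */
static void
example_check_page_unmapped(ppnum_t pn)
{
	if (!pmap_verify_free(pn)) {
		/* Some pmap still maps pn; do not recycle it yet. */
		return;
	}
	pmap_assert_free(pn);   /* panics if pn is in fact still mapped */
}
#endif  /* example only */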
/*
 *	Statistics routines
 */
extern int(pmap_compressed)(pmap_t pmap);
extern int(pmap_resident_count)(pmap_t pmap);
extern int(pmap_resident_max)(pmap_t pmap);
/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage
                                         * collection, if any */
#endif /* CURRENTLY_UNUSED_AND_UNTESTED */
extern void(pmap_copy)(         /* Copy range of mappings,
                                 * if desired. */
	pmap_t          dest,
	pmap_t          source,
	vm_map_offset_t dest_va,
	vm_map_size_t   size,
	vm_map_offset_t source_va);
extern kern_return_t(pmap_attribute)(   /* Get/Set special memory
                                         * attributes */
	pmap_t          pmap,
	vm_map_offset_t va,
	vm_map_size_t   size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);
/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t  pmap;                                   \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */
#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t  pmap;                                   \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */
#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */
/*
 * Macro to be used in place of pmap_enter()
 */
#ifndef PMAP_ENTER
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                       \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __options = 0;                                  \
	vm_object_t     __obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __options |= PMAP_OPTIONS_INTERNAL;                     \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __options |= PMAP_OPTIONS_REUSABLE;                     \
	}                                                               \
	result = pmap_enter_options(__pmap, (virtual_address),          \
	    VM_PAGE_GET_PHYS_PAGE(__page), (protection), (fault_type),  \
	    (flags), (wired), __options, NULL);                         \
	MACRO_END
#endif  /* !PMAP_ENTER */
#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,    \
	    page, protection, fault_type, flags, wired, options, result) \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __extra_options = 0;                            \
	vm_object_t     __obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;               \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;               \
	}                                                               \
	result = pmap_enter_options_addr(__pmap, (virtual_address),     \
	    (ptoa_64(VM_PAGE_GET_PHYS_PAGE(__page))                     \
	    + (fault_phys_offset)),                                     \
	    (protection), (fault_type), (flags), (wired),               \
	    (options) | __extra_options, NULL);                         \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */
#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)     \
	MACRO_BEGIN                                                     \
	if (!batch_pmap_op) {                                           \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	        object->set_cache_attr = TRUE;                          \
	}                                                               \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx = 0;                            \
	        unsigned int res = 0;                                   \
	        boolean_t batch = TRUE;                                 \
	        while (__page_idx < (num_pages)) {                      \
	                if (!pmap_batch_set_cache_attributes(           \
	                            user_page_list[__page_idx].phys_addr, \
	                            (cache_attr), (num_pages),          \
	                            (__page_idx), FALSE, &res)) {       \
	                        batch = FALSE;                          \
	                        break;                                  \
	                }                                               \
	                __page_idx++;                                   \
	        }                                                       \
	        __page_idx = 0;                                         \
	        while (__page_idx < (num_pages)) {                      \
	                if (batch) {                                    \
	                        (void)pmap_batch_set_cache_attributes(  \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr), (num_pages),      \
	                                (__page_idx), TRUE, &res);      \
	                } else {                                        \
	                        pmap_set_cache_attributes(              \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr));                  \
	                }                                               \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#else /* defined(__arm__) || defined(__arm64__) */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx = 0;                            \
	        while (__page_idx < (num_pages)) {                      \
	                pmap_set_cache_attributes(                      \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr));                          \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#endif /* defined(__arm__) || defined(__arm64__) */
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */
#define PMAP_ENTER_CHECK(pmap, page)                                    \
{                                                                       \
	if ((page)->vmp_error) {                                        \
	        panic("VM page %p should not have an error\n",          \
	            (page));                                            \
	}                                                               \
}
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long    pfc_cpus;
	long    pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;
/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */

extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
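
/*
 * Illustrative sketch (not part of the interface): testing and clearing a
 * page's ref/mod state with the mask bits defined above.
 */
#if 0   /* example only */
static boolean_t
example_page_was_dirtied(ppnum_t pn)
{
	unsigned int refmod = pmap_get_refmod(pn);

	if (refmod & VM_MEM_MODIFIED) {
		pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
		return TRUE;
	}
	return FALSE;
}
#endif  /* example only */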
/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int mask,
	unsigned int options);
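
/*
 * Illustrative sketch (not part of the interface): because the range call
 * may be unsupported, callers MUST handle a false return and fall back to
 * the per-page primitive. example_page_for_va() is hypothetical.
 */
#if 0   /* example only */
static void
example_clear_range_referenced(pmap_t pmap, vm_map_address_t start,
    vm_map_address_t end)
{
	if (pmap_clear_refmod_range_options(pmap, start, end,
	    VM_MEM_REFERENCED, 0)) {
		return;         /* whole range handled in one call */
	}
	/* Unsupported here; nothing was modified, so clear page by page. */
	for (vm_map_address_t va = start; va < end; va += PAGE_SIZE) {
		ppnum_t pn = example_page_for_va(pmap, va);     /* hypothetical */
		pmap_clear_refmod(pn, VM_MEM_REFERENCED);
	}
}
#endif  /* example only */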
extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_pageable)(
	pmap_t          pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);
/* TODO: <rdar://problem/65247502> Completely remove pmap_nesting_size_max() */
extern uint64_t pmap_nesting_size_max(pmap_t map);
extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1
extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */
extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);
/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */
extern pmap_t   kernel_pmap;            /* The kernel's map */
#define         pmap_kernel()           (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200
/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else /* __x86_64__ */

#define PMAP_CREATE_STAGE2      0
#define PMAP_CREATE_DISABLE_JOP 0
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#define PMAP_CREATE_X86_64      0

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS \
	(PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64)

#endif /* __x86_64__ */
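
/*
 * Illustrative sketch (not part of the interface): rejecting unknown
 * creation flags with PMAP_CREATE_KNOWN_FLAGS before creating a pmap.
 */
#if 0   /* example only */
static pmap_t
example_create_checked(ledger_t ledger, unsigned int flags)
{
	if (flags & ~PMAP_CREATE_KNOWN_FLAGS) {
		return NULL;    /* reject undefined flag bits */
	}
	return pmap_create_options(ledger, 0, flags);
}
#endif  /* example only */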
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
                                                 * KERN_RESOURCE_SHORTAGE
                                                 * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
                                                 * but don't enter mapping
                                                 */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
                                                 * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE       0x200   /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE     0x400   /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE  0x1000  /* allow protections to be
                                                 * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE        0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif /* defined(__arm__) || defined(__arm64__) */
#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif /* !defined(__LP64__) */

extern void pmap_change_wiring( /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);
/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);
extern void fillPage(ppnum_t pa, unsigned int fill);
#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif /* defined(__LP64__) */
extern mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);
extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);
/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);
/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);
/*
 * Dump page table contents into the specified buffer. Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end,
    unsigned int level_mask, size_t *bytes_copied);
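
/*
 * Illustrative sketch (not part of the interface): dumping page tables from
 * debugger context and checking the documented failure codes. The level
 * mask of ~0u ("all levels") is an assumption for illustration.
 */
#if 0   /* example only */
static size_t
example_dump_page_tables(pmap_t pmap, void *buf, size_t buf_size)
{
	size_t copied = 0;
	kern_return_t kr = pmap_dump_page_tables(pmap, buf,
	    (char *)buf + buf_size, ~0u, &copied);

	if (kr == KERN_INSUFFICIENT_BUFFER_SIZE || kr == KERN_NOT_SUPPORTED) {
		return 0;
	}
	return copied;
}
#endif  /* example only */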
/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);
/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);
#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);
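
/*
 * Illustrative sketch (not part of the interface): decoding the disposition
 * bits reported by pmap_query_page_info().
 */
#if 0   /* example only */
static bool
example_va_is_compressed(pmap_t pmap, vm_map_offset_t va)
{
	int disp = 0;

	if (pmap_query_page_info(pmap, va, &disp) != KERN_SUCCESS) {
		return false;
	}
	return (disp & PMAP_QUERY_PAGE_COMPRESSED) != 0;
}
#endif  /* example only */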
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#ifdef PLATFORM_BridgeOS
struct pmap_legacy_trust_cache {
	struct pmap_legacy_trust_cache *next;
	uuid_t uuid;
	uint32_t num_hashes;
	uint8_t hashes[][CS_CDHASH_LEN];
};
#else
struct pmap_legacy_trust_cache;
#endif /* PLATFORM_BridgeOS */
extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
    const vm_size_t trust_cache_len);
typedef enum {
	PMAP_TC_TYPE_PERSONALIZED,
	PMAP_TC_TYPE_PDI,
	PMAP_TC_TYPE_CRYPTEX,
	PMAP_TC_TYPE_ENGINEERING,
	PMAP_TC_TYPE_GLOBAL_FF00,
	PMAP_TC_TYPE_GLOBAL_FF01,
} pmap_tc_type_t;
#define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1
struct pmap_image4_trust_cache {
	// Filled by pmap layer.
	struct pmap_image4_trust_cache const *next;     // linked list linkage
	struct trust_cache_module1 const *module;       // pointer into module (within data below)

	// Filled by caller.
	// data is either an image4,
	// or just the trust cache payload itself if the image4 manifest is external.
	pmap_tc_type_t type;
	uint8_t const bnch[48];
	size_t data_len;
	uint8_t const data[];
};
typedef enum {
	PMAP_TC_SUCCESS = 0,
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
	PMAP_TC_MANIFEST_VIOLATION = -9,
	PMAP_TC_PAYLOAD_VIOLATION = -10,
	PMAP_TC_EXPIRED = -11,
	PMAP_TC_CRYPTO_WRONG = -12,
	PMAP_TC_OBJECT_WRONG = -13,
	PMAP_TC_UNKNOWN_CALLER = -14,
	PMAP_TC_UNKNOWN_FAILURE = -15,
} pmap_tc_ret_t;
#define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1
extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);
extern pmap_tc_ret_t pmap_load_image4_trust_cache(
	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
	uint8_t const *img4_manifest,
	vm_size_t img4_manifest_buffer_len,
	vm_size_t img4_manifest_actual_len,
	bool dry_run);
extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_in_ppl(void);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);
extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);
#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap)    false
#endif /* __arm64__ */
#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */