/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#include <kern/trustcache.h>

#ifdef KERNEL_PRIVATE
/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t    copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
#define cppvPsnk        1
#define cppvPsnkb       31
#define cppvPsrc        2
#define cppvPsrcb       30
#define cppvFsnk        4
#define cppvFsnkb       29
#define cppvFsrc        8
#define cppvFsrcb       28
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
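
/*
 * Usage sketch (illustrative only, not part of the interface): copy one
 * page from a kernel-virtual buffer to a physical page. cppvPsnk marks the
 * sink address as physical; cppvKmap resolves the virtual side in the
 * kernel's vm_map. "buf" and "dst_pn" are hypothetical.
 */
#if 0
	kern_return_t kr;
	kr = copypv((addr64_t)(uintptr_t)buf,   /* source: kernel VA */
	    ptoa_64(dst_pn),                    /* sink: physical address */
	    PAGE_SIZE,
	    cppvPsnk | cppvKmap);
#endif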
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);
#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
#include <vm/memory_types.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */
extern void *pmap_steal_memory(vm_size_t size);                 /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);        /* Early memory allocation */

extern uint_t pmap_free_pages(void);    /* report remaining unused physical pages */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp);       /* allocate vm_page structs */

extern void pmap_init(void);    /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */
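
/*
 * Illustrative sketch: early boot code can grab permanent memory before the
 * page lists exist. "nentries" is hypothetical; memory stolen this way is
 * never returned to the system.
 */
#if 0
	uint64_t *early_table;
	early_table = (uint64_t *)pmap_steal_memory(nentries * sizeof(uint64_t));
#endif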
#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *
 *		pmap_free_pages
 *		pmap_next_page
 *		pmap_virtual_space
 *
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */
/*
 *	Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif
/*
 *	Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */
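
/*
 * Sketch of the consumer side (cf. vm/vm_resident.c): discover the remaining
 * kernel virtual space, then drain the unused physical pages one at a time.
 * The loop body is elided; the real code also builds vm_page structs.
 */
#if 0
	vm_offset_t kva_start, kva_end;
	ppnum_t pn;
	pmap_virtual_space(&kva_start, &kva_end);
	while (pmap_next_page(&pn)) {
		/* hand page "pn" to the VM free list */
	}
#endif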
/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t   pmap_create_options(    /* Create a pmap_t. */
	ledger_t        ledger,
	vm_map_size_t   size,
	unsigned int    flags);

extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void     pmap_reference(pmap_t pmap);    /* Gain a reference. */
extern void     pmap_destroy(pmap_t pmap);      /* Release a reference. */
extern void     pmap_switch(pmap_t);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */
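
/*
 * Lifecycle sketch (hypothetical ledger and flags): pmaps are reference
 * counted, so the last pmap_destroy() frees the structure.
 */
#if 0
	pmap_t pm = pmap_create_options(task_ledger, 0, PMAP_CREATE_64BIT);
	if (pm != PMAP_NULL) {
		pmap_reference(pm);     /* refcount now 2 */
		pmap_destroy(pm);       /* refcount back to 1 */
		pmap_destroy(pm);       /* last reference: pmap freed */
	}
#endif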
extern kern_return_t    pmap_enter(     /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

extern kern_return_t    pmap_enter_options(
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);

extern void             pmap_remove_some_phys(pmap_t pmap, ppnum_t pn);

extern void             pmap_lock_phys_page(ppnum_t pn);

extern void             pmap_unlock_phys_page(ppnum_t pn);
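
/*
 * Usage sketch: wire one page read/write into the kernel pmap. "va" and
 * "pn" are hypothetical; most callers go through the PMAP_ENTER() macro
 * defined later in this header rather than calling pmap_enter() directly.
 */
#if 0
	kern_return_t kr;
	kr = pmap_enter(kernel_pmap, va, pn,
	    VM_PROT_READ | VM_PROT_WRITE,       /* prot */
	    VM_PROT_NONE,                       /* fault_type */
	    0,                                  /* flags */
	    TRUE);                              /* wired */
	assert(kr == KERN_SUCCESS);
#endif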
/*
 *	Routines that operate on physical addresses.
 */

extern void             pmap_page_protect(      /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot);

extern void             pmap_page_protect_options(      /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_zero_page)(
	ppnum_t pn);

extern void(pmap_zero_part_page)(
	ppnum_t pn,
	vm_offset_t offset,
	vm_size_t len);

extern void(pmap_copy_page)(
	ppnum_t src,
	ppnum_t dest);

extern void(pmap_copy_part_page)(
	ppnum_t src,
	vm_offset_t src_offset,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t src,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_rpage)(
	ppnum_t src,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t len);
extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t phys,
	unsigned int options,
	void *arg);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate
                                                         * cache based on
                                                         * page number sent */
	ppnum_t pn,
	vm_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t pn);
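
/*
 * Sketch of the pageout-style pattern: sever every mapping of a physical
 * page and collect its referenced/modified state in one call. "pn" is
 * hypothetical; VM_MEM_MODIFIED is defined later in this header.
 */
#if 0
	unsigned int refmod = pmap_disconnect(pn);
	if (refmod & VM_MEM_MODIFIED) {
		/* the page is dirty and must be cleaned before reuse */
	}
#endif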
/*
 * Set (override) cache attributes for the specified physical page
 */
extern void             pmap_set_cache_attributes(ppnum_t, unsigned int);

extern void *pmap_map_compressor_page(ppnum_t);
extern void pmap_unmap_compressor_page(ppnum_t, void*);

#if defined(__arm__) || defined(__arm64__)
extern boolean_t        pmap_batch_set_cache_attributes(
	ppnum_t,
	unsigned int,
	unsigned int,
	unsigned int,
	boolean_t,
	unsigned int*);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
/*
 *	debug/assertions. pmap_verify_free returns true iff
 *	the given physical page is mapped into no pmap.
 *	pmap_assert_free() will panic() if pn is not free.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif
/*
 *	Statistics routines
 */
extern int(pmap_compressed)(pmap_t pmap);
extern int(pmap_resident_count)(pmap_t pmap);
extern int(pmap_resident_max)(pmap_t pmap);
/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void             pmap_collect(pmap_t pmap);      /* Perform garbage
                                                         * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(         /* Copy range of mappings,
                                 * if desired. */
	pmap_t dest,
	pmap_t source,
	vm_map_offset_t dest_va,
	vm_map_size_t size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory
                                         * attributes */
	pmap_t pmap,
	vm_map_offset_t va,
	vm_map_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t *value);
/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t          pmap;                           \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t          pmap;                           \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */
#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */
#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                       \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __options = 0;                                  \
	vm_object_t     __obj;                                          \
	                                                                \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __options |= PMAP_OPTIONS_INTERNAL;                     \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __options |= PMAP_OPTIONS_REUSABLE;                     \
	}                                                               \
	result = pmap_enter_options(__pmap, (virtual_address),         \
	    VM_PAGE_GET_PHYS_PAGE(__page), (protection), (fault_type), \
	    (flags), (wired), __options, NULL);                         \
	MACRO_END
#endif  /* !PMAP_ENTER */
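
/*
 * Sketch: a fault handler enters a vm_page_t through the wrapper, which
 * derives the physical page and option bits itself. "m" and "vaddr" are
 * hypothetical.
 */
#if 0
	kern_return_t pmap_result;
	PMAP_ENTER(kernel_pmap, vaddr, m, VM_PROT_READ, VM_PROT_NONE,
	    0, FALSE, pmap_result);
#endif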
#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,     \
	    fault_type, flags, wired, options, result)                  \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __extra_options = 0;                            \
	vm_object_t     __obj;                                          \
	                                                                \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;               \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;               \
	}                                                               \
	result = pmap_enter_options(__pmap, (virtual_address),         \
	    VM_PAGE_GET_PHYS_PAGE(__page), (protection), (fault_type), \
	    (flags), (wired), (options) | __extra_options, NULL);      \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */
#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	if (!batch_pmap_op) {                                                   \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	        object->set_cache_attr = TRUE;                                  \
	}                                                                       \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if     defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx=0;                              \
	        unsigned int res=0;                                     \
	        boolean_t batch=TRUE;                                   \
	        while (__page_idx < (num_pages)) {                      \
	                if (!pmap_batch_set_cache_attributes(           \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr), (num_pages),              \
	                        (__page_idx), FALSE, &res)) {           \
	                        batch = FALSE;                          \
	                        break;                                  \
	                }                                               \
	                __page_idx++;                                   \
	        }                                                       \
	        __page_idx=0;                                           \
	        while (__page_idx < (num_pages)) {                      \
	                if (batch)                                      \
	                        (void)pmap_batch_set_cache_attributes(  \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr), (num_pages),      \
	                                (__page_idx), TRUE, &res);      \
	                else                                            \
	                        pmap_set_cache_attributes(              \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr));                  \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#else /* !(defined(__arm__) || defined(__arm64__)) */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx=0;                              \
	        while (__page_idx < (num_pages)) {                      \
	                pmap_set_cache_attributes(                      \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr));                          \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */
#define PMAP_ENTER_CHECK(pmap, page)                                    \
{                                                                       \
	if ((page)->vmp_error) {                                        \
	        panic("VM page %p should not have an error\n", (page)); \
	}                                                               \
}
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long    pfc_cpus;
	long    pfc_invalid_global;
};

typedef struct pfc      pmap_flush_context;
/* Clear reference bit */
extern void             pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void             pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void             pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t        pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int     pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void             pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void             pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
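
/*
 * Sketch of the deferred-flush pattern: batch several ref/mod clears with
 * PMAP_OPTIONS_NOFLUSH (defined later in this header) and issue a single
 * combined TLB flush at the end. "pages" and "npages" are hypothetical.
 */
#if 0
	pmap_flush_context pfc;
	unsigned int i;
	pmap_flush_context_init(&pfc);
	for (i = 0; i < npages; i++) {
		pmap_clear_refmod_options(pages[i], VM_MEM_REFERENCED,
		    PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
	}
	pmap_flush(&pfc);       /* one shootdown covers the whole batch */
#endif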
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t map, vm_map_offset_t s, vm_map_offset_t e, vm_prot_t prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t map, vm_map_offset_t s, vm_map_offset_t e, vm_prot_t prot,
	unsigned int options, void *arg);

extern void(pmap_pageable)(
	pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end,
	boolean_t pageable);
extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
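
/*
 * Sketch: share a subordinate pmap's translations (e.g. a shared region)
 * inside a task pmap, then detach them. The pmaps and the base/size
 * constants are hypothetical.
 */
#if 0
	kern_return_t kr;
	kr = pmap_nest(task_pmap, shared_region_pmap,
	    SHARED_BASE, SHARED_BASE, SHARED_SIZE);
	/* task runs with the nested translations */
	kr = pmap_unnest(task_pmap, SHARED_BASE, SHARED_SIZE);
#endif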
#endif  /* MACH_KERNEL_PRIVATE */
extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);
/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t   kernel_pmap;    /* The kernel's map */
#define         pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200
/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
                                                 * KERN_RESOURCE_SHORTAGE
                                                 * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
                                                 * but don't enter mapping
                                                 */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
                                                 * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE    0x200      /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE  0x400      /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to be
                                                 * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#if     !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void pmap_change_wiring(         /* Specify pageability */
	pmap_t pmap,
	vm_map_offset_t va,
	boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int options);

extern void fillPage(ppnum_t pa, unsigned int fill);
#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

extern mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);
/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);
/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);
/*
 * Dump page table contents into the specified buffer.  Returns the number of
 * bytes copied, 0 if insufficient space, (size_t)-1 if unsupported.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern size_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end);
/*
 * Indicates if any special policy is applied to this protection by the pmap layer.
 */
bool pmap_has_prot_policy(vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to the VM.
 */
uint64_t pmap_release_pages_fast(void);
#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);
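
/*
 * Sketch: query how the page at "va" is accounted. "pm" and "va" are
 * hypothetical.
 */
#if 0
	int disp = 0;
	if (pmap_query_page_info(pm, va, &disp) == KERN_SUCCESS &&
	    (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
		/* contents live in the compressor, not in a resident page */
	}
#endif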
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#ifdef PLATFORM_BridgeOS
struct pmap_legacy_trust_cache {
	struct pmap_legacy_trust_cache *next;
	uuid_t uuid;
	uint32_t num_hashes;
	uint8_t hashes[][CS_CDHASH_LEN];
};
#else
struct pmap_legacy_trust_cache;
#endif
extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
    const vm_size_t trust_cache_len);
struct pmap_image4_trust_cache {
	// Filled by pmap layer.
	struct pmap_image4_trust_cache const *next;     // linked list linkage
	struct trust_cache_module1 const *module;       // pointer into module (within data below)

	// Filled by caller.
	// data is either an image4,
	// or just the trust cache payload itself if the image4 manifest is external.
	size_t data_len;
	uint8_t const data[];
};
typedef enum {
	PMAP_TC_SUCCESS = 0,
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
} pmap_tc_ret_t;
extern pmap_tc_ret_t pmap_load_image4_trust_cache(
	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
	uint8_t const *img4_manifest,
	vm_size_t img4_manifest_buffer_len,
	vm_size_t img4_manifest_actual_len,
	bool dry_run);
extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);
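
/*
 * Sketch: validate a code-directory hash against both the static and the
 * runtime-loaded trust caches. "cdhash" is hypothetical.
 */
#if 0
	uint8_t cdhash[CS_CDHASH_LEN];  /* filled in by the caller */
	if (pmap_lookup_in_static_trust_cache(cdhash) != 0 ||
	    pmap_lookup_in_loaded_trust_caches(cdhash)) {
		/* hash is trusted */
	}
#endif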
extern bool pmap_in_ppl(void);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);
#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */