/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
66
0a7de745 67#ifndef _VM_PMAP_H_
1c79356b
A
68#define _VM_PMAP_H_
69
70#include <mach/kern_return.h>
71#include <mach/vm_param.h>
72#include <mach/vm_types.h>
73#include <mach/vm_attributes.h>
74#include <mach/boolean.h>
75#include <mach/vm_prot.h>
76
d9a64523
A
77#include <kern/trustcache.h>
78
f427ee49 79
0a7de745 80#ifdef KERNEL_PRIVATE
91447636 81
1c79356b
A
82/*
83 * The following is a description of the interface to the
84 * machine-dependent "physical map" data structure. The module
85 * must provide a "pmap_t" data type that represents the
86 * set of valid virtual-to-physical addresses for one user
87 * address space. [The kernel address space is represented
88 * by a distinguished "pmap_t".] The routines described manage
89 * this type, install and update virtual-to-physical mappings,
90 * and perform operations on physical addresses common to
91 * many address spaces.
92 */
93
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t     source,
	addr64_t     sink,
	unsigned int size,
	int          which);
#define cppvPsnk        1
#define cppvPsnkb       31
#define cppvPsrc        2
#define cppvPsrcb       30
#define cppvFsnk        4
#define cppvFsnkb       29
#define cppvFsrc        8
#define cppvFsrcb       28
#define cppvNoModSnk    16
#define cppvNoModSnkb   27
#define cppvNoRefSrc    32
#define cppvNoRefSrcb   26
#define cppvKmap        64      /* Use the kernel's vm_map */
#define cppvKmapb       25

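/*
 * Illustrative sketch only (not part of the interface): copying one page
 * from a physical source page to a kernel virtual destination with
 * copypv(), using the cppv* flags defined above.  "src_pa" and "dst_kva"
 * are placeholder names.
 *
 *	kern_return_t kr;
 *
 *	kr = copypv((addr64_t)src_pa, (addr64_t)dst_kva, PAGE_SIZE,
 *	    cppvPsrc | cppvKmap);   // physical source, sink in the kernel map
 *	if (kr != KERN_SUCCESS) {
 *		// the copy could not be completed
 *	}
 */
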
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
#include <vm/memory_types.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size);          /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */

extern uint_t pmap_free_pages(void);    /* report remaining unused physical pages */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp);       /* allocate vm_page structs */

extern void pmap_init(void);            /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */

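/*
 * Illustrative sketch only: how a machine-independent startup path is
 * expected to drive the routines above when MACHINE_PAGES is not
 * defined.  "kva_start", "kva_end" and "pn" are placeholders, and error
 * handling is omitted.
 *
 *	vm_offset_t kva_start, kva_end;
 *	ppnum_t     pn;
 *
 *	pmap_virtual_space(&kva_start, &kva_end);
 *	while (pmap_next_page(&pn)) {
 *		// hand physical page "pn" to the resident-page pool
 *	}
 */
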
/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	ledger_t        ledger,
	vm_map_size_t   size,
	unsigned int    flags);

extern pmap_t(pmap_kernel)(void);       /* Return the kernel's pmap */
extern void             pmap_reference(pmap_t pmap);    /* Gain a reference. */
extern void             pmap_destroy(pmap_t pmap);      /* Release a reference. */
extern void             pmap_switch(pmap_t);
extern void             pmap_require(pmap_t pmap);

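/*
 * Illustrative sketch only: the expected create/reference/destroy life
 * cycle of a pmap.  "task_ledger" is a placeholder ledger, PMAP_NULL is
 * assumed from machine/pmap.h, and PMAP_CREATE_64BIT is defined later
 * in this header.
 *
 *	pmap_t p = pmap_create_options(task_ledger, 0, PMAP_CREATE_64BIT);
 *	if (p != PMAP_NULL) {
 *		pmap_reference(p);      // take one extra reference
 *		...
 *		pmap_destroy(p);        // drop the extra reference
 *		pmap_destroy(p);        // drop the creation reference
 *	}
 */
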
#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t    pmap_enter(     /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

extern kern_return_t    pmap_enter_options(
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);
extern kern_return_t    pmap_enter_options_addr(
	pmap_t          pmap,
	vm_map_offset_t v,
	pmap_paddr_t    pa,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);

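/*
 * Illustrative sketch only: entering one writable mapping without
 * blocking, using PMAP_OPTIONS_NOWAIT (defined later in this header).
 * "map", "vaddr" and "pn" are placeholders.
 *
 *	kern_return_t kr;
 *
 *	kr = pmap_enter_options(map->pmap, vaddr, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
 *	    0, FALSE, PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// the pmap could not expand without blocking; retry later
 *	}
 */
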
extern void             pmap_remove_some_phys(
	pmap_t          pmap,
	ppnum_t         pn);

extern void             pmap_lock_phys_page(
	ppnum_t         pn);

extern void             pmap_unlock_phys_page(
	ppnum_t         pn);

/*
 *	Routines that operate on physical addresses.
 */

extern void pmap_page_protect(          /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t         phys,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_zero_page)(
	ppnum_t         pn);

extern void(pmap_zero_part_page)(
	ppnum_t         pn,
	vm_offset_t     offset,
	vm_size_t       len);

extern void(pmap_copy_page)(
	ppnum_t         src,
	ppnum_t         dest);

extern void(pmap_copy_part_page)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t     src,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_rpage)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	vm_offset_t     dst,
	vm_size_t       len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys,
	unsigned int    options,
	void            *arg);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate
                                                         * cache based on
                                                         * page number sent */
	ppnum_t         pn,
	vm_size_t       size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t         pn);

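/*
 * Illustrative sketch only: removing every mapping of a physical page
 * and examining the reference/change state that pmap_disconnect()
 * returns, expressed with the VM_MEM_MODIFIED / VM_MEM_REFERENCED bits
 * defined later in this header.  "pn" is a placeholder page number.
 *
 *	unsigned int refmod = pmap_disconnect(pn);
 *
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// the page has dirty contents that may need to be preserved
 *	}
 */
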
/*
 * Set (override) cache attributes for the specified physical page
 */
extern void             pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

extern void *pmap_map_compressor_page(
	ppnum_t);

extern void pmap_unmap_compressor_page(
	ppnum_t,
	void*);

#if defined(__arm__) || defined(__arm64__)
/* ARM64_TODO */
extern boolean_t pmap_batch_set_cache_attributes(
	ppnum_t,
	unsigned int,
	unsigned int,
	unsigned int,
	boolean_t,
	unsigned int*);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions.  pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif

/*
 *	Statistics routines
 */
extern int(pmap_compressed)(pmap_t pmap);
extern int(pmap_resident_count)(pmap_t pmap);
extern int(pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void             pmap_collect(pmap_t pmap);      /* Perform garbage
                                                         * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(                 /* Copy range of mappings,
                                         * if desired. */
	pmap_t          dest,
	pmap_t          source,
	vm_map_offset_t dest_va,
	vm_map_size_t   size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory
                                         * attributes */
	pmap_t          pmap,
	vm_map_offset_t va,
	vm_map_size_t   size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t          pmap;                           \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t          pmap;                           \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                        \
	MACRO_BEGIN                                                      \
	pmap_t          __pmap = (pmap);                                 \
	vm_page_t       __page = (page);                                 \
	int             __options = 0;                                   \
	vm_object_t     __obj;                                           \
	                                                                 \
	PMAP_ENTER_CHECK(__pmap, __page)                                 \
	__obj = VM_PAGE_OBJECT(__page);                                  \
	if (__obj->internal) {                                           \
	        __options |= PMAP_OPTIONS_INTERNAL;                      \
	}                                                                \
	if (__page->vmp_reusable || __obj->all_reusable) {               \
	        __options |= PMAP_OPTIONS_REUSABLE;                      \
	}                                                                \
	result = pmap_enter_options(__pmap,                              \
	    (virtual_address),                                           \
	    VM_PAGE_GET_PHYS_PAGE(__page),                               \
	    (protection),                                                \
	    (fault_type),                                                \
	    (flags),                                                     \
	    (wired),                                                     \
	    __options,                                                   \
	    NULL);                                                       \
	MACRO_END
#endif  /* !PMAP_ENTER */

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,    \
	    page, protection,                                            \
	    fault_type, flags, wired, options, result)                   \
	MACRO_BEGIN                                                      \
	pmap_t          __pmap = (pmap);                                 \
	vm_page_t       __page = (page);                                 \
	int             __extra_options = 0;                             \
	vm_object_t     __obj;                                           \
	                                                                 \
	PMAP_ENTER_CHECK(__pmap, __page)                                 \
	__obj = VM_PAGE_OBJECT(__page);                                  \
	if (__obj->internal) {                                           \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;                \
	}                                                                \
	if (__page->vmp_reusable || __obj->all_reusable) {               \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;                \
	}                                                                \
	result = pmap_enter_options_addr(__pmap,                         \
	    (virtual_address),                                           \
	    (((pmap_paddr_t)                                             \
	    VM_PAGE_GET_PHYS_PAGE(__page)                                \
	    << PAGE_SHIFT)                                               \
	    + fault_phys_offset),                                        \
	    (protection),                                                \
	    (fault_type),                                                \
	    (flags),                                                     \
	    (wired),                                                     \
	    (options) | __extra_options,                                 \
	    NULL);                                                       \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)                \
	MACRO_BEGIN                                                                 \
	if (!batch_pmap_op) {                                                       \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr);  \
	        object->set_cache_attr = TRUE;                                      \
	}                                                                           \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if     defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                \
	    cache_attr, num_pages, batch_pmap_op)                         \
	MACRO_BEGIN                                                       \
	if ((batch_pmap_op)) {                                            \
	        unsigned int __page_idx = 0;                              \
	        unsigned int res = 0;                                     \
	        boolean_t batch = TRUE;                                   \
	        while (__page_idx < (num_pages)) {                        \
	                if (!pmap_batch_set_cache_attributes(             \
	                            user_page_list[__page_idx].phys_addr, \
	                            (cache_attr),                         \
	                            (num_pages),                          \
	                            (__page_idx),                         \
	                            FALSE,                                \
	                            (&res))) {                            \
	                        batch = FALSE;                            \
	                        break;                                    \
	                }                                                 \
	                __page_idx++;                                     \
	        }                                                         \
	        __page_idx = 0;                                           \
	        res = 0;                                                  \
	        while (__page_idx < (num_pages)) {                        \
	                if (batch)                                        \
	                        (void)pmap_batch_set_cache_attributes(    \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr),                     \
	                                (num_pages),                      \
	                                (__page_idx),                     \
	                                TRUE,                             \
	                                (&res));                          \
	                else                                              \
	                        pmap_set_cache_attributes(                \
	                                user_page_list[__page_idx].phys_addr, \
	                                (cache_attr));                    \
	                __page_idx++;                                     \
	        }                                                         \
	        (object)->set_cache_attr = TRUE;                          \
	}                                                                 \
	MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                \
	    cache_attr, num_pages, batch_pmap_op)                         \
	MACRO_BEGIN                                                       \
	if ((batch_pmap_op)) {                                            \
	        unsigned int __page_idx = 0;                              \
	        while (__page_idx < (num_pages)) {                        \
	                pmap_set_cache_attributes(                        \
	                        user_page_list[__page_idx].phys_addr,     \
	                        (cache_attr));                            \
	                __page_idx++;                                     \
	        }                                                         \
	        (object)->set_cache_attr = TRUE;                          \
	}                                                                 \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)                            \
{                                                               \
	if ((page)->vmp_error) {                                \
	        panic("VM page %p should not have an error\n",  \
	            (page));                                    \
	}                                                       \
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long    pfc_cpus;
	long    pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void             pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void             pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void             pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t        pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int     pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void             pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void             pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

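/*
 * Illustrative sketch only: sampling and then clearing a page's
 * reference/modify state with the routines above.  "pn" is a
 * placeholder page number.
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *
 *	if (refmod & VM_MEM_REFERENCED) {
 *		// the page was touched since the bits were last cleared
 *	}
 *	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 */
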
/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t           pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int     mask,
	unsigned int     options);

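/*
 * Illustrative sketch only: the required fallback when the batched range
 * operation above is not supported on the current platform.  "map",
 * "start", "end" and the per-page "pn_for_va()" lookup are placeholders.
 *
 *	if (!pmap_clear_refmod_range_options(map->pmap, start, end,
 *	    VM_MEM_REFERENCED, 0)) {
 *		for (vm_map_address_t va = start; va < end; va += PAGE_SIZE) {
 *			// fall back to the per-page primitive, e.g.
 *			// pmap_clear_refmod(pn_for_va(va), VM_MEM_REFERENCED);
 *		}
 *	}
 */
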
extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);

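/*
 * Illustrative sketch only: batching TLB invalidations with a flush
 * context.  This assumes the range routines below honor
 * PMAP_OPTIONS_NOFLUSH (defined later in this header) and take the
 * context through their "arg" parameter; "map", "start" and "end" are
 * placeholders.
 *
 *	pmap_flush_context pfc;
 *
 *	pmap_flush_context_init(&pfc);
 *	pmap_protect_options(map->pmap, start, end, VM_PROT_READ,
 *	    PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
 *	pmap_flush(&pfc);       // issue the deferred invalidations
 */
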
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void             pmap_protect(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot);

extern void             pmap_protect_options(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_pageable)(
	pmap_t          pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

/* TODO: <rdar://problem/65247502> Completely remove pmap_nesting_size_max() */
extern uint64_t pmap_nesting_size_max(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
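
/*
 * Illustrative sketch only: nesting a shared-region pmap into a task
 * pmap and later removing it.  It assumes the first argument is the
 * outer ("grand") pmap; "grand", "subord", "nest_base" and "nest_size"
 * are placeholders.
 *
 *	kern_return_t kr;
 *
 *	kr = pmap_nest(grand, subord, nest_base, nest_size);
 *	...
 *	kr = pmap_unnest_options(grand, nest_base, nest_size,
 *	    PMAP_UNNEST_CLEAN);
 */
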
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t        pmap_is_noencrypt(ppnum_t);
extern void             pmap_set_noencrypt(ppnum_t pn);
extern void             pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t   kernel_pmap;            /* The kernel's map */
#define         pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else

#define PMAP_CREATE_STAGE2      0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP 0x4
#else
#define PMAP_CREATE_DISABLE_JOP 0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#if __arm64__
#define PMAP_CREATE_X86_64      0
#else
#define PMAP_CREATE_X86_64      0
#endif

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64)

#endif /* __x86_64__ */

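/*
 * Illustrative sketch only: combining creation flags (here for the
 * arm64 configuration) and rejecting unknown bits with the
 * PMAP_CREATE_KNOWN_FLAGS mask before calling pmap_create_options().
 * "task_ledger" is a placeholder.
 *
 *	unsigned int flags = PMAP_CREATE_64BIT | PMAP_CREATE_DISABLE_JOP;
 *
 *	assert((flags & ~PMAP_CREATE_KNOWN_FLAGS) == 0);
 *	pmap_t p = pmap_create_options(task_ledger, 0, flags);
 */
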
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
                                                 * KERN_RESOURCE_SHORTAGE
                                                 * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
                                                 * but don't enter mapping
                                                 */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
                                                 * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200       /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400       /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to
                                                 * be upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif

#if     !defined(__LP64__)
extern vm_offset_t      pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void             pmap_change_wiring(     /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void             pmap_remove(   /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void             pmap_remove_options(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);

extern void             fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

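/*
 * Illustrative sketch only: measuring the resident and compressed
 * footprint of a virtual range with pmap_query_resident().  "map",
 * "start" and "end" are placeholders.
 *
 *	mach_vm_size_t compressed = 0;
 *	mach_vm_size_t resident;
 *
 *	resident = pmap_query_resident(map->pmap, start, end, &compressed);
 *	// "resident" is the byte count currently mapped in the range;
 *	// "compressed" is the byte count attributed to the compressor.
 */
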
extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */

extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);

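/*
 * Illustrative sketch only: classifying one virtual page by the
 * disposition bits defined above.  "map" and "va" are placeholders.
 *
 *	int disp = 0;
 *
 *	if (pmap_query_page_info(map->pmap, va, &disp) == KERN_SUCCESS) {
 *		if (disp & PMAP_QUERY_PAGE_PRESENT) {
 *			// a translation exists for this page
 *		} else if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
 *			// the page currently lives in the compressor
 *		}
 *	}
 */
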
#if CONFIG_PGTRACE
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif


#ifdef PLATFORM_BridgeOS
struct pmap_legacy_trust_cache {
	struct pmap_legacy_trust_cache *next;
	uuid_t uuid;
	uint32_t num_hashes;
	uint8_t hashes[][CS_CDHASH_LEN];
};
#else
struct pmap_legacy_trust_cache;
#endif

extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
    const vm_size_t trust_cache_len);

typedef enum {
	PMAP_TC_TYPE_PERSONALIZED,
	PMAP_TC_TYPE_PDI,
	PMAP_TC_TYPE_CRYPTEX,
	PMAP_TC_TYPE_ENGINEERING,
	PMAP_TC_TYPE_GLOBAL_FF00,
	PMAP_TC_TYPE_GLOBAL_FF01,
} pmap_tc_type_t;

#define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1
struct pmap_image4_trust_cache {
	// Filled by pmap layer.
	struct pmap_image4_trust_cache const *next;     // linked list linkage
	struct trust_cache_module1 const *module;       // pointer into module (within data below)

	// Filled by caller.
	// data is either an image4,
	// or just the trust cache payload itself if the image4 manifest is external.
	pmap_tc_type_t type;
	size_t bnch_len;
	uint8_t const bnch[48];
	size_t data_len;
	uint8_t const data[];
};

typedef enum {
	PMAP_TC_SUCCESS = 0,
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
	PMAP_TC_MANIFEST_VIOLATION = -9,
	PMAP_TC_PAYLOAD_VIOLATION = -10,
	PMAP_TC_EXPIRED = -11,
	PMAP_TC_CRYPTO_WRONG = -12,
	PMAP_TC_OBJECT_WRONG = -13,
	PMAP_TC_UNKNOWN_CALLER = -14,
	PMAP_TC_UNKNOWN_FAILURE = -15,
} pmap_tc_ret_t;

#define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1
extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);

extern pmap_tc_ret_t pmap_load_image4_trust_cache(
	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
	uint8_t const *img4_manifest,
	vm_size_t img4_manifest_buffer_len,
	vm_size_t img4_manifest_actual_len,
	bool dry_run);

extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

extern bool pmap_in_ppl(void);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_alloc_init(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */