/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
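
/*
 * Illustrative sketch only (not part of the interface): the typical
 * lifetime of a user pmap, assuming a valid task ledger.  The routines
 * referenced are declared below.
 *
 *	pmap_t pmap = pmap_create(ledger, 0, TRUE);	// 64-bit address space
 *	pmap_enter(pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_NONE, 0, FALSE);			// map one page
 *	...
 *	pmap_remove(pmap, start, end);			// tear down a range
 *	pmap_destroy(pmap);				// drop the last reference
 */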

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
				addr64_t source,
				addr64_t sink,
				unsigned int size,
				int which);
#define cppvPsnk	1
#define cppvPsnkb	31
#define cppvPsrc	2
#define cppvPsrcb	30
#define cppvFsnk	4
#define cppvFsnkb	29
#define cppvFsrc	8
#define cppvFsrcb	28
#define cppvNoModSnk	16
#define cppvNoModSnkb	27
#define cppvNoRefSrc	32
#define cppvNoRefSrcb	26
#define cppvKmap	64	/* Use the kernel's vm_map */
#define cppvKmapb	25
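
/*
 * Illustrative sketch only: "which" combines the cppv* bits to describe
 * each side of the copy.  For example, copying one page from a physical
 * frame into a kernel virtual buffer (addresses hypothetical):
 *
 *	kern_return_t kr;
 *	kr = copypv(src_phys_addr,		// addr64_t physical source
 *	    (addr64_t)kernel_buf,		// virtual sink
 *	    PAGE_SIZE,
 *	    cppvPsrc | cppvKmap);		// source is physical; sink is
 *						// in the kernel's vm_map
 */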

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size);
					/* During VM initialization,
					 * steal a chunk of memory.
					 */
extern unsigned int pmap_free_pages(void);
					/* During VM initialization,
					 * report remaining unused
					 * physical pages.
					 */
extern void pmap_startup(
			vm_offset_t *startp,
			vm_offset_t *endp);
					/* During VM initialization,
					 * use remaining physical pages
					 * to allocate page frames.
					 */
extern void pmap_init(void);
					/* Initialization,
					 * after kernel runs
					 * in virtual memory.
					 */
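
/*
 * Rough boot-time ordering of the above (illustrative only; the exact
 * call sites live in machine-dependent code and vm/vm_resident.c):
 *
 *	pmap_bootstrap();	// machine-dependent, before this interface
 *	pmap_steal_memory(sz);	// early allocations, no page structures yet
 *	pmap_startup(&s, &e);	// turn remaining frames into page frames
 *	pmap_init();		// last, once the kernel runs in virtual memory
 */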

extern void mapping_adjust(void);	/* Adjust free mapping count */

extern void mapping_free_prime(void);	/* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum);
					/* During VM initialization,
					 * return the next unused
					 * physical page.
					 */
extern void pmap_virtual_space(
			vm_offset_t *virtual_start,
			vm_offset_t *virtual_end);
					/* During VM initialization,
					 * report virtual space
					 * available for the kernel.
					 */
#endif	/* MACHINE_PAGES */
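
/*
 * Illustrative sketch only, following the protocol described above: how
 * an implementation of pmap_startup() might drain the unused frames
 * advertised by pmap_free_pages():
 *
 *	ppnum_t pn;
 *	unsigned int avail = pmap_free_pages();	// may be an over-estimate
 *	while (pmap_next_page(&pn)) {
 *		// initialize a page frame for physical page "pn"
 *	}
 *	// pmap_next_page() returning FALSE ends the walk, even if fewer
 *	// than "avail" pages were handed out
 */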

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create(	/* Create a pmap_t. */
				ledger_t ledger,
				vm_map_size_t size,
				boolean_t is_64bit);
#if __x86_64__
extern pmap_t pmap_create_options(
				ledger_t ledger,
				vm_map_size_t size,
				int flags);
#endif

extern pmap_t (pmap_kernel)(void);		/* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);		/* Release a reference. */
extern void pmap_switch(pmap_t);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
			     int pid,
			     char *procname);
#endif	/* MACH_ASSERT */

extern void pmap_enter(	/* Enter a mapping */
				pmap_t pmap,
				vm_map_offset_t v,
				ppnum_t pn,
				vm_prot_t prot,
				vm_prot_t fault_type,
				unsigned int flags,
				boolean_t wired);

extern kern_return_t pmap_enter_options(
				pmap_t pmap,
				vm_map_offset_t v,
				ppnum_t pn,
				vm_prot_t prot,
				vm_prot_t fault_type,
				unsigned int flags,
				boolean_t wired,
				unsigned int options,
				void *arg);
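
/*
 * Illustrative sketch only: entering a mapping without blocking, and
 * handling the shortage case (PMAP_OPTIONS_NOWAIT is defined below):
 *
 *	kern_return_t kr;
 *	kr = pmap_enter_options(pmap, va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE,	// prot
 *	    VM_PROT_NONE,			// fault_type
 *	    0,					// WIMG flags: use default
 *	    FALSE,				// not wired
 *	    PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// retry later, from a context that may block
 *	}
 */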

extern void pmap_remove_some_phys(
				pmap_t pmap,
				ppnum_t pn);

extern void pmap_lock_phys_page(
				ppnum_t pn);

extern void pmap_unlock_phys_page(
				ppnum_t pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void pmap_page_protect(	/* Restrict access to page. */
				ppnum_t phys,
				vm_prot_t prot);

extern void pmap_page_protect_options(	/* Restrict access to page. */
				ppnum_t phys,
				vm_prot_t prot,
				unsigned int options,
				void *arg);

extern void (pmap_zero_page)(
				ppnum_t pn);

extern void (pmap_zero_part_page)(
				ppnum_t pn,
				vm_offset_t offset,
				vm_size_t len);

extern void (pmap_copy_page)(
				ppnum_t src,
				ppnum_t dest);

extern void (pmap_copy_part_page)(
				ppnum_t src,
				vm_offset_t src_offset,
				ppnum_t dst,
				vm_offset_t dst_offset,
				vm_size_t len);

extern void (pmap_copy_part_lpage)(
				vm_offset_t src,
				ppnum_t dst,
				vm_offset_t dst_offset,
				vm_size_t len);

extern void (pmap_copy_part_rpage)(
				ppnum_t src,
				vm_offset_t src_offset,
				vm_offset_t dst,
				vm_size_t len);

extern unsigned int (pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t phys);

extern unsigned int (pmap_disconnect_options)(	/* disconnect mappings and return reference and change */
				ppnum_t phys,
				unsigned int options,
				void *arg);

extern kern_return_t (pmap_attribute_cache_sync)(	/* Flush appropriate
							 * cache based on
							 * page number sent */
				ppnum_t pn,
				vm_size_t size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

extern unsigned int (pmap_cache_attributes)(
				ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
				ppnum_t,
				unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions.  pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t pmap_verify_free(ppnum_t pn);

/*
 *	Statistics routines
 */
extern int (pmap_compressed)(pmap_t pmap);
extern int (pmap_resident_count)(pmap_t pmap);
extern int (pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);	/* Perform garbage
					 * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void (pmap_copy)(	/* Copy range of mappings,
				 * if desired. */
				pmap_t dest,
				pmap_t source,
				vm_map_offset_t dest_va,
				vm_map_size_t size,
				vm_map_offset_t source_va);

extern kern_return_t (pmap_attribute)(	/* Get/Set special memory
					 * attributes */
				pmap_t pmap,
				vm_map_offset_t va,
				vm_map_size_t size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t pmap;					\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t pmap;					\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__options = 0;					\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__options |= PMAP_OPTIONS_INTERNAL;			\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__options |= PMAP_OPTIONS_REUSABLE;			\
	}								\
	(void) pmap_enter_options(__pmap,				\
				  (virtual_address),			\
				  VM_PAGE_GET_PHYS_PAGE(__page),	\
				  (protection),				\
				  (fault_type),				\
				  (flags),				\
				  (wired),				\
				  __options,				\
				  NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER */

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
			   fault_type, flags, wired, options, result)	\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__extra_options = 0;				\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__extra_options |= PMAP_OPTIONS_INTERNAL;		\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__extra_options |= PMAP_OPTIONS_REUSABLE;		\
	}								\
	result = pmap_enter_options(__pmap,				\
				    (virtual_address),			\
				    VM_PAGE_GET_PHYS_PAGE(__page),	\
				    (protection),			\
				    (fault_type),			\
				    (flags),				\
				    (wired),				\
				    (options) | __extra_options,	\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)	\
	MACRO_BEGIN							\
	if (!batch_pmap_op) {						\
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem),	\
					  cache_attr);			\
		object->set_cache_attr = TRUE;				\
	}								\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
				  cache_attr, num_pages, batch_pmap_op)	\
	MACRO_BEGIN							\
	if ((batch_pmap_op)) {						\
		unsigned int __page_idx = 0;				\
		while (__page_idx < (num_pages)) {			\
			pmap_set_cache_attributes(			\
				user_page_list[__page_idx].phys_addr,	\
				(cache_attr));				\
			__page_idx++;					\
		}							\
		(object)->set_cache_attr = TRUE;			\
	}								\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
		      (page));						\
	}								\
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long	pfc_cpus;
	long	pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t (pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
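
/*
 * Illustrative sketch only: inspecting and aging a page's ref/mod state:
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// page is dirty; it must be cleaned before being reclaimed
 *	}
 *	pmap_clear_refmod(pn, VM_MEM_REFERENCED);	// reset for aging
 */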

extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
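
/*
 * Illustrative sketch only: batching TLB invalidations with a flush
 * context, by passing PMAP_OPTIONS_NOFLUSH (defined below) plus the
 * context to a *_options routine, then flushing once at the end:
 *
 *	pmap_flush_context pfc;
 *	pmap_flush_context_init(&pfc);
 *	pmap_clear_refmod_options(pn, VM_MEM_REFERENCED,
 *	    PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
 *	// ... more per-page operations ...
 *	pmap_flush(&pfc);	// one batched shootdown
 */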

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(	/* Change protections. */
				pmap_t map,
				vm_map_offset_t s,
				vm_map_offset_t e,
				vm_prot_t prot);

extern void pmap_protect_options(	/* Change protections. */
				pmap_t map,
				vm_map_offset_t s,
				vm_map_offset_t e,
				vm_prot_t prot,
				unsigned int options,
				void *arg);

extern void (pmap_pageable)(
				pmap_t pmap,
				vm_map_offset_t start,
				vm_map_offset_t end,
				boolean_t pageable);


extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
			       pmap_t,
			       addr64_t,
			       addr64_t,
			       uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
				 addr64_t,
				 uint64_t);

#define PMAP_UNNEST_CLEAN	1

extern kern_return_t pmap_unnest_options(pmap_t,
					 addr64_t,
					 uint64_t,
					 unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif	/* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t kernel_pmap;	/* The kernel's map */
#define pmap_kernel()	(kernel_pmap)

/* machine independent WIMG bits */

#define VM_MEM_GUARDED		0x1	/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2	/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4	/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8	/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF

#define VM_MEM_SUPERPAGE	0x100	/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200
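
/*
 * Illustrative sketch only: the WIMG bits travel in the "flags" argument
 * of pmap_enter(), e.g. a wired, uncached, guarded mapping such as one
 * for device memory:
 *
 *	pmap_enter(kernel_pmap, va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
 *	    VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED,
 *	    TRUE);				// wired
 */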

#if __x86_64__
#define PMAP_CREATE_64BIT	0x1
#define PMAP_CREATE_EPT		0x2
#define PMAP_CREATE_KNOWN_FLAGS	(PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#endif

#define PMAP_OPTIONS_NOWAIT	0x1		/* don't block, return
						 * KERN_RESOURCE_SHORTAGE
						 * instead */
#define PMAP_OPTIONS_NOENTER	0x2		/* expand pmap if needed
						 * but don't enter mapping
						 */
#define PMAP_OPTIONS_COMPRESSOR	0x4		/* credit the compressor for
						 * this operation */
#define PMAP_OPTIONS_INTERNAL	0x8		/* page from internal object */
#define PMAP_OPTIONS_REUSABLE	0x10		/* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH	0x20		/* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD	0x40		/* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT	0x80		/* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE	0x100		/* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE	0x200	/* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE	0x400	/* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED	0x800	/* credit the compressor
							 * iff page was modified */

#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
				vm_map_offset_t va);
#endif
extern void pmap_change_wiring(	/* Specify pageability */
				pmap_t pmap,
				vm_map_offset_t va,
				boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(	/* Remove mappings. */
				pmap_t map,
				vm_map_offset_t s,
				vm_map_offset_t e);

extern void pmap_remove_options(	/* Remove mappings. */
				pmap_t map,
				vm_map_offset_t s,
				vm_map_offset_t e,
				int options);
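
/*
 * Illustrative sketch only (map, start and end hypothetical page-aligned
 * bounds): PMAP_OPTIONS_REMOVE, defined above, tells the pmap layer the
 * range is being removed outright rather than merely unmapped:
 *
 *	pmap_remove_options(map->pmap, start, end, PMAP_OPTIONS_REMOVE);
 */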

extern void fillPage(ppnum_t pa, unsigned int fill);

extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);

#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
				   vm_map_offset_t s,
				   vm_map_offset_t e,
				   mach_vm_size_t *compressed_bytes_p);

#define PMAP_QUERY_PAGE_PRESENT			0x01
#define PMAP_QUERY_PAGE_REUSABLE		0x02
#define PMAP_QUERY_PAGE_INTERNAL		0x04
#define PMAP_QUERY_PAGE_ALTACCT			0x08
#define PMAP_QUERY_PAGE_COMPRESSED		0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT	0x20
extern kern_return_t pmap_query_page_info(
				pmap_t pmap,
				vm_map_offset_t va,
				int *disp);
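
/*
 * Illustrative sketch only: querying one page's disposition, e.g. to see
 * whether it currently lives in the compressor:
 *
 *	int disp = 0;
 *	kern_return_t kr = pmap_query_page_info(task_pmap, va, &disp);
 *	if (kr == KERN_SUCCESS && (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
 *		// not resident; backed by a compressed copy
 *	}
 */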

#if CONFIG_PGTRACE
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */