/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef	_VM_PMAP_H_
#define	_VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef	KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */

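/*
 * Illustrative sketch only (not part of the documented interface): under
 * the description above, machine-independent VM code typically drives a
 * user pmap through a create/enter/remove/destroy lifetime, e.g.
 *
 *	pmap_t pmap = pmap_create(ledger, 0, TRUE);	// "ledger", "va", "pn"
 *	pmap_enter(pmap, va, pn, VM_PROT_READ,		// are hypothetical locals
 *		   VM_PROT_READ, VM_WIMG_USE_DEFAULT, FALSE);
 *	pmap_remove(pmap, va, va + PAGE_SIZE);
 *	pmap_destroy(pmap);				// drops the reference
 *
 * The ordering and arguments here are a sketch of how the routines declared
 * below fit together; the real callers live in the osfmk/vm layer.
 */
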
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t	copypv(
				addr64_t	source,
				addr64_t	sink,
				unsigned int	size,
				int		which);
#define cppvPsnk	1
#define cppvPsnkb	31
#define cppvPsrc	2
#define cppvPsrcb	30
#define cppvFsnk	4
#define cppvFsnkb	29
#define cppvFsrc	8
#define cppvFsrcb	28
#define cppvNoModSnk	16
#define cppvNoModSnkb	27
#define cppvNoRefSrc	32
#define cppvNoRefSrcb	26
#define cppvKmap	64	/* Use the kernel's vm_map */
#define cppvKmapb	25

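/*
 * Sketch only: "which" is a bitwise OR of the cppv* flags above (the *b
 * variants are the corresponding bit numbers).  Assuming cppvPsnk marks the
 * sink address as physical and cppvKmap resolves the virtual side against
 * the kernel's vm_map, a hypothetical copy from a kernel buffer "kbuf" into
 * physical page "pn" might look like:
 *
 *	kern_return_t kr;
 *	kr = copypv((addr64_t)(uintptr_t)kbuf, ptoa_64(pn), PAGE_SIZE,
 *		    cppvPsnk | cppvKmap);
 *
 * The names and flag semantics beyond cppvKmap are assumptions; consult the
 * machine-dependent implementation for the authoritative behavior.
 */
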
extern boolean_t	pmap_has_managed_page(ppnum_t first, ppnum_t last);

#ifdef	MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void		*pmap_steal_memory(vm_size_t size);
						/* During VM initialization,
						 * steal a chunk of memory.
						 */
extern unsigned int	pmap_free_pages(void);	/* During VM initialization,
						 * report remaining unused
						 * physical pages.
						 */
extern void		pmap_startup(
				vm_offset_t *startp,
				vm_offset_t *endp);
						/* During VM initialization,
						 * use remaining physical pages
						 * to allocate page frames.
						 */
extern void		pmap_init(void);
						/* Initialization,
						 * after kernel runs
						 * in virtual memory.
						 */

extern void		mapping_adjust(void);	/* Adjust free mapping count */

extern void		mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t	pmap_next_page(ppnum_t *pnum);
extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
						/* During VM initialization,
						 * return the next unused
						 * physical page.
						 */
extern void		pmap_virtual_space(
				vm_offset_t	*virtual_start,
				vm_offset_t	*virtual_end);
						/* During VM initialization,
						 * report virtual space
						 * available for the kernel.
						 */
#endif	/* MACHINE_PAGES */
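
/*
 * Sketch of the bootstrap contract described above, assuming MACHINE_PAGES
 * is not defined: during startup, remaining page frames are typically
 * consumed with a loop along these lines,
 *
 *	ppnum_t pn;
 *	while (pmap_next_page(&pn)) {
 *		// hand physical page "pn" to the resident-page layer
 *	}
 *
 * i.e. pmap_next_page() returns FALSE once no unused pages remain.  This is
 * an illustration of the protocol, not a copy of vm/vm_resident.c.
 */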

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t		pmap_create(	/* Create a pmap_t. */
				ledger_t	ledger,
				vm_map_size_t	size,
				boolean_t	is_64bit);
#if __x86_64__
extern pmap_t		pmap_create_options(
				ledger_t	ledger,
				vm_map_size_t	size,
				int		flags);
#endif

extern pmap_t		(pmap_kernel)(void);		/* Return the kernel's pmap */
extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void		pmap_destroy(pmap_t pmap);	/* Release a reference. */
extern void		pmap_switch(pmap_t);

#if MACH_ASSERT
extern void		pmap_set_process(pmap_t pmap,
					 int pid,
					 char *procname);
#endif /* MACH_ASSERT */

extern void		pmap_enter(	/* Enter a mapping */
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired);

extern kern_return_t	pmap_enter_options(
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired,
				unsigned int	options,
				void		*arg);

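/*
 * Sketch only: pmap_enter_options() takes the same arguments as pmap_enter()
 * plus an "options" bitmask (PMAP_OPTIONS_* below) and an opaque "arg".  A
 * hypothetical non-blocking mapping attempt might read:
 *
 *	kern_return_t kr;
 *	kr = pmap_enter_options(pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
 *				VM_PROT_NONE, 0, FALSE,
 *				PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// the pmap declined to block; retry later
 *	}
 *
 * The variable names are placeholders; see the PMAP_ENTER{,_OPTIONS} macros
 * further down for how the VM layer actually invokes this routine.
 */
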
extern void		pmap_remove_some_phys(
				pmap_t		pmap,
				ppnum_t		pn);

extern void		pmap_lock_phys_page(
				ppnum_t		pn);

extern void		pmap_unlock_phys_page(
				ppnum_t		pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void		pmap_page_protect(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot);

extern void		pmap_page_protect_options(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);

extern void		(pmap_zero_page)(
				ppnum_t		pn);

extern void		(pmap_zero_part_page)(
				ppnum_t		pn,
				vm_offset_t	offset,
				vm_size_t	len);

extern void		(pmap_copy_page)(
				ppnum_t		src,
				ppnum_t		dest);

extern void		(pmap_copy_part_page)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_lpage)(
				vm_offset_t	src,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_rpage)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				vm_offset_t	dst,
				vm_size_t	len);

extern unsigned int	(pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys);

extern unsigned int	(pmap_disconnect_options)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys,
				unsigned int	options,
				void		*arg);

extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
							 * cache based on
							 * page number sent */
				ppnum_t		pn,
				vm_size_t	size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

extern unsigned int	(pmap_cache_attributes)(
				ppnum_t		pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void		pmap_set_cache_attributes(
				ppnum_t,
				unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t	pmap_verify_free(ppnum_t pn);

/*
 *	Statistics routines
 */
extern int		(pmap_compressed)(pmap_t pmap);
extern int		(pmap_resident_count)(pmap_t pmap);
extern int		(pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void		pmap_collect(pmap_t pmap);/* Perform garbage
						   * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void		(pmap_copy)(		/* Copy range of mappings,
						 * if desired. */
				pmap_t		dest,
				pmap_t		source,
				vm_map_offset_t	dest_va,
				vm_map_size_t	size,
				vm_map_offset_t	source_va);

extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
						 * attributes */
				pmap_t		pmap,
				vm_map_offset_t	va,
				vm_map_size_t	size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef	PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef	PMAP_DEACTIVATE_USER
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */

#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */

#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__options = 0;					\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__options |= PMAP_OPTIONS_INTERNAL;			\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__options |= PMAP_OPTIONS_REUSABLE;			\
	}								\
	(void) pmap_enter_options(__pmap,				\
				  (virtual_address),			\
				  VM_PAGE_GET_PHYS_PAGE(__page),	\
				  (protection),				\
				  (fault_type),				\
				  (flags),				\
				  (wired),				\
				  __options,				\
				  NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER */

#ifndef	PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
			   fault_type, flags, wired, options, result)	\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__extra_options = 0;				\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__extra_options |= PMAP_OPTIONS_INTERNAL;		\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__extra_options |= PMAP_OPTIONS_REUSABLE;		\
	}								\
	result = pmap_enter_options(__pmap,				\
				    (virtual_address),			\
				    VM_PAGE_GET_PHYS_PAGE(__page),	\
				    (protection),			\
				    (fault_type),			\
				    (flags),				\
				    (wired),				\
				    (options) | __extra_options,	\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */

#ifndef	PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)	\
	MACRO_BEGIN							\
		if (!batch_pmap_op) {					\
			pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
			object->set_cache_attr = TRUE;			\
		}							\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */

#ifndef	PMAP_BATCH_SET_CACHE_ATTR
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
				  cache_attr, num_pages, batch_pmap_op)	\
	MACRO_BEGIN							\
		if ((batch_pmap_op)) {					\
			unsigned int __page_idx = 0;			\
			while (__page_idx < (num_pages)) {		\
				pmap_set_cache_attributes(		\
					user_page_list[__page_idx].phys_addr, \
					(cache_attr));			\
				__page_idx++;				\
			}						\
			(object)->set_cache_attr = TRUE;		\
		}							\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
		      (page));						\
	}								\
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long	pfc_cpus;
	long	pfc_invalid_global;
};

typedef	struct pfc	pmap_flush_context;

				/* Clear reference bit */
extern void		pmap_clear_reference(ppnum_t pn);
				/* Return reference bit */
extern boolean_t	(pmap_is_referenced)(ppnum_t pn);
				/* Set modify bit */
extern void		pmap_set_modify(ppnum_t pn);
				/* Clear modify bit */
extern void		pmap_clear_modify(ppnum_t pn);
				/* Return modify bit */
extern boolean_t	pmap_is_modified(ppnum_t pn);
				/* Return modified and referenced bits */
extern unsigned int	pmap_get_refmod(ppnum_t pn);
				/* Clear modified and referenced bits */
extern void		pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
extern void		pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
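
/*
 * Sketch only: pmap_get_refmod() folds both bits into one value, and the
 * *_options variants can defer TLB flushing through a pmap_flush_context.
 * A hypothetical caller clearing the modified bit on several pages with a
 * single deferred flush might look like:
 *
 *	pmap_flush_context pfc;
 *	pmap_flush_context_init(&pfc);
 *	if (pmap_get_refmod(pn) & VM_MEM_MODIFIED) {
 *		pmap_clear_refmod_options(pn, VM_MEM_MODIFIED,
 *					  PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
 *	}
 *	pmap_flush(&pfc);	// perform the delayed flush once, at the end
 *
 * "pn" is a placeholder, and which options a given pmap honors is
 * machine-dependent; this only illustrates how the pieces are meant to fit.
 */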

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void		pmap_protect(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot);

extern void		pmap_protect_options(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);

extern void		(pmap_pageable)(
				pmap_t		pmap,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	pageable);


extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
			       pmap_t,
			       addr64_t,
			       addr64_t,
			       uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
				 addr64_t,
				 uint64_t);

#define	PMAP_UNNEST_CLEAN	1

extern kern_return_t pmap_unnest_options(pmap_t,
					  addr64_t,
					  uint64_t,
					  unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif	/* MACH_KERNEL_PRIVATE */

extern boolean_t	pmap_is_noencrypt(ppnum_t);
extern void		pmap_set_noencrypt(ppnum_t pn);
extern void		pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t	kernel_pmap;			/* The kernel's map */
#define		pmap_kernel()	(kernel_pmap)

/* machine independent WIMG bits */

#define VM_MEM_GUARDED		0x1		/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2		/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4		/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8		/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF

#define VM_MEM_SUPERPAGE	0x100		/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200

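/*
 * Sketch only: the WIMG bits above are combined into the "flags" argument of
 * pmap_enter() (masked by VM_WIMG_MASK).  For example, a hypothetical device
 * mapping might request uncached, guarded storage:
 *
 *	unsigned int wimg = VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED;
 *	pmap_enter(kernel_pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
 *		   VM_PROT_NONE, wimg, TRUE);
 *
 * VM_WIMG_USE_DEFAULT instead asks the pmap layer to use the page's default
 * cacheability; the combination shown is only an illustration.
 */
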
#if __x86_64__
#define PMAP_CREATE_64BIT	0x1
#define PMAP_CREATE_EPT		0x2
#define PMAP_CREATE_KNOWN_FLAGS	(PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#endif

#define PMAP_OPTIONS_NOWAIT	0x1		/* don't block, return
						 * KERN_RESOURCE_SHORTAGE
						 * instead */
#define PMAP_OPTIONS_NOENTER	0x2		/* expand pmap if needed
						 * but don't enter mapping
						 */
#define PMAP_OPTIONS_COMPRESSOR	0x4		/* credit the compressor for
						 * this operation */
#define PMAP_OPTIONS_INTERNAL	0x8		/* page from internal object */
#define PMAP_OPTIONS_REUSABLE	0x10		/* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH	0x20		/* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD	0x40		/* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT	0x80		/* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE	0x100		/* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200	/* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400	/* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
						    * iff page was modified */

#if	!defined(__LP64__)
extern vm_offset_t	pmap_extract(pmap_t pmap,
				vm_map_offset_t va);
#endif
extern void		pmap_change_wiring(	/* Specify pageability */
				pmap_t		pmap,
				vm_map_offset_t	va,
				boolean_t	wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void		pmap_remove(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e);

extern void		pmap_remove_options(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				int		options);

extern void		fillPage(ppnum_t pa, unsigned int fill);

extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);

#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
				   vm_map_offset_t s,
				   vm_map_offset_t e,
				   mach_vm_size_t *compressed_bytes_p);

#define PMAP_QUERY_PAGE_PRESENT			0x01
#define PMAP_QUERY_PAGE_REUSABLE		0x02
#define PMAP_QUERY_PAGE_INTERNAL		0x04
#define PMAP_QUERY_PAGE_ALTACCT			0x08
#define PMAP_QUERY_PAGE_COMPRESSED		0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT	0x20
extern kern_return_t pmap_query_page_info(
	pmap_t		pmap,
	vm_map_offset_t	va,
	int		*disp);

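/*
 * Sketch only: the disposition returned through "disp" is a bitmask of the
 * PMAP_QUERY_PAGE_* flags above, e.g.
 *
 *	int disp = 0;
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS &&
 *	    (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
 *		// the page at "va" is currently held by the compressor
 *	}
 *
 * This illustrates how the flag bits are meant to be tested, not which
 * combinations a particular pmap implementation reports.
 */
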
#if CONFIG_PGTRACE
int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */