/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
66 | ||
67 | #ifndef _VM_PMAP_H_ | |
68 | #define _VM_PMAP_H_ | |
69 | ||
70 | #include <mach/kern_return.h> | |
71 | #include <mach/vm_param.h> | |
72 | #include <mach/vm_types.h> | |
73 | #include <mach/vm_attributes.h> | |
74 | #include <mach/boolean.h> | |
75 | #include <mach/vm_prot.h> | |
76 | ||
91447636 A |
77 | #ifdef KERNEL_PRIVATE |
78 | ||
1c79356b A |
79 | /* |
80 | * The following is a description of the interface to the | |
81 | * machine-dependent "physical map" data structure. The module | |
82 | * must provide a "pmap_t" data type that represents the | |
83 | * set of valid virtual-to-physical addresses for one user | |
84 | * address space. [The kernel address space is represented | |
85 | * by a distinguished "pmap_t".] The routines described manage | |
86 | * this type, install and update virtual-to-physical mappings, | |
87 | * and perform operations on physical addresses common to | |
88 | * many address spaces. | |
89 | */ | |
90 | ||
55e303ae | 91 | /* Copy between a physical page and a virtual address */ |
91447636 | 92 | /* LP64todo - switch to vm_map_offset_t when it grows */ |
55e303ae A |
93 | extern kern_return_t copypv( |
94 | addr64_t source, | |
95 | addr64_t sink, | |
96 | unsigned int size, | |
97 | int which); | |
91447636 A |
98 | #define cppvPsnk 1 |
99 | #define cppvPsnkb 31 | |
100 | #define cppvPsrc 2 | |
101 | #define cppvPsrcb 30 | |
102 | #define cppvFsnk 4 | |
103 | #define cppvFsnkb 29 | |
104 | #define cppvFsrc 8 | |
105 | #define cppvFsrcb 28 | |
55e303ae | 106 | #define cppvNoModSnk 16 |
91447636 | 107 | #define cppvNoModSnkb 27 |
55e303ae | 108 | #define cppvNoRefSrc 32 |
91447636 A |
109 | #define cppvNoRefSrcb 26 |
110 | #define cppvKmap 64 /* Use the kernel's vm_map */ | |
111 | #define cppvKmapb 25 | |
1c79356b | 112 | |
3e170ce0 A |
113 | extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last); |
114 | ||
91447636 | 115 | #ifdef MACH_KERNEL_PRIVATE |
1c79356b | 116 | |
fe8ab488 A |
117 | #include <mach_assert.h> |
118 | ||
1c79356b A |
119 | #include <machine/pmap.h> |
120 | ||
121 | /* | |
122 | * Routines used for initialization. | |
123 | * There is traditionally also a pmap_bootstrap, | |
124 | * used very early by machine-dependent code, | |
125 | * but it is not part of the interface. | |
91447636 A |
126 | * |
127 | * LP64todo - | |
128 | * These interfaces are tied to the size of the | |
129 | * kernel pmap - and therefore use the "local" | |
130 | * vm_offset_t, etc... types. | |
1c79356b A |
131 | */ |
132 | ||
91447636 | 133 | extern void *pmap_steal_memory(vm_size_t size); |
1c79356b A |
134 | /* During VM initialization, |
135 | * steal a chunk of memory. | |
136 | */ | |
137 | extern unsigned int pmap_free_pages(void); /* During VM initialization, | |
138 | * report remaining unused | |
139 | * physical pages. | |
140 | */ | |
141 | extern void pmap_startup( | |
142 | vm_offset_t *startp, | |
143 | vm_offset_t *endp); | |
144 | /* During VM initialization, | |
145 | * use remaining physical pages | |
146 | * to allocate page frames. | |
147 | */ | |
39236c6e | 148 | extern void pmap_init(void); |
2d21ac55 | 149 | /* Initialization, |
1c79356b A |
150 | * after kernel runs |
151 | * in virtual memory. | |
152 | */ | |
153 | ||
91447636 A |
154 | extern void mapping_adjust(void); /* Adjust free mapping count */ |
155 | ||
156 | extern void mapping_free_prime(void); /* Primes the mapping block release list */ | |
157 | ||
1c79356b A |
158 | #ifndef MACHINE_PAGES |
159 | /* | |
160 | * If machine/pmap.h defines MACHINE_PAGES, it must implement | |
161 | * the above functions. The pmap module has complete control. | |
162 | * Otherwise, it must implement | |
163 | * pmap_free_pages | |
164 | * pmap_virtual_space | |
165 | * pmap_next_page | |
166 | * pmap_init | |
167 | * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup | |
168 | * using pmap_free_pages, pmap_next_page, pmap_virtual_space, | |
169 | * and pmap_enter. pmap_free_pages may over-estimate the number | |
170 | * of unused physical pages, and pmap_next_page may return FALSE | |
171 | * to indicate that there are no more unused pages to return. | |
172 | * However, for best performance pmap_free_pages should be accurate. | |
173 | */ | |
174 | ||
55e303ae | 175 | extern boolean_t pmap_next_page(ppnum_t *pnum); |
0b4c1975 | 176 | extern boolean_t pmap_next_page_hi(ppnum_t *pnum); |
1c79356b A |
177 | /* During VM initialization, |
178 | * return the next unused | |
179 | * physical page. | |
180 | */ | |
181 | extern void pmap_virtual_space( | |
182 | vm_offset_t *virtual_start, | |
183 | vm_offset_t *virtual_end); | |
184 | /* During VM initialization, | |
185 | * report virtual space | |
186 | * available for the kernel. | |
187 | */ | |
188 | #endif /* MACHINE_PAGES */ | |
189 | ||
190 | /* | |
191 | * Routines to manage the physical map data structure. | |
192 | */ | |
0c530ab8 | 193 | extern pmap_t pmap_create( /* Create a pmap_t. */ |
316670eb | 194 | ledger_t ledger, |
0c530ab8 | 195 | vm_map_size_t size, |
3e170ce0 A |
196 | boolean_t is_64bit); |
197 | #if __x86_64__ | |
198 | extern pmap_t pmap_create_options( | |
199 | ledger_t ledger, | |
200 | vm_map_size_t size, | |
201 | int flags); | |
202 | #endif | |
203 | ||
1c79356b A |
204 | extern pmap_t (pmap_kernel)(void); /* Return the kernel's pmap */ |
205 | extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ | |
206 | extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ | |
207 | extern void pmap_switch(pmap_t); | |
208 | ||
fe8ab488 A |
209 | #if MACH_ASSERT |
210 | extern void pmap_set_process(pmap_t pmap, | |
211 | int pid, | |
212 | char *procname); | |
213 | #endif /* MACH_ASSERT */ | |
1c79356b | 214 | |
5ba3f43e | 215 | extern kern_return_t pmap_enter( /* Enter a mapping */ |
1c79356b | 216 | pmap_t pmap, |
91447636 | 217 | vm_map_offset_t v, |
55e303ae | 218 | ppnum_t pn, |
1c79356b | 219 | vm_prot_t prot, |
316670eb | 220 | vm_prot_t fault_type, |
9bccf70c | 221 | unsigned int flags, |
1c79356b A |
222 | boolean_t wired); |
223 | ||
d1ecb069 A |
224 | extern kern_return_t pmap_enter_options( |
225 | pmap_t pmap, | |
226 | vm_map_offset_t v, | |
227 | ppnum_t pn, | |
228 | vm_prot_t prot, | |
316670eb | 229 | vm_prot_t fault_type, |
d1ecb069 A |
230 | unsigned int flags, |
231 | boolean_t wired, | |
39236c6e A |
232 | unsigned int options, |
233 | void *arg); | |
d1ecb069 | 234 | |
0b4e3aa0 A |
235 | extern void pmap_remove_some_phys( |
236 | pmap_t pmap, | |
55e303ae | 237 | ppnum_t pn); |
0b4e3aa0 | 238 | |
fe8ab488 A |
239 | extern void pmap_lock_phys_page( |
240 | ppnum_t pn); | |
241 | ||
242 | extern void pmap_unlock_phys_page( | |
243 | ppnum_t pn); | |
244 | ||
1c79356b A |
245 | |
246 | /* | |
247 | * Routines that operate on physical addresses. | |
248 | */ | |
0b4e3aa0 | 249 | |
1c79356b | 250 | extern void pmap_page_protect( /* Restrict access to page. */ |
55e303ae | 251 | ppnum_t phys, |
1c79356b A |
252 | vm_prot_t prot); |
253 | ||
39236c6e A |
254 | extern void pmap_page_protect_options( /* Restrict access to page. */ |
255 | ppnum_t phys, | |
256 | vm_prot_t prot, | |
257 | unsigned int options, | |
258 | void *arg); | |
259 | ||
1c79356b | 260 | extern void (pmap_zero_page)( |
55e303ae | 261 | ppnum_t pn); |
1c79356b A |
262 | |
263 | extern void (pmap_zero_part_page)( | |
55e303ae | 264 | ppnum_t pn, |
1c79356b A |
265 | vm_offset_t offset, |
266 | vm_size_t len); | |
267 | ||
268 | extern void (pmap_copy_page)( | |
55e303ae A |
269 | ppnum_t src, |
270 | ppnum_t dest); | |
1c79356b A |
271 | |
272 | extern void (pmap_copy_part_page)( | |
55e303ae | 273 | ppnum_t src, |
1c79356b | 274 | vm_offset_t src_offset, |
55e303ae | 275 | ppnum_t dst, |
1c79356b A |
276 | vm_offset_t dst_offset, |
277 | vm_size_t len); | |
278 | ||
279 | extern void (pmap_copy_part_lpage)( | |
280 | vm_offset_t src, | |
55e303ae | 281 | ppnum_t dst, |
1c79356b A |
282 | vm_offset_t dst_offset, |
283 | vm_size_t len); | |
284 | ||
285 | extern void (pmap_copy_part_rpage)( | |
55e303ae | 286 | ppnum_t src, |
1c79356b A |
287 | vm_offset_t src_offset, |
288 | vm_offset_t dst, | |
289 | vm_size_t len); | |
91447636 A |
290 | |
291 | extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */ | |
292 | ppnum_t phys); | |
293 | ||
39236c6e A |
294 | extern unsigned int (pmap_disconnect_options)( /* disconnect mappings and return reference and change */ |
295 | ppnum_t phys, | |
296 | unsigned int options, | |
297 | void *arg); | |
298 | ||
91447636 A |
299 | extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate |
300 | * cache based on | |
301 | * page number sent */ | |
302 | ppnum_t pn, | |
303 | vm_size_t size, | |
304 | vm_machine_attribute_t attribute, | |
305 | vm_machine_attribute_val_t* value); | |
1c79356b | 306 | |
0c530ab8 A |
307 | extern unsigned int (pmap_cache_attributes)( |
308 | ppnum_t pn); | |
309 | ||
6d2010ae A |
310 | /* |
311 | * Set (override) cache attributes for the specified physical page | |
312 | */ | |
313 | extern void pmap_set_cache_attributes( | |
314 | ppnum_t, | |
315 | unsigned int); | |
5ba3f43e A |
316 | #if defined(__arm__) || defined(__arm64__) |
317 | /* ARM64_TODO */ | |
318 | extern boolean_t pmap_batch_set_cache_attributes( | |
319 | ppnum_t, | |
320 | unsigned int, | |
321 | unsigned int, | |
322 | unsigned int, | |
323 | boolean_t, | |
324 | unsigned int*); | |
325 | #endif | |
2d21ac55 A |
326 | extern void pmap_sync_page_data_phys(ppnum_t pa); |
327 | extern void pmap_sync_page_attributes_phys(ppnum_t pa); | |
328 | ||
1c79356b A |
329 | /* |
330 | * debug/assertions. pmap_verify_free returns true iff | |
331 | * the given physical page is mapped into no pmap. | |
332 | */ | |
55e303ae | 333 | extern boolean_t pmap_verify_free(ppnum_t pn); |
1c79356b A |
334 | |
335 | /* | |
336 | * Statistics routines | |
337 | */ | |
fe8ab488 | 338 | extern int (pmap_compressed)(pmap_t pmap); |
1c79356b | 339 | extern int (pmap_resident_count)(pmap_t pmap); |
2d21ac55 | 340 | extern int (pmap_resident_max)(pmap_t pmap); |
1c79356b A |
341 | |
342 | /* | |
343 | * Sundry required (internal) routines | |
344 | */ | |
2d21ac55 | 345 | #ifdef CURRENTLY_UNUSED_AND_UNTESTED |
1c79356b A |
346 | extern void pmap_collect(pmap_t pmap);/* Perform garbage |
347 | * collection, if any */ | |
2d21ac55 | 348 | #endif |
1c79356b A |
349 | /* |
350 | * Optional routines | |
351 | */ | |
352 | extern void (pmap_copy)( /* Copy range of mappings, | |
353 | * if desired. */ | |
354 | pmap_t dest, | |
355 | pmap_t source, | |
91447636 A |
356 | vm_map_offset_t dest_va, |
357 | vm_map_size_t size, | |
358 | vm_map_offset_t source_va); | |
1c79356b A |
359 | |
360 | extern kern_return_t (pmap_attribute)( /* Get/Set special memory | |
361 | * attributes */ | |
362 | pmap_t pmap, | |
91447636 A |
363 | vm_map_offset_t va, |
364 | vm_map_size_t size, | |
1c79356b A |
365 | vm_machine_attribute_t attribute, |
366 | vm_machine_attribute_val_t* value); | |
367 | ||
/*
 * Routines defined as macros.
 *
 * Each PMAP_{ACTIVATE,DEACTIVATE}_{USER,KERNEL} macro defaults to a no-op
 * unless machine/pmap.h provides PMAP_ACTIVATE / PMAP_DEACTIVATE.
 */
#ifndef		PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define		PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define		PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;					\
								\
	pmap = (thr)->map->pmap;				\
	if (pmap != pmap_kernel())				\
		PMAP_ACTIVATE(pmap, (thr), (cpu));		\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef	PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define		PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define		PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;					\
								\
	pmap = (thr)->map->pmap;				\
	if ((pmap) != pmap_kernel())				\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));		\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */

#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define		PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define		PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define		PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define		PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */

#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter().
 *	Derives PMAP_OPTIONS_INTERNAL / PMAP_OPTIONS_REUSABLE from the
 *	vm_page/vm_object state and forwards to pmap_enter_options().
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type,	\
		   flags, wired, result)				\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__options = 0;					\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__options |= PMAP_OPTIONS_INTERNAL;			\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__options |= PMAP_OPTIONS_REUSABLE;			\
	}								\
	result = pmap_enter_options(__pmap,				\
				    (virtual_address),			\
				    VM_PAGE_GET_PHYS_PAGE(__page),	\
				    (protection),			\
				    (fault_type),			\
				    (flags),				\
				    (wired),				\
				    __options,				\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER */

#ifndef	PMAP_ENTER_OPTIONS
/*
 *	As PMAP_ENTER, but merges the caller's "options" with the
 *	internal/reusable options derived from the page and its object.
 */
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
			   fault_type, flags, wired, options, result)	\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__extra_options = 0;				\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__extra_options |= PMAP_OPTIONS_INTERNAL;		\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__extra_options |= PMAP_OPTIONS_REUSABLE;		\
	}								\
	result = pmap_enter_options(__pmap,				\
				    (virtual_address),			\
				    VM_PAGE_GET_PHYS_PAGE(__page),	\
				    (protection),			\
				    (fault_type),			\
				    (flags),				\
				    (wired),				\
				    (options) | __extra_options,	\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */

316670eb A |
478 | #ifndef PMAP_SET_CACHE_ATTR |
479 | #define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \ | |
480 | MACRO_BEGIN \ | |
481 | if (!batch_pmap_op) { \ | |
39037602 | 482 | pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \ |
316670eb A |
483 | object->set_cache_attr = TRUE; \ |
484 | } \ | |
485 | MACRO_END | |
486 | #endif /* PMAP_SET_CACHE_ATTR */ | |
487 | ||
488 | #ifndef PMAP_BATCH_SET_CACHE_ATTR | |
5ba3f43e A |
489 | #if defined(__arm__) || defined(__arm64__) |
490 | #define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ | |
491 | cache_attr, num_pages, batch_pmap_op) \ | |
492 | MACRO_BEGIN \ | |
493 | if ((batch_pmap_op)) { \ | |
494 | unsigned int __page_idx=0; \ | |
495 | unsigned int res=0; \ | |
496 | boolean_t batch=TRUE; \ | |
497 | while (__page_idx < (num_pages)) { \ | |
498 | if (!pmap_batch_set_cache_attributes( \ | |
499 | user_page_list[__page_idx].phys_addr, \ | |
500 | (cache_attr), \ | |
501 | (num_pages), \ | |
502 | (__page_idx), \ | |
503 | FALSE, \ | |
504 | (&res))) { \ | |
505 | batch = FALSE; \ | |
506 | break; \ | |
507 | } \ | |
508 | __page_idx++; \ | |
509 | } \ | |
510 | __page_idx=0; \ | |
511 | res=0; \ | |
512 | while (__page_idx < (num_pages)) { \ | |
513 | if (batch) \ | |
514 | (void)pmap_batch_set_cache_attributes( \ | |
515 | user_page_list[__page_idx].phys_addr, \ | |
516 | (cache_attr), \ | |
517 | (num_pages), \ | |
518 | (__page_idx), \ | |
519 | TRUE, \ | |
520 | (&res)); \ | |
521 | else \ | |
522 | pmap_set_cache_attributes( \ | |
523 | user_page_list[__page_idx].phys_addr, \ | |
524 | (cache_attr)); \ | |
525 | __page_idx++; \ | |
526 | } \ | |
527 | (object)->set_cache_attr = TRUE; \ | |
528 | } \ | |
529 | MACRO_END | |
530 | #else | |
316670eb A |
531 | #define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ |
532 | cache_attr, num_pages, batch_pmap_op) \ | |
533 | MACRO_BEGIN \ | |
534 | if ((batch_pmap_op)) { \ | |
535 | unsigned int __page_idx=0; \ | |
536 | while (__page_idx < (num_pages)) { \ | |
537 | pmap_set_cache_attributes( \ | |
538 | user_page_list[__page_idx].phys_addr, \ | |
539 | (cache_attr)); \ | |
540 | __page_idx++; \ | |
541 | } \ | |
542 | (object)->set_cache_attr = TRUE; \ | |
543 | } \ | |
544 | MACRO_END | |
5ba3f43e | 545 | #endif |
316670eb A |
546 | #endif /* PMAP_BATCH_SET_CACHE_ATTR */ |
547 | ||
d1ecb069 A |
548 | #define PMAP_ENTER_CHECK(pmap, page) \ |
549 | { \ | |
d1ecb069 | 550 | if ((page)->error) { \ |
2d21ac55 | 551 | panic("VM page %p should not have an error\n", \ |
d1ecb069 | 552 | (page)); \ |
2d21ac55 | 553 | } \ |
d1ecb069 | 554 | } |
1c79356b | 555 | |
1c79356b A |
556 | /* |
557 | * Routines to manage reference/modify bits based on | |
558 | * physical addresses, simulating them if not provided | |
559 | * by the hardware. | |
560 | */ | |
39236c6e A |
561 | struct pfc { |
562 | long pfc_cpus; | |
563 | long pfc_invalid_global; | |
564 | }; | |
565 | ||
566 | typedef struct pfc pmap_flush_context; | |
567 | ||
1c79356b | 568 | /* Clear reference bit */ |
55e303ae | 569 | extern void pmap_clear_reference(ppnum_t pn); |
1c79356b | 570 | /* Return reference bit */ |
55e303ae | 571 | extern boolean_t (pmap_is_referenced)(ppnum_t pn); |
1c79356b | 572 | /* Set modify bit */ |
55e303ae | 573 | extern void pmap_set_modify(ppnum_t pn); |
1c79356b | 574 | /* Clear modify bit */ |
55e303ae | 575 | extern void pmap_clear_modify(ppnum_t pn); |
1c79356b | 576 | /* Return modify bit */ |
55e303ae | 577 | extern boolean_t pmap_is_modified(ppnum_t pn); |
91447636 A |
578 | /* Return modified and referenced bits */ |
579 | extern unsigned int pmap_get_refmod(ppnum_t pn); | |
580 | /* Clear modified and referenced bits */ | |
581 | extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask); | |
582 | #define VM_MEM_MODIFIED 0x01 /* Modified bit */ | |
583 | #define VM_MEM_REFERENCED 0x02 /* Referenced bit */ | |
39236c6e A |
584 | extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *); |
585 | ||
586 | ||
587 | extern void pmap_flush_context_init(pmap_flush_context *); | |
588 | extern void pmap_flush(pmap_flush_context *); | |
1c79356b A |
589 | |
590 | /* | |
591 | * Routines that operate on ranges of virtual addresses. | |
592 | */ | |
1c79356b A |
593 | extern void pmap_protect( /* Change protections. */ |
594 | pmap_t map, | |
91447636 A |
595 | vm_map_offset_t s, |
596 | vm_map_offset_t e, | |
1c79356b A |
597 | vm_prot_t prot); |
598 | ||
39236c6e A |
599 | extern void pmap_protect_options( /* Change protections. */ |
600 | pmap_t map, | |
601 | vm_map_offset_t s, | |
602 | vm_map_offset_t e, | |
603 | vm_prot_t prot, | |
604 | unsigned int options, | |
605 | void *arg); | |
606 | ||
1c79356b A |
607 | extern void (pmap_pageable)( |
608 | pmap_t pmap, | |
91447636 A |
609 | vm_map_offset_t start, |
610 | vm_map_offset_t end, | |
1c79356b A |
611 | boolean_t pageable); |
612 | ||
b0d623f7 | 613 | |
2d21ac55 A |
614 | extern uint64_t pmap_nesting_size_min; |
615 | extern uint64_t pmap_nesting_size_max; | |
b0d623f7 | 616 | |
6d2010ae A |
617 | extern kern_return_t pmap_nest(pmap_t, |
618 | pmap_t, | |
619 | addr64_t, | |
620 | addr64_t, | |
621 | uint64_t); | |
622 | extern kern_return_t pmap_unnest(pmap_t, | |
623 | addr64_t, | |
624 | uint64_t); | |
3e170ce0 A |
625 | |
626 | #define PMAP_UNNEST_CLEAN 1 | |
627 | ||
628 | extern kern_return_t pmap_unnest_options(pmap_t, | |
629 | addr64_t, | |
630 | uint64_t, | |
631 | unsigned int); | |
b0d623f7 | 632 | extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *); |
39037602 | 633 | extern void pmap_advise_pagezero_range(pmap_t, uint64_t); |
91447636 | 634 | #endif /* MACH_KERNEL_PRIVATE */ |
9bccf70c | 635 | |
0b4c1975 A |
636 | extern boolean_t pmap_is_noencrypt(ppnum_t); |
637 | extern void pmap_set_noencrypt(ppnum_t pn); | |
638 | extern void pmap_clear_noencrypt(ppnum_t pn); | |
639 | ||
9bccf70c A |
640 | /* |
641 | * JMM - This portion is exported to other kernel components right now, | |
642 | * but will be pulled back in the future when the needed functionality | |
643 | * is provided in a cleaner manner. | |
644 | */ | |
645 | ||
9bccf70c A |
646 | extern pmap_t kernel_pmap; /* The kernel's map */ |
647 | #define pmap_kernel() (kernel_pmap) | |
648 | ||
649 | /* machine independent WIMG bits */ | |
650 | ||
91447636 A |
651 | #define VM_MEM_GUARDED 0x1 /* (G) Guarded Storage */ |
652 | #define VM_MEM_COHERENT 0x2 /* (M) Memory Coherency */ | |
653 | #define VM_MEM_NOT_CACHEABLE 0x4 /* (I) Cache Inhibit */ | |
654 | #define VM_MEM_WRITE_THROUGH 0x8 /* (W) Write-Through */ | |
9bccf70c | 655 | |
6d2010ae | 656 | #define VM_WIMG_USE_DEFAULT 0x80 |
9bccf70c | 657 | #define VM_WIMG_MASK 0xFF |
9bccf70c | 658 | |
b0d623f7 | 659 | #define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */ |
316670eb | 660 | #define VM_MEM_STACK 0x200 |
d1ecb069 | 661 | |
3e170ce0 | 662 | #if __x86_64__ |
5c9f4661 A |
663 | /* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS |
664 | * definitions in i386/pmap_internal.h | |
665 | */ | |
3e170ce0 A |
666 | #define PMAP_CREATE_64BIT 0x1 |
667 | #define PMAP_CREATE_EPT 0x2 | |
668 | #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT) | |
669 | #endif | |
670 | ||
d1ecb069 A |
671 | #define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return |
672 | * KERN_RESOURCE_SHORTAGE | |
673 | * instead */ | |
316670eb A |
674 | #define PMAP_OPTIONS_NOENTER 0x2 /* expand pmap if needed |
675 | * but don't enter mapping | |
676 | */ | |
39236c6e A |
677 | #define PMAP_OPTIONS_COMPRESSOR 0x4 /* credit the compressor for |
678 | * this operation */ | |
679 | #define PMAP_OPTIONS_INTERNAL 0x8 /* page from internal object */ | |
680 | #define PMAP_OPTIONS_REUSABLE 0x10 /* page is "reusable" */ | |
681 | #define PMAP_OPTIONS_NOFLUSH 0x20 /* delay flushing of pmap */ | |
682 | #define PMAP_OPTIONS_NOREFMOD 0x40 /* don't need ref/mod on disconnect */ | |
fe8ab488 | 683 | #define PMAP_OPTIONS_ALT_ACCT 0x80 /* use alternate accounting scheme for page */ |
39236c6e | 684 | #define PMAP_OPTIONS_REMOVE 0x100 /* removing a mapping */ |
fe8ab488 A |
685 | #define PMAP_OPTIONS_SET_REUSABLE 0x200 /* page is now "reusable" */ |
686 | #define PMAP_OPTIONS_CLEAR_REUSABLE 0x400 /* page no longer "reusable" */ | |
3e170ce0 A |
687 | #define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor |
688 | * iff page was modified */ | |
5ba3f43e A |
689 | #define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be |
690 | * be upgraded */ | |
691 | ||
d1ecb069 | 692 | |
b0d623f7 | 693 | #if !defined(__LP64__) |
1c79356b | 694 | extern vm_offset_t pmap_extract(pmap_t pmap, |
91447636 | 695 | vm_map_offset_t va); |
b0d623f7 | 696 | #endif |
1c79356b A |
697 | extern void pmap_change_wiring( /* Specify pageability */ |
698 | pmap_t pmap, | |
91447636 | 699 | vm_map_offset_t va, |
1c79356b | 700 | boolean_t wired); |
9bccf70c | 701 | |
91447636 | 702 | /* LP64todo - switch to vm_map_offset_t when it grows */ |
9bccf70c A |
703 | extern void pmap_remove( /* Remove mappings. */ |
704 | pmap_t map, | |
2d21ac55 A |
705 | vm_map_offset_t s, |
706 | vm_map_offset_t e); | |
707 | ||
39236c6e A |
708 | extern void pmap_remove_options( /* Remove mappings. */ |
709 | pmap_t map, | |
710 | vm_map_offset_t s, | |
711 | vm_map_offset_t e, | |
712 | int options); | |
713 | ||
2d21ac55 | 714 | extern void fillPage(ppnum_t pa, unsigned int fill); |
55e303ae | 715 | |
b0d623f7 A |
716 | #if defined(__LP64__) |
717 | void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr); | |
718 | #endif | |
719 | ||
4bd07ac2 A |
720 | mach_vm_size_t pmap_query_resident(pmap_t pmap, |
721 | vm_map_offset_t s, | |
722 | vm_map_offset_t e, | |
723 | mach_vm_size_t *compressed_bytes_p); | |
3e170ce0 | 724 | |
5ba3f43e A |
725 | /* Inform the pmap layer that there is a JIT entry in this map. */ |
726 | extern void pmap_set_jit_entitled(pmap_t pmap); | |
727 | ||
728 | /* | |
729 | * Indicates if any special policy is applied to this protection by the pmap | |
730 | * layer. | |
731 | */ | |
732 | bool pmap_has_prot_policy(vm_prot_t prot); | |
733 | ||
734 | /* | |
735 | * Causes the pmap to return any available pages that it can return cheaply to | |
736 | * the VM. | |
737 | */ | |
738 | void pmap_release_pages_fast(void); | |
739 | ||
39037602 A |
740 | #define PMAP_QUERY_PAGE_PRESENT 0x01 |
741 | #define PMAP_QUERY_PAGE_REUSABLE 0x02 | |
742 | #define PMAP_QUERY_PAGE_INTERNAL 0x04 | |
743 | #define PMAP_QUERY_PAGE_ALTACCT 0x08 | |
744 | #define PMAP_QUERY_PAGE_COMPRESSED 0x10 | |
745 | #define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20 | |
746 | extern kern_return_t pmap_query_page_info( | |
747 | pmap_t pmap, | |
748 | vm_map_offset_t va, | |
749 | int *disp); | |
750 | ||
3e170ce0 A |
751 | #if CONFIG_PGTRACE |
752 | int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end); | |
753 | int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end); | |
754 | kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss); | |
755 | #endif | |
39236c6e | 756 | |
91447636 | 757 | #endif /* KERNEL_PRIVATE */ |
9bccf70c | 758 | |
1c79356b | 759 | #endif /* _VM_PMAP_H_ */ |