/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef	_VM_PMAP_H_
#define	_VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef	KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
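
/*
 * Usage sketch: how the machine-independent VM layer might install and
 * remove a translation through this interface.  The variables "va" and
 * "pn" are hypothetical; error handling is omitted.
 */
#if 0	/* illustrative only -- not compiled */
	pmap_enter(pmap_kernel(), va, pn,
		   VM_PROT_READ | VM_PROT_WRITE,	/* new protection */
		   VM_PROT_NONE,			/* no fault being serviced */
		   0, FALSE);				/* default flags, not wired */
	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);	/* and tear it down */
#endif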

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t	copypv(
				addr64_t source,
				addr64_t sink,
				unsigned int size,
				int which);
#define cppvPsnk	1
#define cppvPsnkb	31
#define cppvPsrc	2
#define cppvPsrcb	30
#define cppvFsnk	4
#define cppvFsnkb	29
#define cppvFsrc	8
#define cppvFsrcb	28
#define cppvNoModSnk	16
#define cppvNoModSnkb	27
#define cppvNoRefSrc	32
#define cppvNoRefSrcb	26
#define cppvKmap	64	/* Use the kernel's vm_map */
#define cppvKmapb	25
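
/*
 * Usage sketch: the "which" argument is a bitmask built from the cppv*
 * flags above.  Copying a page from a physical source into a kernel
 * virtual address might look like this; "src_phys_addr" and "kernel_va"
 * are hypothetical addr64_t values.
 */
#if 0	/* illustrative only -- not compiled */
	kern_return_t kr;

	kr = copypv(src_phys_addr,	/* physical source (cppvPsrc) */
		    kernel_va,		/* virtual sink, resolved via the
					 * kernel's vm_map (cppvKmap) */
		    PAGE_SIZE,
		    cppvPsrc | cppvNoRefSrc | cppvKmap);
#endif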

#ifdef	MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void		*pmap_steal_memory(vm_size_t size);
					/* During VM initialization,
					 * steal a chunk of memory.
					 */
extern unsigned int	pmap_free_pages(void);
					/* During VM initialization,
					 * report remaining unused
					 * physical pages.
					 */
extern void		pmap_startup(
				vm_offset_t *startp,
				vm_offset_t *endp);
					/* During VM initialization,
					 * use remaining physical pages
					 * to allocate page frames.
					 */
extern void		pmap_init(void);
					/* Initialization,
					 * after kernel runs
					 * in virtual memory.
					 */

extern void		mapping_adjust(void);	/* Adjust free mapping count */

extern void		mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t	pmap_next_page(ppnum_t *pnum);
extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
					/* During VM initialization,
					 * return the next unused
					 * physical page.
					 */
extern void		pmap_virtual_space(
				vm_offset_t	*virtual_start,
				vm_offset_t	*virtual_end);
					/* During VM initialization,
					 * report virtual space
					 * available for the kernel.
					 */
#endif	/* MACHINE_PAGES */
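
/*
 * Sketch of the vm/vm_resident.c scheme described above: a generic
 * pmap_steal_memory() can be built from pmap_next_page() and
 * pmap_enter().  This is a simplified illustration (alignment and
 * bookkeeping are omitted, and "next_va" is a hypothetical cursor
 * seeded from pmap_virtual_space()); the real code lives in
 * vm/vm_resident.c.
 */
#if 0	/* illustrative only -- not compiled */
	void *
	pmap_steal_memory(vm_size_t size)
	{
		static vm_offset_t next_va;	/* from pmap_virtual_space() */
		vm_offset_t	addr, va;
		ppnum_t		pn;

		size = round_page(size);
		addr = next_va;
		for (va = addr; va < addr + size; va += PAGE_SIZE) {
			if (!pmap_next_page(&pn))	/* no more unused pages */
				panic("pmap_steal_memory");
			pmap_enter(kernel_pmap, va, pn,
				   VM_PROT_READ | VM_PROT_WRITE,
				   VM_PROT_NONE, 0, FALSE);
		}
		next_va += size;
		return (void *)addr;
	}
#endif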

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t		pmap_create(	/* Create a pmap_t. */
				ledger_t	ledger,
				vm_map_size_t	size,
				__unused boolean_t	is_64bit);
extern pmap_t		(pmap_kernel)(void);		/* Return the kernel's pmap */
extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void		pmap_destroy(pmap_t pmap);	/* Release a reference. */
extern void		pmap_switch(pmap_t);

#if MACH_ASSERT
extern void		pmap_set_process(pmap_t pmap,
					 int pid,
					 char *procname);
#endif /* MACH_ASSERT */
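
/*
 * Usage sketch: pmap lifetime is reference-counted.  pmap_create()
 * returns a pmap holding one reference; pmap_destroy() releases one.
 * ("ledger" is a hypothetical ledger_t.)
 */
#if 0	/* illustrative only -- not compiled */
	pmap_t pmap = pmap_create(ledger, 0, TRUE);	/* 64-bit user pmap */

	pmap_reference(pmap);	/* a second holder gains a reference...  */
	pmap_destroy(pmap);	/* ...and releases it later              */
	pmap_destroy(pmap);	/* final release frees the pmap          */
#endif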

extern void		pmap_enter(	/* Enter a mapping */
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired);

extern kern_return_t	pmap_enter_options(
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired,
				unsigned int	options,
				void		*arg);

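/*
 * Usage sketch: pmap_enter_options() is the failure-reporting variant of
 * pmap_enter().  With PMAP_OPTIONS_NOWAIT (defined later in this header)
 * it returns KERN_RESOURCE_SHORTAGE instead of blocking; "va" and "pn"
 * are hypothetical.
 */
#if 0	/* illustrative only -- not compiled */
	kern_return_t kr;

	kr = pmap_enter_options(pmap, va, pn,
				VM_PROT_READ, VM_PROT_NONE,
				0, FALSE,
				PMAP_OPTIONS_NOWAIT, NULL);
	if (kr == KERN_RESOURCE_SHORTAGE) {
		/* retry later, once memory pressure subsides */
	}
#endif
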
extern void		pmap_remove_some_phys(
				pmap_t		pmap,
				ppnum_t		pn);

extern void		pmap_lock_phys_page(
				ppnum_t		pn);

extern void		pmap_unlock_phys_page(
				ppnum_t		pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void		pmap_page_protect(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot);

extern void		pmap_page_protect_options(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);

extern void		(pmap_zero_page)(
				ppnum_t		pn);

extern void		(pmap_zero_part_page)(
				ppnum_t		pn,
				vm_offset_t	offset,
				vm_size_t	len);

extern void		(pmap_copy_page)(
				ppnum_t		src,
				ppnum_t		dest);

extern void		(pmap_copy_part_page)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_lpage)(
				vm_offset_t	src,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_rpage)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				vm_offset_t	dst,
				vm_size_t	len);

extern unsigned int	(pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys);

extern unsigned int	(pmap_disconnect_options)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys,
				unsigned int	options,
				void		*arg);

extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
							 * cache based on
							 * page number sent */
				ppnum_t		pn,
				vm_size_t	size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

extern unsigned int	(pmap_cache_attributes)(
				ppnum_t		pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void		pmap_set_cache_attributes(
				ppnum_t,
				unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
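
/*
 * Usage sketch: the attribute argument is assumed to take the
 * machine-independent WIMG bits defined later in this header, e.g.
 * marking a page uncacheable and guarded as one might for device
 * memory ("pn" is hypothetical).
 */
#if 0	/* illustrative only -- not compiled */
	pmap_set_cache_attributes(pn, VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED);
#endif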

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t	pmap_verify_free(ppnum_t pn);

/*
 *	Statistics routines
 */
extern int		(pmap_compressed)(pmap_t pmap);
extern int		(pmap_resident_count)(pmap_t pmap);
extern int		(pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void		pmap_collect(pmap_t pmap);	/* Perform garbage
							 * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void		(pmap_copy)(	/* Copy range of mappings,
					 * if desired. */
				pmap_t		dest,
				pmap_t		source,
				vm_map_offset_t	dest_va,
				vm_map_size_t	size,
				vm_map_offset_t	source_va);

extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
						 * attributes */
				pmap_t		pmap,
				vm_map_offset_t	va,
				vm_map_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef	PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef	PMAP_DEACTIVATE_USER
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */

#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */

#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__options = 0;					\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	if (__page->object->internal) {					\
		__options |= PMAP_OPTIONS_INTERNAL;			\
	}								\
	if (__page->reusable || __page->object->all_reusable) {	\
		__options |= PMAP_OPTIONS_REUSABLE;			\
	}								\
	(void) pmap_enter_options(__pmap,				\
				  (virtual_address),			\
				  __page->phys_page,			\
				  (protection),				\
				  (fault_type),				\
				  (flags),				\
				  (wired),				\
				  __options,				\
				  NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER */

#ifndef	PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
			   fault_type, flags, wired, options, result)	\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__extra_options = 0;				\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	if (__page->object->internal) {					\
		__extra_options |= PMAP_OPTIONS_INTERNAL;		\
	}								\
	if (__page->reusable || __page->object->all_reusable) {	\
		__extra_options |= PMAP_OPTIONS_REUSABLE;		\
	}								\
	result = pmap_enter_options(__pmap,				\
				    (virtual_address),			\
				    __page->phys_page,			\
				    (protection),			\
				    (fault_type),			\
				    (flags),				\
				    (wired),				\
				    (options) | __extra_options,	\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */

#ifndef	PMAP_SET_CACHE_ATTR
#define	PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)	\
	MACRO_BEGIN							\
		if (!batch_pmap_op) {					\
			pmap_set_cache_attributes(mem->phys_page, cache_attr); \
			object->set_cache_attr = TRUE;			\
		}							\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */

#ifndef	PMAP_BATCH_SET_CACHE_ATTR
#define	PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
				  cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN							\
		if ((batch_pmap_op)) {					\
			unsigned int __page_idx = 0;			\
			while (__page_idx < (num_pages)) {		\
				pmap_set_cache_attributes(		\
					user_page_list[__page_idx].phys_addr, \
					(cache_attr));			\
				__page_idx++;				\
			}						\
			(object)->set_cache_attr = TRUE;		\
		}							\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
		      (page));						\
	}								\
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long	pfc_cpus;
	long	pfc_invalid_global;
};

typedef	struct pfc	pmap_flush_context;

				/* Clear reference bit */
extern void		pmap_clear_reference(ppnum_t pn);
				/* Return reference bit */
extern boolean_t	(pmap_is_referenced)(ppnum_t pn);
				/* Set modify bit */
extern void		pmap_set_modify(ppnum_t pn);
				/* Clear modify bit */
extern void		pmap_clear_modify(ppnum_t pn);
				/* Return modify bit */
extern boolean_t	pmap_is_modified(ppnum_t pn);
				/* Return modified and referenced bits */
extern unsigned int	pmap_get_refmod(ppnum_t pn);
				/* Clear modified and referenced bits */
extern void		pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
extern void		pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
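
/*
 * Usage sketch: a pageout path might sample and clear both bits in one
 * round trip ("pn" is hypothetical).
 */
#if 0	/* illustrative only -- not compiled */
	unsigned int refmod = pmap_get_refmod(pn);

	if (refmod & VM_MEM_MODIFIED) {
		/* page is dirty and must be cleaned before reuse */
	}
	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
#endif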

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void		pmap_protect(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot);

extern void		pmap_protect_options(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);

extern void		(pmap_pageable)(
				pmap_t		pmap,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	pageable);

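/*
 * Usage sketch: write-protecting a range, e.g. when marking mappings
 * copy-on-write ("start" and "end" are hypothetical).
 */
#if 0	/* illustrative only -- not compiled */
	pmap_protect(pmap, start, end, VM_PROT_READ);
#endif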

extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
			       pmap_t,
			       addr64_t,
			       addr64_t,
			       uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
				 addr64_t,
				 uint64_t);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
#endif	/* MACH_KERNEL_PRIVATE */

extern boolean_t	pmap_is_noencrypt(ppnum_t);
extern void		pmap_set_noencrypt(ppnum_t pn);
extern void		pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t	kernel_pmap;			/* The kernel's map */
#define		pmap_kernel()	(kernel_pmap)

/* machine independent WIMG bits */

#define VM_MEM_GUARDED		0x1	/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2	/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4	/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8	/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF

#define VM_MEM_SUPERPAGE	0x100	/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200
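
/*
 * Usage sketch: the WIMG bits are assumed to travel in the "flags"
 * argument of pmap_enter(); a write-through mapping might then be
 * entered like this ("va" and "pn" are hypothetical).
 */
#if 0	/* illustrative only -- not compiled */
	pmap_enter(pmap, va, pn,
		   VM_PROT_READ | VM_PROT_WRITE,
		   VM_PROT_NONE,
		   VM_MEM_WRITE_THROUGH,	/* WIMG bits as flags */
		   FALSE);
#endif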

#define PMAP_OPTIONS_NOWAIT		0x1	/* don't block, return
						 * KERN_RESOURCE_SHORTAGE
						 * instead */
#define PMAP_OPTIONS_NOENTER		0x2	/* expand pmap if needed
						 * but don't enter mapping
						 */
#define PMAP_OPTIONS_COMPRESSOR		0x4	/* credit the compressor for
						 * this operation */
#define PMAP_OPTIONS_INTERNAL		0x8	/* page from internal object */
#define PMAP_OPTIONS_REUSABLE		0x10	/* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH		0x20	/* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD		0x40	/* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT		0x80	/* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE		0x100	/* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE	0x200	/* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE	0x400	/* page no longer "reusable" */

#if	!defined(__LP64__)
extern vm_offset_t	pmap_extract(pmap_t pmap,
				     vm_map_offset_t va);
#endif
extern void		pmap_change_wiring(	/* Specify pageability */
				pmap_t		pmap,
				vm_map_offset_t	va,
				boolean_t	wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void		pmap_remove(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e);

extern void		pmap_remove_options(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				int		options);

extern void		fillPage(ppnum_t pa, unsigned int fill);

extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);

#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif

unsigned int pmap_query_resident(pmap_t pmap,
				 vm_map_offset_t s,
				 vm_map_offset_t e);
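
/*
 * Usage sketch: tearing down a range of mappings while flagging the
 * operation as a removal ("start" and "end" are hypothetical; the
 * option choice is an assumption, not a required idiom).
 */
#if 0	/* illustrative only -- not compiled */
	pmap_remove_options(pmap, start, end, PMAP_OPTIONS_REMOVE);
#endif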

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */