/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef	_VM_PMAP_H_
#define	_VM_PMAP_H_
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef	KERNEL_PRIVATE
/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
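/*
 * Illustrative sketch only (not part of the interface): a machine-independent
 * VM layer typically drives a pmap roughly as follows, using routines declared
 * later in this header.  "ledger", "va", and "pn" are placeholder values.
 *
 *	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
 *	pmap_enter(pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
 *	           VM_PROT_NONE, 0, FALSE);            // install one translation
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);  // tighten access
 *	pmap_remove(pmap, va, va + PAGE_SIZE);          // tear the mapping down
 *	pmap_destroy(pmap);                             // drop the last reference
 */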
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t	copypv(
				addr64_t	source,
				addr64_t	sink,
				unsigned int	size,
				int		which);

#define cppvNoModSnk	16	/* Don't set the modified bit on the sink page */
#define cppvNoModSnkb	27
#define cppvNoRefSrc	32	/* Don't set the referenced bit on the source page */
#define cppvNoRefSrcb	26
#define cppvKmap	64	/* Use the kernel's vm_map */
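/*
 * Illustrative sketch only: copying one page out of a physical source page
 * into a kernel-virtual sink using the flag bits above.  Additional bits of
 * "which" (defined elsewhere) select which side is physical; the argument
 * values here are placeholders.
 *
 *	kern_return_t kr;
 *
 *	kr = copypv(src_phys_addr, (addr64_t)kernel_dst_va, PAGE_SIZE,
 *	            cppvKmap | cppvNoRefSrc);   // kernel map, leave source unreferenced
 *	if (kr != KERN_SUCCESS) {
 *		// handle a partial or failed copy
 *	}
 */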
extern boolean_t	pmap_has_managed_page(ppnum_t first, ppnum_t last);
#ifdef	MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */
extern void		*pmap_steal_memory(vm_size_t size);
						/* During VM initialization,
						 * steal a chunk of memory.
						 */
extern unsigned int	pmap_free_pages(void);	/* During VM initialization,
						 * report remaining unused
						 * physical pages.
						 */
extern void		pmap_startup(
				vm_offset_t	*startp,
				vm_offset_t	*endp);
						/* During VM initialization,
						 * use remaining physical pages
						 * to allocate page frames.
						 */
extern void		pmap_init(void);	/* Initialize the pmap module. */
extern void		mapping_adjust(void);	/* Adjust free mapping count */

extern void		mapping_free_prime(void); /* Primes the mapping block release list */
#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement pmap_free_pages, pmap_next_page,
 *	and pmap_virtual_space, and vm/vm_resident.c implements
 *	pmap_steal_memory and pmap_startup using pmap_free_pages,
 *	pmap_next_page, pmap_virtual_space, and pmap_enter.
 *	pmap_free_pages may over-estimate the number of unused physical
 *	pages, and pmap_next_page may return FALSE to indicate that
 *	there are no more unused pages to return.  However, for best
 *	performance pmap_free_pages should be accurate.
 */
extern boolean_t	pmap_next_page(ppnum_t *pnum);
extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
						/* During VM initialization,
						 * return the next unused
						 * physical page.
						 */
extern void		pmap_virtual_space(
				vm_offset_t	*virtual_start,
				vm_offset_t	*virtual_end);
						/* During VM initialization,
						 * report virtual space
						 * available for the kernel.
						 */
#endif	/* MACHINE_PAGES */
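/*
 * Simplified, illustrative sketch of the contract described above (not the
 * actual vm/vm_resident.c code): a machine-independent bootstrap can pull
 * free frames one at a time from pmap_next_page() and learn the kernel's
 * usable virtual range from pmap_virtual_space().  The helper name and the
 * accounting are hypothetical.
 *
 *	static void __example_grab_boot_pages(void)
 *	{
 *		vm_offset_t	kva_start, kva_end;
 *		ppnum_t		pn;
 *		unsigned int	grabbed = 0;
 *
 *		pmap_virtual_space(&kva_start, &kva_end);
 *		while (pmap_next_page(&pn)) {   // FALSE => no more unused pages
 *			grabbed++;              // e.g. add pn to the free list
 *		}
 *		// pmap_free_pages() may over-estimate, so "grabbed" is authoritative
 *	}
 */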
/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t		pmap_create(	/* Create a pmap_t. */
				ledger_t	ledger,
				vm_map_size_t	size,
				boolean_t	is_64bit);

extern pmap_t		pmap_create_options(
				ledger_t	ledger,
				vm_map_size_t	size,
				int		flags);
extern pmap_t		(pmap_kernel)(void);		/* Return the kernel's pmap */
extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void		pmap_destroy(pmap_t pmap);	/* Release a reference. */
extern void		pmap_switch(pmap_t);
#if MACH_ASSERT
extern void		pmap_set_process(pmap_t pmap,
					 int pid,
					 char *procname);
#endif /* MACH_ASSERT */
extern void		pmap_enter(	/* Enter a mapping */
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired);

extern kern_return_t	pmap_enter_options(
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired,
				unsigned int	options,
				void		*arg);
extern void		pmap_remove_some_phys(
				pmap_t		pmap,
				ppnum_t		pn);

extern void		pmap_lock_phys_page(
				ppnum_t		pn);

extern void		pmap_unlock_phys_page(
				ppnum_t		pn);
/*
 *	Routines that operate on physical addresses.
 */

extern void		pmap_page_protect(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot);

extern void		pmap_page_protect_options(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);
extern void		(pmap_zero_page)(
				ppnum_t		pn);

extern void		(pmap_zero_part_page)(
				ppnum_t		pn,
				vm_offset_t	offset,
				vm_size_t	len);

extern void		(pmap_copy_page)(
				ppnum_t		src,
				ppnum_t		dest);

extern void		(pmap_copy_part_page)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_lpage)(
				vm_offset_t	src,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_rpage)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				vm_offset_t	dst,
				vm_size_t	len);
extern unsigned int	(pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys);

extern unsigned int	(pmap_disconnect_options)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys,
				unsigned int	options,
				void		*arg);
extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
							 * cache based on
							 * page number sent */
				ppnum_t		pn,
				vm_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t *value);

extern unsigned int	(pmap_cache_attributes)(
				ppnum_t		pn);
/*
 * Set (override) cache attributes for the specified physical page
 */
extern void		pmap_set_cache_attributes(
				ppnum_t		pn,
				unsigned int	cacheattr);
extern void		pmap_sync_page_data_phys(ppnum_t pa);
extern void		pmap_sync_page_attributes_phys(ppnum_t pa);
/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t	pmap_verify_free(ppnum_t pn);
/*
 *	Statistics routines
 */
extern int		(pmap_compressed)(pmap_t pmap);
extern int		(pmap_resident_count)(pmap_t pmap);
extern int		(pmap_resident_max)(pmap_t pmap);
/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void		pmap_collect(pmap_t pmap);	/* Perform garbage
							 * collection, if any */
#endif /* CURRENTLY_UNUSED_AND_UNTESTED */
extern void		(pmap_copy)(	/* Copy range of mappings,
					 * if desired. */
				pmap_t		dest,
				pmap_t		source,
				vm_map_offset_t	dest_va,
				vm_map_size_t	size,
				vm_map_offset_t	source_va);
extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
						 * attributes */
				pmap_t		pmap,
				vm_map_offset_t	va,
				vm_map_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t *value);
/*
 *	Routines defined as macros.
 */
#ifndef	PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */
#ifndef	PMAP_DEACTIVATE_USER
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */
#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */
#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */
#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__options = 0;					\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__options |= PMAP_OPTIONS_INTERNAL;			\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__options |= PMAP_OPTIONS_REUSABLE;			\
	}								\
	(void) pmap_enter_options(__pmap, (virtual_address),		\
				  VM_PAGE_GET_PHYS_PAGE(__page),	\
				  (protection), (fault_type), (flags),	\
				  (wired), __options, NULL);		\
	MACRO_END
#endif	/* !PMAP_ENTER */
#ifndef	PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
			   fault_type, flags, wired, options, result)	\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
	int		__extra_options = 0;				\
	vm_object_t	__obj;						\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	__obj = VM_PAGE_OBJECT(__page);					\
	if (__obj->internal) {						\
		__extra_options |= PMAP_OPTIONS_INTERNAL;		\
	}								\
	if (__page->reusable || __obj->all_reusable) {			\
		__extra_options |= PMAP_OPTIONS_REUSABLE;		\
	}								\
	result = pmap_enter_options(__pmap, (virtual_address),		\
				    VM_PAGE_GET_PHYS_PAGE(__page),	\
				    (protection), (fault_type), (flags), \
				    (wired),				\
				    (options) | __extra_options,	\
				    NULL);				\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */
#ifndef	PMAP_SET_CACHE_ATTR
#define	PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)	\
	MACRO_BEGIN							\
	if (!batch_pmap_op) {						\
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem),	\
					  cache_attr);			\
		object->set_cache_attr = TRUE;				\
	}								\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */
#ifndef	PMAP_BATCH_SET_CACHE_ATTR
#define	PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
				  cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN							\
	if ((batch_pmap_op)) {						\
		unsigned int __page_idx = 0;				\
		while (__page_idx < (num_pages)) {			\
			pmap_set_cache_attributes(			\
				user_page_list[__page_idx].phys_addr,	\
				(cache_attr));				\
			__page_idx++;					\
		}							\
		(object)->set_cache_attr = TRUE;			\
	}								\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */
#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
		      (page));						\
	}								\
}
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long	pfc_cpus;
	long	pfc_invalid_global;
};

typedef	struct pfc	pmap_flush_context;
/* Clear reference bit */
extern void		pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t	(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void		pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void		pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t	pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int	pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void		pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
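/*
 * Illustrative sketch only: how a caller might combine pmap_get_refmod()
 * with the bits above.  "pn" is a placeholder physical page number.
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// page is dirty and must be cleaned before reuse
 *	}
 *	if (refmod & VM_MEM_REFERENCED) {
 *		pmap_clear_refmod(pn, VM_MEM_REFERENCED);  // reset aging state
 *	}
 */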
extern void		pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

extern void		pmap_flush_context_init(pmap_flush_context *);
extern void		pmap_flush(pmap_flush_context *);
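/*
 * Illustrative sketch only (an assumed usage pattern, not a guaranteed
 * contract): a caller clearing ref/mod bits on many pages can batch the TLB
 * shootdowns by passing PMAP_OPTIONS_NOFLUSH (defined later in this header)
 * together with a pmap_flush_context, then issuing one pmap_flush() at the
 * end.  "page_list" and "npages" are placeholders.
 *
 *	pmap_flush_context ctx;
 *	unsigned int i;
 *
 *	pmap_flush_context_init(&ctx);
 *	for (i = 0; i < npages; i++) {
 *		pmap_clear_refmod_options(page_list[i], VM_MEM_REFERENCED,
 *					  PMAP_OPTIONS_NOFLUSH, (void *)&ctx);
 *	}
 *	pmap_flush(&ctx);	// one deferred flush for the whole batch
 */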
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void		pmap_protect(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot);

extern void		pmap_protect_options(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot,
				unsigned int	options,
				void		*arg);

extern void		(pmap_pageable)(
				pmap_t		pmap,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	pageable);
extern uint64_t		pmap_nesting_size_min;
extern uint64_t		pmap_nesting_size_max;
extern kern_return_t	pmap_nest(pmap_t,
				  pmap_t,
				  addr64_t,
				  addr64_t,
				  uint64_t);

extern kern_return_t	pmap_unnest(pmap_t,
				    addr64_t,
				    uint64_t);

#define	PMAP_UNNEST_CLEAN	1
extern kern_return_t	pmap_unnest_options(pmap_t,
					    addr64_t,
					    uint64_t,
					    unsigned int);
extern boolean_t	pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void		pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif	/* MACH_KERNEL_PRIVATE */
extern boolean_t	pmap_is_noencrypt(ppnum_t);
extern void		pmap_set_noencrypt(ppnum_t pn);
extern void		pmap_clear_noencrypt(ppnum_t pn);
/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t		kernel_pmap;		/* The kernel's map */
#define			pmap_kernel()		(kernel_pmap)
/* machine independent WIMG bits */

#define VM_MEM_GUARDED		0x1		/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2		/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4		/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8		/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF

#define VM_MEM_SUPERPAGE	0x100		/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200
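/*
 * Illustrative sketch only: a caller overriding the cacheability of one
 * physical page might combine the WIMG bits above and hand the result to
 * pmap_set_cache_attributes() (declared in the MACH_KERNEL_PRIVATE section).
 * "pn" is a placeholder page number; the attribute choice shown (cache
 * inhibited, guarded) is typical for device-style memory.
 *
 *	unsigned int cacheattr = (VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) & VM_WIMG_MASK;
 *
 *	pmap_set_cache_attributes(pn, cacheattr);
 */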
#define	PMAP_CREATE_64BIT		0x1
#define	PMAP_CREATE_EPT			0x2
#define	PMAP_CREATE_KNOWN_FLAGS		(PMAP_CREATE_64BIT | PMAP_CREATE_EPT)
#define PMAP_OPTIONS_NOWAIT		0x1	/* don't block, return
						 * KERN_RESOURCE_SHORTAGE instead */
#define PMAP_OPTIONS_NOENTER		0x2	/* expand pmap if needed
						 * but don't enter mapping */
#define PMAP_OPTIONS_COMPRESSOR		0x4	/* credit the compressor for
						 * this page */
#define PMAP_OPTIONS_INTERNAL		0x8	/* page from internal object */
#define PMAP_OPTIONS_REUSABLE		0x10	/* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH		0x20	/* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD		0x40	/* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT		0x80	/* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE		0x100	/* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE	0x200	/* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE	0x400	/* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED	0x800	/* credit the compressor
							 * iff page was modified */
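/*
 * Illustrative sketch only: the PMAP_OPTIONS_* bits are passed through the
 * "*_options" variants declared in this header.  For example, a caller
 * tearing mappings down (placeholder arguments) might write:
 *
 *	pmap_remove_options(pmap, start, end, PMAP_OPTIONS_REMOVE);
 *
 * while a path that must not block for memory could pass PMAP_OPTIONS_NOWAIT
 * to pmap_enter_options() and check the result for KERN_RESOURCE_SHORTAGE.
 */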
#if	!defined(__LP64__)
extern vm_offset_t	pmap_extract(pmap_t pmap,
				     vm_map_offset_t va);
#endif	/* !defined(__LP64__) */

extern void		pmap_change_wiring(	/* Specify pageability */
				pmap_t		pmap,
				vm_map_offset_t	va,
				boolean_t	wired);
/* LP64todo - switch to vm_map_offset_t when it grows */
extern void		pmap_remove(	/* Remove mappings. */
				pmap_t		map,
				addr64_t	s,
				addr64_t	e);

extern void		pmap_remove_options(	/* Remove mappings. */
				pmap_t		map,
				addr64_t	s,
				addr64_t	e,
				int		options);
extern void		fillPage(ppnum_t pa, unsigned int fill);

extern void		pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void		pmap_unmap_sharedpage(pmap_t pmap);
#if defined(__LP64__)
void	pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif
mach_vm_size_t	pmap_query_resident(pmap_t pmap,
				    vm_map_offset_t s,
				    vm_map_offset_t e,
				    mach_vm_size_t *compressed_bytes_p);
#define PMAP_QUERY_PAGE_PRESENT			0x01
#define PMAP_QUERY_PAGE_REUSABLE		0x02
#define PMAP_QUERY_PAGE_INTERNAL		0x04
#define PMAP_QUERY_PAGE_ALTACCT			0x08
#define PMAP_QUERY_PAGE_COMPRESSED		0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT	0x20
extern kern_return_t	pmap_query_page_info(
				pmap_t		pmap,
				vm_map_offset_t	va,
				int		*disp);
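/*
 * Illustrative sketch only: querying the disposition of one virtual page and
 * testing the PMAP_QUERY_PAGE_* bits above.  "pmap" and "va" are placeholders.
 *
 *	int disp = 0;
 *
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS) {
 *		if (disp & PMAP_QUERY_PAGE_PRESENT) {
 *			// a translation currently exists for va
 *		} else if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
 *			// the page lives in the compressor, not the pmap
 *		}
 *	}
 */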
int	pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
int	pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);
kern_return_t	pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss);
#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */