/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
#ifndef	_VM_PMAP_H_
#define	_VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef	KERNEL_PRIVATE
/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
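
/*
 * Illustrative sketch (not part of this interface; the variables are
 * hypothetical): the machine-independent VM layer drives a pmap through
 * roughly this lifecycle.
 *
 *	pmap_t pmap = pmap_create(0, FALSE);	// new user address space
 *	pmap_enter(pmap, va, pn,		// install one mapping
 *		   VM_PROT_READ | VM_PROT_WRITE,
 *		   VM_PROT_WRITE, 0, TRUE);	// wired
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);
 *	pmap_remove(pmap, va, va + PAGE_SIZE);	// tear down the range
 *	pmap_destroy(pmap);			// release the last reference
 */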
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t	copypv(
				addr64_t source,
				addr64_t sink,
				unsigned int size,
				int which);
#define cppvNoModSnk	16
#define cppvNoModSnkb	27
#define cppvNoRefSrc	32
#define cppvNoRefSrcb	26
#define cppvKmap	64	/* Use the kernel's vm_map */
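
/*
 * Illustrative sketch (assumption, not part of the interface): the cppv
 * flags combine as a bit mask, and each "...b" constant is the same flag
 * expressed as a big-endian bit number, e.g.
 * cppvNoModSnk == 1 << (31 - cppvNoModSnkb).
 *
 *	kern_return_t kr;
 *	kr = copypv(src, snk, PAGE_SIZE, cppvKmap | cppvNoRefSrc);
 *	if (kr != KERN_SUCCESS)
 *		return (kr);	// copy could not be completed
 */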
#ifdef	MACH_KERNEL_PRIVATE

#include <machine/pmap.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */
extern void		*pmap_steal_memory(vm_size_t size);
						/* During VM initialization,
						 * steal a chunk of memory.
						 */
extern unsigned int	pmap_free_pages(void);	/* During VM initialization,
						 * report remaining unused
						 * physical pages.
						 */
extern void		pmap_startup(
				vm_offset_t *startp,
				vm_offset_t *endp);
						/* During VM initialization,
						 * use remaining physical pages
						 * to allocate page frames.
						 */

extern void		pmap_init(void) __attribute__((section("__TEXT, initcode")));
extern void		mapping_adjust(void);	/* Adjust free mapping count */

extern void		mapping_free_prime(void); /* Primes the mapping block release list */
#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_next_page
 *		pmap_virtual_space
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */
extern boolean_t	pmap_next_page(ppnum_t *pnum);
extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
						/* During VM initialization,
						 * return the next unused
						 * physical page.
						 */
extern void		pmap_virtual_space(
				vm_offset_t	*virtual_start,
				vm_offset_t	*virtual_end);
						/* During VM initialization,
						 * report virtual space
						 * available for the kernel.
						 */
#endif	/* MACHINE_PAGES */
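
/*
 * Illustrative sketch (assumption; not the actual vm/vm_resident.c code):
 * the generic pmap_steal_memory/pmap_startup path enumerates physical
 * pages roughly like this.
 *
 *	ppnum_t		pn;
 *	vm_offset_t	vstart, vend;
 *	unsigned int	avail = pmap_free_pages();	// may over-estimate
 *	while (pmap_next_page(&pn)) {
 *		...		// hand pn to the VM as a free page frame
 *	}
 *	pmap_virtual_space(&vstart, &vend);	// kernel VA left for the VM
 */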
/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t		pmap_create(	/* Create a pmap_t. */
				vm_map_size_t	size,
				__unused boolean_t	is_64bit);
extern pmap_t		(pmap_kernel)(void);	/* Return the kernel's pmap */
extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void		pmap_destroy(pmap_t pmap);	/* Release a reference. */
extern void		pmap_switch(pmap_t);
extern void		pmap_enter(	/* Enter a mapping */
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired);

extern kern_return_t	pmap_enter_options(
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired,
				unsigned int	options);
extern void		pmap_remove_some_phys(
				pmap_t		pmap,
				ppnum_t		pn);
/*
 *	Routines that operate on physical addresses.
 */

extern void		pmap_page_protect(	/* Restrict access to page. */
				ppnum_t		phys,
				vm_prot_t	prot);
extern void		(pmap_zero_page)(
				ppnum_t		pn);

extern void		(pmap_zero_part_page)(
				ppnum_t		pn,
				vm_offset_t	offset,
				vm_size_t	len);

extern void		(pmap_copy_page)(
				ppnum_t		src,
				ppnum_t		dest);

extern void		(pmap_copy_part_page)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_lpage)(
				vm_offset_t	src,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_rpage)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				vm_offset_t	dst,
				vm_size_t	len);
extern unsigned int	(pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys);

extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
							 * cache based on
							 * page number sent */
				ppnum_t		pn,
				vm_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value);

extern unsigned int	(pmap_cache_attributes)(
				ppnum_t		pn);
/*
 * Set (override) cache attributes for the specified physical page
 */
extern void		pmap_set_cache_attributes(
				ppnum_t,
				unsigned int);

extern void		pmap_sync_page_data_phys(ppnum_t pa);
extern void		pmap_sync_page_attributes_phys(ppnum_t pa);
/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t	pmap_verify_free(ppnum_t pn);
/*
 *	Statistics routines
 */
extern int		(pmap_resident_count)(pmap_t pmap);
extern int		(pmap_resident_max)(pmap_t pmap);
/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void		pmap_collect(pmap_t pmap);	/* Perform garbage
							 * collection, if any
							 */
#endif	/* CURRENTLY_UNUSED_AND_UNTESTED */
extern void		(pmap_copy)(	/* Copy range of mappings,
					 * if desired. */
				pmap_t		dest,
				pmap_t		source,
				vm_map_offset_t	dest_va,
				vm_map_offset_t	source_va,
				vm_map_size_t	size);

extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
						 * attributes */
				pmap_t		pmap,
				vm_map_offset_t	va,
				vm_map_size_t	size,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value);
/*
 *	Routines defined as macros.
 */
#ifndef	PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef	PMAP_DEACTIVATE_USER
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */
#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */
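
/*
 * Illustrative sketch (assumption, not part of this file): a
 * machine-dependent <machine/pmap.h> opts into the wrappers above by
 * defining PMAP_ACTIVATE/PMAP_DEACTIVATE itself, e.g.
 *
 *	#define PMAP_ACTIVATE(pmap, thread, cpu)	\
 *		load_pmap_context((pmap), (cpu))	// hypothetical helper
 *
 * If it defines neither, PMAP_ACTIVATE_USER and friends expand to
 * nothing, which is correct for pmaps that need no per-CPU switch work.
 */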
/*
 *	Macro to be used in place of pmap_enter()
 */
#ifndef	PMAP_ENTER
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	pmap_enter(__pmap,						\
		   (virtual_address),					\
		   __page->phys_page,					\
		   (protection),					\
		   (fault_type),					\
		   (flags),						\
		   (wired));						\
	MACRO_END
#endif	/* !PMAP_ENTER */
#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, fault_type, \
			   flags, wired, options, result)		\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	result = pmap_enter_options(__pmap,				\
		(virtual_address),					\
		__page->phys_page,					\
		(protection),						\
		(fault_type),						\
		(flags),						\
		(wired),						\
		(options));						\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */
#ifndef	PMAP_SET_CACHE_ATTR
#define	PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)		\
	MACRO_BEGIN								\
		if (!batch_pmap_op) {						\
			pmap_set_cache_attributes(mem->phys_page, cache_attr);	\
			object->set_cache_attr = TRUE;				\
		}								\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
				  cache_attr, num_pages, batch_pmap_op)	\
	MACRO_BEGIN							\
		if ((batch_pmap_op)) {					\
			unsigned int __page_idx=0;			\
			while (__page_idx < (num_pages)) {		\
				pmap_set_cache_attributes(		\
					user_page_list[__page_idx].phys_addr, \
					(cache_attr));			\
				__page_idx++;				\
			}						\
			(object)->set_cache_attr = TRUE;		\
		}							\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */
#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
		      (page));						\
	}								\
}
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */

				/* Clear reference bit */
extern void		pmap_clear_reference(ppnum_t pn);
				/* Return reference bit */
extern boolean_t	(pmap_is_referenced)(ppnum_t pn);
				/* Set modify bit */
extern void		pmap_set_modify(ppnum_t pn);
				/* Clear modify bit */
extern void		pmap_clear_modify(ppnum_t pn);
				/* Return modify bit */
extern boolean_t	pmap_is_modified(ppnum_t pn);
				/* Return modified and referenced bits */
extern unsigned int	pmap_get_refmod(ppnum_t pn);
				/* Clear modified and referenced bits */
extern void		pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
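
/*
 * Illustrative sketch (assumption): how machine-independent pageout code
 * might consult and age these bits.
 *
 *	unsigned int refmod = pmap_get_refmod(pn);
 *	if (refmod & VM_MEM_MODIFIED)
 *		...		// page is dirty; clean it before reuse
 *	pmap_clear_refmod(pn, VM_MEM_REFERENCED);	// reset aging state
 */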
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void		pmap_protect(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot);

extern void		(pmap_pageable)(
				pmap_t		pmap,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	pageable);
extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

extern kern_return_t pmap_nest(pmap_t,
			       pmap_t,
			       addr64_t,
			       addr64_t,
			       uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
				 addr64_t,
				 uint64_t);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
#endif	/* MACH_KERNEL_PRIVATE */
extern boolean_t	pmap_is_noencrypt(ppnum_t);
extern void		pmap_set_noencrypt(ppnum_t pn);
extern void		pmap_clear_noencrypt(ppnum_t pn);
/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t	kernel_pmap;		/* The kernel's map */
#define	pmap_kernel()	(kernel_pmap)
/* machine independent WIMG bits */

#define VM_MEM_GUARDED		0x1	/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2	/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4	/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8	/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF

#define VM_MEM_SUPERPAGE	0x100	/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200
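
/*
 * Illustrative sketch (assumption): building a pmap_enter() flags
 * argument from the WIMG bits above.  Bits outside VM_WIMG_MASK, such as
 * VM_MEM_SUPERPAGE, request behavior other than cacheability.
 *
 *	unsigned int flags = VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED;
 *	if (no_attribute_preference)
 *		flags = VM_WIMG_USE_DEFAULT;	// take the default WIMG
 */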
#define PMAP_OPTIONS_NOWAIT	0x1	/* don't block, return
					 * KERN_RESOURCE_SHORTAGE
					 * instead */
#define PMAP_OPTIONS_NOENTER	0x2	/* expand pmap if needed
					 * but don't enter mapping
					 */
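
/*
 * Illustrative sketch (assumption): PMAP_OPTIONS_NOWAIT lets a caller
 * that must not block detect allocation failure instead of sleeping.
 *
 *	kr = pmap_enter_options(pmap, va, pn, prot, fault_type,
 *				flags, wired, PMAP_OPTIONS_NOWAIT);
 *	if (kr == KERN_RESOURCE_SHORTAGE)
 *		...	// retry later from a context that may block
 */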
#if	!defined(__LP64__)
extern vm_offset_t	pmap_extract(pmap_t pmap,
				vm_map_offset_t va);
#endif	/* !defined(__LP64__) */

extern void		pmap_change_wiring(	/* Specify pageability */
				pmap_t		pmap,
				vm_map_offset_t	va,
				boolean_t	wired);
/* LP64todo - switch to vm_map_offset_t when it grows */
extern void		pmap_remove(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e);
extern void		fillPage(ppnum_t pa, unsigned int fill);

extern void		pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void		pmap_unmap_sharedpage(pmap_t pmap);
#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif	/* defined(__LP64__) */
#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */