/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */
60 | ||
61 | #ifndef _VM_PMAP_H_ | |
62 | #define _VM_PMAP_H_ | |
63 | ||
64 | #include <mach/kern_return.h> | |
65 | #include <mach/vm_param.h> | |
66 | #include <mach/vm_types.h> | |
67 | #include <mach/vm_attributes.h> | |
68 | #include <mach/boolean.h> | |
69 | #include <mach/vm_prot.h> | |
70 | ||
71 | #ifdef KERNEL_PRIVATE | |
72 | ||
73 | /* | |
74 | * The following is a description of the interface to the | |
75 | * machine-dependent "physical map" data structure. The module | |
76 | * must provide a "pmap_t" data type that represents the | |
77 | * set of valid virtual-to-physical addresses for one user | |
78 | * address space. [The kernel address space is represented | |
79 | * by a distinguished "pmap_t".] The routines described manage | |
80 | * this type, install and update virtual-to-physical mappings, | |
81 | * and perform operations on physical addresses common to | |
82 | * many address spaces. | |
83 | */ | |
84 | ||
85 | /* Copy between a physical page and a virtual address */ | |
86 | /* LP64todo - switch to vm_map_offset_t when it grows */ | |
87 | extern kern_return_t copypv( | |
88 | addr64_t source, | |
89 | addr64_t sink, | |
90 | unsigned int size, | |
91 | int which); | |
92 | #define cppvPsnk 1 | |
93 | #define cppvPsnkb 31 | |
94 | #define cppvPsrc 2 | |
95 | #define cppvPsrcb 30 | |
96 | #define cppvFsnk 4 | |
97 | #define cppvFsnkb 29 | |
98 | #define cppvFsrc 8 | |
99 | #define cppvFsrcb 28 | |
100 | #define cppvNoModSnk 16 | |
101 | #define cppvNoModSnkb 27 | |
102 | #define cppvNoRefSrc 32 | |
103 | #define cppvNoRefSrcb 26 | |
104 | #define cppvKmap 64 /* Use the kernel's vm_map */ | |
105 | #define cppvKmapb 25 | |
106 | ||
107 | #ifdef MACH_KERNEL_PRIVATE | |
108 | ||
109 | #include <machine/pmap.h> | |
110 | ||
111 | /* | |
112 | * Routines used for initialization. | |
113 | * There is traditionally also a pmap_bootstrap, | |
114 | * used very early by machine-dependent code, | |
115 | * but it is not part of the interface. | |
116 | * | |
117 | * LP64todo - | |
118 | * These interfaces are tied to the size of the | |
119 | * kernel pmap - and therefore use the "local" | |
120 | * vm_offset_t, etc... types. | |
121 | */ | |
122 | ||
123 | extern void *pmap_steal_memory(vm_size_t size); | |
124 | /* During VM initialization, | |
125 | * steal a chunk of memory. | |
126 | */ | |
127 | extern unsigned int pmap_free_pages(void); /* During VM initialization, | |
128 | * report remaining unused | |
129 | * physical pages. | |
130 | */ | |
131 | extern void pmap_startup( | |
132 | vm_offset_t *startp, | |
133 | vm_offset_t *endp); | |
134 | /* During VM initialization, | |
135 | * use remaining physical pages | |
136 | * to allocate page frames. | |
137 | */ | |
138 | extern void pmap_init(void); /* Initialization, | |
139 | * after kernel runs | |
140 | * in virtual memory. | |
141 | */ | |
142 | ||
143 | extern void mapping_adjust(void); /* Adjust free mapping count */ | |
144 | ||
145 | extern void mapping_free_prime(void); /* Primes the mapping block release list */ | |
146 | ||
147 | #ifndef MACHINE_PAGES | |
148 | /* | |
149 | * If machine/pmap.h defines MACHINE_PAGES, it must implement | |
150 | * the above functions. The pmap module has complete control. | |
151 | * Otherwise, it must implement | |
152 | * pmap_free_pages | |
153 | * pmap_virtual_space | |
154 | * pmap_next_page | |
155 | * pmap_init | |
156 | * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup | |
157 | * using pmap_free_pages, pmap_next_page, pmap_virtual_space, | |
158 | * and pmap_enter. pmap_free_pages may over-estimate the number | |
159 | * of unused physical pages, and pmap_next_page may return FALSE | |
160 | * to indicate that there are no more unused pages to return. | |
161 | * However, for best performance pmap_free_pages should be accurate. | |
162 | */ | |
163 | ||
164 | extern boolean_t pmap_next_page(ppnum_t *pnum); | |
165 | /* During VM initialization, | |
166 | * return the next unused | |
167 | * physical page. | |
168 | */ | |
169 | extern void pmap_virtual_space( | |
170 | vm_offset_t *virtual_start, | |
171 | vm_offset_t *virtual_end); | |
172 | /* During VM initialization, | |
173 | * report virtual space | |
174 | * available for the kernel. | |
175 | */ | |
176 | #endif /* MACHINE_PAGES */ | |
177 | ||
178 | /* | |
179 | * Routines to manage the physical map data structure. | |
180 | */ | |
181 | extern pmap_t pmap_create(vm_map_size_t size); /* Create a pmap_t. */ | |
182 | extern pmap_t (pmap_kernel)(void); /* Return the kernel's pmap */ | |
183 | extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ | |
184 | extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ | |
185 | extern void pmap_switch(pmap_t); | |
186 | ||
187 | ||
188 | extern void pmap_enter( /* Enter a mapping */ | |
189 | pmap_t pmap, | |
190 | vm_map_offset_t v, | |
191 | ppnum_t pn, | |
192 | vm_prot_t prot, | |
193 | unsigned int flags, | |
194 | boolean_t wired); | |
195 | ||
196 | extern void pmap_remove_some_phys( | |
197 | pmap_t pmap, | |
198 | ppnum_t pn); | |
199 | ||
200 | ||
201 | /* | |
202 | * Routines that operate on physical addresses. | |
203 | */ | |
204 | ||
205 | extern void pmap_page_protect( /* Restrict access to page. */ | |
206 | ppnum_t phys, | |
207 | vm_prot_t prot); | |
208 | ||
209 | extern void (pmap_zero_page)( | |
210 | ppnum_t pn); | |
211 | ||
212 | extern void (pmap_zero_part_page)( | |
213 | ppnum_t pn, | |
214 | vm_offset_t offset, | |
215 | vm_size_t len); | |
216 | ||
217 | extern void (pmap_copy_page)( | |
218 | ppnum_t src, | |
219 | ppnum_t dest); | |
220 | ||
221 | extern void (pmap_copy_part_page)( | |
222 | ppnum_t src, | |
223 | vm_offset_t src_offset, | |
224 | ppnum_t dst, | |
225 | vm_offset_t dst_offset, | |
226 | vm_size_t len); | |
227 | ||
228 | extern void (pmap_copy_part_lpage)( | |
229 | vm_offset_t src, | |
230 | ppnum_t dst, | |
231 | vm_offset_t dst_offset, | |
232 | vm_size_t len); | |
233 | ||
234 | extern void (pmap_copy_part_rpage)( | |
235 | ppnum_t src, | |
236 | vm_offset_t src_offset, | |
237 | vm_offset_t dst, | |
238 | vm_size_t len); | |
239 | ||
240 | extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */ | |
241 | ppnum_t phys); | |
242 | ||
243 | extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate | |
244 | * cache based on | |
245 | * page number sent */ | |
246 | ppnum_t pn, | |
247 | vm_size_t size, | |
248 | vm_machine_attribute_t attribute, | |
249 | vm_machine_attribute_val_t* value); | |
250 | ||
251 | /* | |
252 | * debug/assertions. pmap_verify_free returns true iff | |
253 | * the given physical page is mapped into no pmap. | |
254 | */ | |
255 | extern boolean_t pmap_verify_free(ppnum_t pn); | |
256 | ||
257 | /* | |
258 | * Statistics routines | |
259 | */ | |
260 | extern int (pmap_resident_count)(pmap_t pmap); | |
261 | ||
262 | /* | |
263 | * Sundry required (internal) routines | |
264 | */ | |
265 | extern void pmap_collect(pmap_t pmap);/* Perform garbage | |
266 | * collection, if any */ | |
267 | ||
268 | /* | |
269 | * Optional routines | |
270 | */ | |
271 | extern void (pmap_copy)( /* Copy range of mappings, | |
272 | * if desired. */ | |
273 | pmap_t dest, | |
274 | pmap_t source, | |
275 | vm_map_offset_t dest_va, | |
276 | vm_map_size_t size, | |
277 | vm_map_offset_t source_va); | |
278 | ||
279 | extern kern_return_t (pmap_attribute)( /* Get/Set special memory | |
280 | * attributes */ | |
281 | pmap_t pmap, | |
282 | vm_map_offset_t va, | |
283 | vm_map_size_t size, | |
284 | vm_machine_attribute_t attribute, | |
285 | vm_machine_attribute_val_t* value); | |
286 | ||
/*
 *	Routines defined as macros.
 *
 *	Each PMAP_(DE)ACTIVATE_{USER,KERNEL} macro defaults to a no-op
 *	unless machine/pmap.h supplies a PMAP_(DE)ACTIVATE hook, in which
 *	case it forwards to that hook (skipping the kernel pmap for the
 *	USER variants, since the kernel pmap is always active).
 */
#ifndef	PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if (pmap != pmap_kernel())			\
		PMAP_ACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_USER */

#ifndef	PMAP_DEACTIVATE_USER
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_USER(thr, cpu) {		\
	pmap_t		pmap;				\
							\
	pmap = (thr)->map->pmap;			\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_USER */

#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef	PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef	PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */
335 | ||
#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter().
 *	For non-kernel pmaps it first asserts the page is not encrypted,
 *	then installs the mapping with the page's lock bits masked out of
 *	the requested protection.
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
	MACRO_BEGIN						\
	pmap_t		__pmap = (pmap);			\
	vm_page_t	__page = (page);			\
								\
	if (__pmap != kernel_pmap) {				\
		ASSERT_PAGE_DECRYPTED(__page);			\
	}							\
	pmap_enter(__pmap,					\
		   (virtual_address),				\
		   __page->phys_page,				\
		   (protection) & ~__page->page_lock,		\
		   (flags),					\
		   (wired));					\
	MACRO_END
#endif	/* !PMAP_ENTER */
356 | ||
357 | /* | |
358 | * Routines to manage reference/modify bits based on | |
359 | * physical addresses, simulating them if not provided | |
360 | * by the hardware. | |
361 | */ | |
362 | /* Clear reference bit */ | |
363 | extern void pmap_clear_reference(ppnum_t pn); | |
364 | /* Return reference bit */ | |
365 | extern boolean_t (pmap_is_referenced)(ppnum_t pn); | |
366 | /* Set modify bit */ | |
367 | extern void pmap_set_modify(ppnum_t pn); | |
368 | /* Clear modify bit */ | |
369 | extern void pmap_clear_modify(ppnum_t pn); | |
370 | /* Return modify bit */ | |
371 | extern boolean_t pmap_is_modified(ppnum_t pn); | |
372 | /* Return modified and referenced bits */ | |
373 | extern unsigned int pmap_get_refmod(ppnum_t pn); | |
374 | /* Clear modified and referenced bits */ | |
375 | extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask); | |
376 | #define VM_MEM_MODIFIED 0x01 /* Modified bit */ | |
377 | #define VM_MEM_REFERENCED 0x02 /* Referenced bit */ | |
378 | ||
379 | /* | |
380 | * Routines that operate on ranges of virtual addresses. | |
381 | */ | |
382 | extern void pmap_protect( /* Change protections. */ | |
383 | pmap_t map, | |
384 | vm_map_offset_t s, | |
385 | vm_map_offset_t e, | |
386 | vm_prot_t prot); | |
387 | ||
388 | extern void (pmap_pageable)( | |
389 | pmap_t pmap, | |
390 | vm_map_offset_t start, | |
391 | vm_map_offset_t end, | |
392 | boolean_t pageable); | |
393 | ||
394 | #endif /* MACH_KERNEL_PRIVATE */ | |
395 | ||
396 | /* | |
397 | * JMM - This portion is exported to other kernel components right now, | |
398 | * but will be pulled back in the future when the needed functionality | |
399 | * is provided in a cleaner manner. | |
400 | */ | |
401 | ||
402 | extern pmap_t kernel_pmap; /* The kernel's map */ | |
403 | #define pmap_kernel() (kernel_pmap) | |
404 | ||
405 | /* machine independent WIMG bits */ | |
406 | ||
407 | #define VM_MEM_GUARDED 0x1 /* (G) Guarded Storage */ | |
408 | #define VM_MEM_COHERENT 0x2 /* (M) Memory Coherency */ | |
409 | #define VM_MEM_NOT_CACHEABLE 0x4 /* (I) Cache Inhibit */ | |
410 | #define VM_MEM_WRITE_THROUGH 0x8 /* (W) Write-Through */ | |
411 | ||
412 | #define VM_WIMG_MASK 0xFF | |
413 | #define VM_WIMG_USE_DEFAULT 0x80000000 | |
414 | ||
415 | extern void pmap_modify_pages( /* Set modify bit for pages */ | |
416 | pmap_t map, | |
417 | vm_map_offset_t s, | |
418 | vm_map_offset_t e); | |
419 | ||
420 | extern vm_offset_t pmap_extract(pmap_t pmap, | |
421 | vm_map_offset_t va); | |
422 | ||
423 | extern void pmap_change_wiring( /* Specify pageability */ | |
424 | pmap_t pmap, | |
425 | vm_map_offset_t va, | |
426 | boolean_t wired); | |
427 | ||
428 | /* LP64todo - switch to vm_map_offset_t when it grows */ | |
429 | extern void pmap_remove( /* Remove mappings. */ | |
430 | pmap_t map, | |
431 | addr64_t s, | |
432 | addr64_t e); | |
433 | ||
434 | ||
435 | #endif /* KERNEL_PRIVATE */ | |
436 | ||
437 | #endif /* _VM_PMAP_H_ */ |