/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).
 * Copyright (c) 1991,1987 Carnegie Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation,
 * and that all advertising materials mentioning features or use of
 * this software display the following acknowledgement: ``This product
 * includes software developed by the Center for Software Science at
 * the University of Utah.''
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Carnegie Mellon requests users of this software to return to
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * 	Utah $Hdr: pmap.c 1.28 92/06/23$
 *	Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
 */

/*
 *	Manages physical address maps for powerpc.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to when physical maps must be made correct.
 */

#include <zone_debug.h>
#include <debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <kern/spl.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/savearea.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/low_trace.h>
#include <ppc/lowglobals.h>
#include <ddb/db_output.h>
#include <machine/cpu_capabilities.h>

#include <vm/vm_protos.h> /* must be last */


extern unsigned int	avail_remaining;
unsigned int		debugbackpocket;		/* (TEST/DEBUG) */

vm_offset_t		first_free_virt;
int			current_free_region;		/* Used in pmap_next_page */

pmapTransTab		*pmapTrans;			/* Point to the hash to pmap translations */
struct phys_entry	*phys_table;

/* forward */
static void pmap_map_physical(void);
static void pmap_map_iohole(addr64_t paddr, addr64_t size);
void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);

extern void hw_hash_init(void);

/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */

extern struct pmap	kernel_pmap_store;
pmap_t			kernel_pmap;		/* Pointer to kernel pmap and anchor for in-use pmaps */
addr64_t		kernel_pmap_phys;	/* Physical address of the kernel pmap */
pmap_t			cursor_pmap;		/* Pointer to last pmap allocated or previous if removed from in-use list */
pmap_t			sharedPmap;		/* Pointer to common pmap for 64-bit address spaces */
struct zone		*pmap_zone;		/* zone of pmap structures */
boolean_t		pmap_initialized = FALSE;

int			ppc_max_pmaps;		/* Maximum number of concurrent address spaces allowed. This is machine dependent */
addr64_t		vm_max_address;		/* Maximum effective address supported */
addr64_t		vm_max_physical;	/* Maximum physical address supported */

/*
 *	Physical-to-virtual translations are handled by inverted page table
 *	structures, phys_tables.  Multiple mappings of a single page are handled
 *	by linking the affected mapping structures.  We initialise one region
 *	for phys_tables of the physical memory we know about, but more may be
 *	added as it is discovered (eg. by drivers).
 */

/*
 *	free pmap list.  caches the first free_pmap_max pmaps that are freed up
 */
int		free_pmap_max = 32;
int		free_pmap_count;
pmap_t		free_pmap_list;
decl_simple_lock_data(,free_pmap_lock)

/*
 *	Function to get index into phys_table for a given physical address
 */

struct phys_entry *pmap_find_physentry(ppnum_t pa)
{
	int i;
	unsigned int entry;

	for (i = pmap_mem_regions_count - 1; i >= 0; i--) {
		if (pa < pmap_mem_regions[i].mrStart) continue;	/* See if we fit in this region */
		if (pa > pmap_mem_regions[i].mrEnd) continue;	/* Check the end too */

		entry = (unsigned int)pmap_mem_regions[i].mrPhysTab + ((pa - pmap_mem_regions[i].mrStart) * sizeof(phys_entry_t));
		return (struct phys_entry *)entry;
	}
//	kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa);
	return 0;
}
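
/*
 * Illustrative note (added commentary; the numbers are hypothetical): if a
 * bank's mrStart is page 0x10000 and pa is page 0x10123, the entry returned
 * lies 0x123 * sizeof(phys_entry_t) bytes into that bank's mrPhysTab — i.e.
 * there is one phys_entry per physical page within each bank, indexed by the
 * page's offset from the start of the bank.
 */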

/*
 *	kern_return_t
 *	pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
 *	                         boolean_t available, unsigned int attr)
 *
 *	THIS IS NOT SUPPORTED
 */
kern_return_t
pmap_add_physical_memory(
	__unused vm_offset_t spa,
	__unused vm_offset_t epa,
	__unused boolean_t available,
	__unused unsigned int attr)
{

	panic("Forget it! You can't map no more memory, you greedy puke!\n");
	return KERN_SUCCESS;
}

/*
 *	pmap_map(va, spa, epa, prot)
 *	is called during boot to map memory in the kernel's address map.
 *	A virtual address range starting at "va" is mapped to the physical
 *	address range "spa" to "epa" with machine independent protection
 *	"prot".
 *
 *	"va", "spa", and "epa" are byte addresses and must be on machine
 *	independent page boundaries.
 *
 *	The pages in the range share one contiguous virtual address range and
 *	the same protection and attributes; therefore, we map the whole range
 *	with a single block mapping.
 *
 *	Note that this call will only map into 32-bit space.
 */

vm_offset_t
pmap_map(
	vm_offset_t va,
	vm_offset_t spa,
	vm_offset_t epa,
	vm_prot_t prot)
{

	addr64_t colladr;

	if (spa == epa) return(va);

	assert(epa > spa);

	colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL);

	if(colladr) {						/* Was something already mapped in the range? */
		panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
			va, spa, epa, colladr);
	}
	return(va);
}
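
/*
 * Usage sketch: pmap_bootstrap() below uses this routine to map its own
 * allocations V=R, e.g.
 *
 *	pmap_map(first_used_addr, first_used_addr,
 *		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);
 *
 * Because the whole range becomes a single permanent block mapping, any
 * collision is fatal by design.
 */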

/*
 *	pmap_map_physical()
 *	Maps physical memory into the kernel's address map beginning at lgPMWvaddr, the
 *	physical memory window.
 */
void
pmap_map_physical(void)
{
	unsigned region;
	uint64_t msize, size;
	addr64_t paddr, vaddr, colladdr;

	/* Iterate over physical memory regions, block mapping each into the kernel's address map */
	for (region = 0; region < (unsigned)pmap_mem_regions_count; region++) {
		paddr = ((addr64_t)pmap_mem_regions[region].mrStart << 12);	/* Get starting physical address */
		size  = (((addr64_t)pmap_mem_regions[region].mrEnd + 1) << 12) - paddr;

		vaddr = paddr + lowGlo.lgPMWvaddr;				/* Get starting virtual address */

		while (size > 0) {

			msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size);	/* Get size, but no more than 2TBs */

			colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
						(mmFlgBlock | mmFlgPerm), (msize >> 12),
						(VM_PROT_READ | VM_PROT_WRITE));
			if (colladdr) {
				panic("pmap_map_physical: mapping failure - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
					vaddr, (paddr >> 12), (msize >> 12), colladdr);
			}

			vaddr = vaddr + (uint64_t)msize;	/* Point to the next virtual addr */
			paddr = paddr + (uint64_t)msize;	/* Point to the next physical addr */
			size -= msize;
		}
	}
}
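
/*
 * Illustrative note: mapping_make() deals in 4KB page numbers, so a
 * hypothetical 512MB bank becomes a single block mapping of 0x20000 pages.
 * The 2TB clamp above means the inner loop iterates more than once only for
 * banks larger than 2TB, which is not expected on this hardware.
 */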

/*
 *	pmap_map_iohole(addr64_t paddr, addr64_t size)
 *	Maps an I/O hole into the kernel's address map at its proper offset in
 *	the physical memory window.
 */
void
pmap_map_iohole(addr64_t paddr, addr64_t size)
{

	addr64_t vaddr, colladdr, msize;
	uint32_t psize;

	vaddr = paddr + lowGlo.lgPMWvaddr;			/* Get starting virtual address */

	while (size > 0) {

		msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size);	/* Get size, but no more than 2TBs */

		colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
					(mmFlgBlock | mmFlgPerm | mmFlgGuarded | mmFlgCInhib), (msize >> 12),
					(VM_PROT_READ | VM_PROT_WRITE));
		if (colladdr) {
			panic("pmap_map_iohole: mapping failed - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
				vaddr, (paddr >> 12), (msize >> 12), colladdr);
		}

		vaddr = vaddr + (uint64_t)msize;		/* Point to the next virtual addr */
		paddr = paddr + (uint64_t)msize;		/* Point to the next physical addr */
		size -= msize;
	}
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *	Called with mapping done by BATs. Page_size must already be set.
 *
 *	Parameters:
 *	msize:		Total memory present
 *	first_avail:	First virtual address available
 *	kmapsize:	Size of kernel text and data
 */
void
pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
{
	vm_offset_t 	addr;
	vm_size_t 	size;
	unsigned int 	i, num, mapsize, vmpagesz, vmmapsz, nbits;
	signed		bank;
	uint64_t	tmemsize;
	uint_t		htslop;
	vm_offset_t	first_used_addr, PCAsize;
	struct phys_entry *phys_entry;

	*first_avail = round_page(*first_avail);		/* Make sure we start out on a page boundary */
	vm_last_addr = VM_MAX_KERNEL_ADDRESS;			/* Set the highest address known to VM */

	/*
	 * Initialize kernel pmap
	 */
	kernel_pmap = &kernel_pmap_store;
	kernel_pmap_phys = (addr64_t)&kernel_pmap_store;
	cursor_pmap = &kernel_pmap_store;

	kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;	/* Set up anchor forward */
	kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;	/* Set up anchor reverse */
	kernel_pmap->ref_count = 1;
	kernel_pmap->pmapFlags = pmapKeyDef;			/* Set the default keys */
	kernel_pmap->pmapCCtl = pmapCCtlVal;			/* Initialize cache control */
	kernel_pmap->space = PPC_SID_KERNEL;
	kernel_pmap->pmapvr = 0;				/* Virtual = Real */

	/*
	 * IBM's recommended hash table size is one PTEG for every 2 physical pages.
	 * However, we have found that OSX rarely uses more than 4 PTEs in a PTEG
	 * with this size table.  Therefore, by default we allocate a hash table
	 * one half IBM's recommended size, ie one PTEG per 4 pages.  The "ht_shift" boot-arg
	 * can be used to override the default hash table size.
	 * We will allocate the hash table in physical RAM, outside of kernel virtual memory,
	 * at the top of the highest bank that will contain it.
	 * Note that "bank" doesn't refer to a physical memory slot here, it is a range of
	 * physically contiguous memory.
	 *
	 * The PCA will go there as well, immediately before the hash table.
	 */

	nbits = cntlzw(((msize << 1) - 1) >> 32);		/* Get first bit in upper half */
	if (nbits == 32)					/* If upper half was empty, find bit in bottom half */
		nbits = nbits + cntlzw((uint_t)((msize << 1) - 1));
	tmemsize = 0x8000000000000000ULL >> nbits;		/* Get memory size rounded up to power of 2 */

	/* Calculate hash table size: First, make sure we don't overflow 32-bit arithmetic. */
	if (tmemsize > 0x0000002000000000ULL)
		tmemsize = 0x0000002000000000ULL;

	/* Second, calculate IBM recommended hash table size, ie one PTEG per 2 physical pages */
	hash_table_size = (uint_t)(tmemsize >> 13) * PerProcTable[0].ppe_vaddr->pf.pfPTEG;

	/* Third, cut this in half to produce the OSX default, ie one PTEG per 4 physical pages */
	hash_table_size >>= 1;

	/* Fourth, adjust default size per "ht_shift" boot arg */
	if (hash_table_shift >= 0)				/* if positive, make size bigger */
		hash_table_size <<= hash_table_shift;
	else							/* if "ht_shift" is negative, make smaller */
		hash_table_size >>= (-hash_table_shift);

	/* Fifth, make sure we are at least minimum size */
	if (hash_table_size < (256 * 1024))
		hash_table_size = (256 * 1024);
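
	/*
	 * Worked example of the sizing above (illustrative; assumes a 64-byte
	 * PTEG, i.e. pfPTEG == 64, as on 32-bit implementations): msize of
	 * 512MB rounds to tmemsize = 512MB; (tmemsize >> 13) = 64K two-page
	 * units, so the IBM-recommended size is 64K * 64 = 4MB and the OSX
	 * default is 2MB.  An ht_shift of -1 would drop that to 1MB, but the
	 * size can never fall below the 256KB floor.
	 */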

	while(1) {						/* Try to fit hash table and PCA into contiguous memory */

		if(hash_table_size < (256 * 1024)) {		/* Have we dropped too short? This should never, ever happen */
			panic("pmap_bootstrap: Can't find space for hash table\n");	/* This will never print, system isn't up far enough... */
		}

		PCAsize = (hash_table_size / PerProcTable[0].ppe_vaddr->pf.pfPTEG) * sizeof(PCA_t);	/* Get total size of PCA table */
		PCAsize = round_page(PCAsize);			/* Make sure it is at least a page long */

		for(bank = pmap_mem_regions_count - 1; bank >= 0; bank--) {	/* Search backwards through banks */

			hash_table_base = ((addr64_t)pmap_mem_regions[bank].mrEnd << 12) - hash_table_size + PAGE_SIZE;	/* Get tentative address */

			htslop = hash_table_base & (hash_table_size - 1);	/* Get the extra that we will round down when we align */
			hash_table_base = hash_table_base & -(addr64_t)hash_table_size;	/* Round down to correct boundary */

			if((hash_table_base - round_page(PCAsize)) >= ((addr64_t)pmap_mem_regions[bank].mrStart << 12)) break;	/* Leave if we fit */
		}

		if(bank >= 0) break;				/* We are done if we found a suitable bank */

		hash_table_size = hash_table_size >> 1;		/* Try the next size down */
	}

	if(htslop) {						/* If there was slop (i.e., wasted pages for alignment) add a new region */
		for(i = pmap_mem_regions_count - 1; i >= (unsigned)bank; i--) {	/* Copy from end to our bank, including our bank */
			pmap_mem_regions[i + 1].mrStart  = pmap_mem_regions[i].mrStart;	/* Set the start of the bank */
			pmap_mem_regions[i + 1].mrAStart = pmap_mem_regions[i].mrAStart;	/* Set the start of allocatable area */
			pmap_mem_regions[i + 1].mrEnd    = pmap_mem_regions[i].mrEnd;	/* Set the end address of bank */
			pmap_mem_regions[i + 1].mrAEnd   = pmap_mem_regions[i].mrAEnd;	/* Set the end address of allocatable area */
		}

		pmap_mem_regions[i + 1].mrStart  = (hash_table_base + hash_table_size) >> 12;	/* Set the start of the next bank to the start of the slop area */
		pmap_mem_regions[i + 1].mrAStart = (hash_table_base + hash_table_size) >> 12;	/* Set the start of allocatable area to the start of the slop area */
		pmap_mem_regions[i].mrEnd = (hash_table_base + hash_table_size - 4096) >> 12;	/* Set the end of our bank to the end of the hash table */

	}

	pmap_mem_regions[bank].mrAEnd = (hash_table_base - PCAsize - 4096) >> 12;	/* Set the maximum allocatable in this bank */

	hw_hash_init();						/* Initialize the hash table and PCA */
	hw_setup_trans();					/* Set up hardware registers needed for translation */

	/*
	 *	The hash table is now all initialized and so is the PCA.  Go on to do the rest of it.
	 *	This allocation is from the bottom up.
	 */

	num = atop_64(msize);					/* Get number of pages in all of memory */

	/* Figure out how much we need to allocate */

	size = (vm_size_t) (
		(InitialSaveBloks * PAGE_SIZE) +		/* Allow space for the initial context saveareas */
		(BackPocketSaveBloks * PAGE_SIZE) +		/* For backpocket saveareas */
		trcWork.traceSize +				/* Size of trace table */
		((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) +	/* Size of pmap translate table */
		(((num * sizeof(struct phys_entry)) + 4095) & -4096)		/* For the physical entries */
	);

	mapsize = size = round_page(size);			/* Get size of area to map that we just calculated */
	mapsize = mapsize + kmapsize;				/* Account for the kernel text size */

	vmpagesz = round_page(num * sizeof(struct vm_page));	/* Allow for all vm_pages needed to map physical mem */
	vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry));	/* Allow for vm_maps */

	mapsize = mapsize + vmpagesz + vmmapsz;			/* Add the VM system estimates into the grand total */

	mapsize = mapsize + (4 * 1024 * 1024);			/* Allow for 4 meg of extra mappings */
	mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks of mappings we need */
	mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);	/* Account for the mappings themselves */

	size = size + (mapsize * PAGE_SIZE);			/* Get the true size we need */

	/* hash table must be aligned to its size */

	addr = *first_avail;					/* Set the address to start allocations */
	first_used_addr = addr;					/* Remember where we started */

	bzero((char *)addr, size);				/* Clear everything that we are allocating */

	savearea_init(addr);					/* Initialize the savearea chains and data */

	addr = (vm_offset_t)((unsigned int)addr + ((InitialSaveBloks + BackPocketSaveBloks) * PAGE_SIZE));	/* Point past saveareas */

	trcWork.traceCurr = (unsigned int)addr;			/* Set first trace slot to use */
	trcWork.traceStart = (unsigned int)addr;		/* Set start of trace table */
	trcWork.traceEnd = (unsigned int)addr + trcWork.traceSize;	/* Set end of trace table */

	addr = (vm_offset_t)trcWork.traceEnd;			/* Set next allocatable location */

	pmapTrans = (pmapTransTab *)addr;			/* Point to the pmap to hash translation table */

	pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap);	/* Initialize the kernel pmap in the translate table */
	pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap);	/* Initialize the kernel pmap in the translate table */

	addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096);	/* Point past pmap translate table */

	/* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */

	phys_entry = (struct phys_entry *) addr;		/* Get pointer to physical table */

	for (bank = 0; bank < pmap_mem_regions_count; bank++) {	/* Set pointer and initialize all banks of ram */

		pmap_mem_regions[bank].mrPhysTab = phys_entry;	/* Set pointer to the physical table for this bank */

		phys_entry = phys_entry + (pmap_mem_regions[bank].mrEnd - pmap_mem_regions[bank].mrStart + 1);	/* Point to the next */
	}

	addr += (((num * sizeof(struct phys_entry)) + 4095) & -4096);	/* Step on past the physical entries */

	/*
	 * Remaining space is for mapping entries.  Tell the initializer routine that
	 * the mapping system can't release this block because it's permanently assigned
	 */

	mapping_init();						/* Initialize the mapping tables */

	for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) {	/* Add initial mapping blocks */
		mapping_free_init(i, 1, 0);			/* Pass block address and say that this one is not releasable */
	}
	mapCtl.mapcmin = MAPPERBLOK;				/* Make sure we only adjust one at a time */

	/* Map V=R the page tables */
	pmap_map(first_used_addr, first_used_addr,
		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);

	*first_avail = round_page(first_used_addr + size);	/* Set next available page */
	first_free_virt = *first_avail;				/* Ditto */

	/* For 64-bit machines, block map physical memory and the I/O hole into kernel space */
	if(BootProcInfo.pf.Available & pf64Bit) {		/* Are we on a 64-bit machine? */
		lowGlo.lgPMWvaddr = PHYS_MEM_WINDOW_VADDR;	/* Initialize the physical memory window's virtual address */

		pmap_map_physical();				/* Block map physical memory into the window */

		pmap_map_iohole(IO_MEM_WINDOW_VADDR, IO_MEM_WINDOW_SIZE);	/* Block map the I/O hole */
	}

	/* All the rest of memory is free - add it to the free
	 * regions so that it can be allocated by pmap_steal
	 */

	pmap_mem_regions[0].mrAStart = (*first_avail >> 12);	/* Set up the free area to start allocations (always in the first bank) */

	current_free_region = 0;				/* Set that we will start allocating in bank 0 */
	avail_remaining = 0;					/* Clear free page count */
	for(bank = 0; bank < pmap_mem_regions_count; bank++) {	/* Total up all of the pages in the system that are available */
		avail_remaining += (pmap_mem_regions[bank].mrAEnd - pmap_mem_regions[bank].mrAStart) + 1;	/* Add in allocatable pages in this bank */
	}

}

/*
 *	pmap_init(spa, epa)
 *	finishes the initialization of the pmap module.
 *	This procedure is called from vm_mem_init() in vm/vm_init.c
 *	to initialize any remaining data structures that the pmap module
 *	needs to map virtual memory (VM is already ON).
 *
 *	Note that the pmap needs to be sized and aligned to
 *	a power of two.  This is because it is used both in virtual and
 *	real so it can't span a page boundary.
 */

void
pmap_init(void)
{

	pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
#if	ZONE_DEBUG
	zone_debug_disable(pmap_zone);		/* Can't debug this one 'cause it messes with size and alignment */
#endif	/* ZONE_DEBUG */

	pmap_initialized = TRUE;

	/*
	 *	Initialize list of freed up pmaps
	 */
	free_pmap_list = 0;			/* Set that there are no free pmaps */
	free_pmap_count = 0;
	simple_lock_init(&free_pmap_lock, 0);

}

unsigned int pmap_free_pages(void)
{
	return avail_remaining;
}

/*
 *	This function allocates physical pages.
 */

/* Non-optimal, but only used for virtual memory startup.
 * Allocate memory from a table of free physical addresses.
 * If there are no more free entries, too bad.
 */

boolean_t pmap_next_page(ppnum_t *addrp)
{
	int i;

	if(current_free_region >= pmap_mem_regions_count) return FALSE;	/* Return failure if we have used everything... */

	for(i = current_free_region; i < pmap_mem_regions_count; i++) {	/* Find the next bank with free pages */
		if(pmap_mem_regions[i].mrAStart <= pmap_mem_regions[i].mrAEnd) break;	/* Found one */
	}

	current_free_region = i;					/* Set our current bank */
	if(i >= pmap_mem_regions_count) return FALSE;			/* Couldn't find a free page */

	*addrp = pmap_mem_regions[i].mrAStart;				/* Allocate the page */
	pmap_mem_regions[i].mrAStart = pmap_mem_regions[i].mrAStart + 1;	/* Set the next one to go */
	avail_remaining--;						/* Drop free count */

	return TRUE;
}

void pmap_virtual_space(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	*startp = round_page(first_free_virt);
	*endp   = vm_last_addr;
}

/*
 *	pmap_create
 *
 *	Create and return a physical map.
 *
 *	If the size specified for the map is zero, the map is an actual physical
 *	map, and may be referenced by the hardware.
 *
 *	A pmap is either in the free list or in the in-use list.  The only use
 *	of the in-use list (aside from debugging) is to handle the VSID wrap situation.
 *	Whenever a new pmap is allocated (i.e., not recovered from the free list),
 *	the in-use list is searched until a hole in the VSID sequence is found.  (Note
 *	that the in-use pmaps are queued in VSID sequence order.)  This is all done
 *	while free_pmap_lock is held.
 *
 *	If the size specified is non-zero, the map will be used in software
 *	only, and is bounded by that size.
 */
pmap_t
pmap_create(vm_map_size_t size)
{
	pmap_t pmap, ckpmap, fore;
	int s;
	unsigned int currSID;
	addr64_t physpmap;

	/*
	 * A software use-only map doesn't even need a pmap structure.
	 */
	if (size)
		return(PMAP_NULL);

	/*
	 * If there is a pmap in the pmap free list, reuse it.
	 * Note that we use free_pmap_list for all chaining of pmaps, both to
	 * the free list and the in use chain (anchored from kernel_pmap).
	 */
	s = splhigh();
	simple_lock(&free_pmap_lock);

	if(free_pmap_list) {					/* Any free? */
		pmap = free_pmap_list;				/* Yes, allocate it */
		free_pmap_list = (pmap_t)pmap->freepmap;	/* Dequeue this one (we chain free ones through freepmap) */
		free_pmap_count--;
	}
	else {
		simple_unlock(&free_pmap_lock);			/* Unlock just in case */
		splx(s);

		pmap = (pmap_t) zalloc(pmap_zone);		/* Get one */
		if (pmap == PMAP_NULL) return(PMAP_NULL);	/* Handle out-of-memory condition */

		bzero((char *)pmap, pmapSize);			/* Clean up the pmap */

		s = splhigh();
		simple_lock(&free_pmap_lock);			/* Lock it back up */

		ckpmap = cursor_pmap;				/* Get starting point for free ID search */
		currSID = ckpmap->spaceNum;			/* Get the actual space ID number */

		while(1) {					/* Keep trying until something happens */

			currSID = (currSID + 1) & (maxAdrSp - 1);	/* Get the next in the sequence */
			if(((currSID * incrVSID) & (maxAdrSp - 1)) == invalSpace) continue;	/* Skip the space we have reserved */
			ckpmap = (pmap_t)ckpmap->pmap_link.next;	/* On to the next in-use pmap */

			if(ckpmap->spaceNum != currSID) break;		/* If we are out of sequence, this is free */

			if(ckpmap == cursor_pmap) {			/* See if we have 2^20 already allocated */
				panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp);	/* Die pig dog */
			}
		}

		pmap->space = (currSID * incrVSID) & (maxAdrSp - 1);	/* Calculate the actual VSID */
		pmap->spaceNum = currSID;				/* Set the space ID number */

		/*
		 * Now we link into the chain just before the out of sequence guy.
		 */

		fore = (pmap_t)ckpmap->pmap_link.prev;		/* Get the current's previous */
		pmap->pmap_link.next = (queue_t)ckpmap;		/* My next points to the current */
		fore->pmap_link.next = (queue_t)pmap;		/* Current's previous's next points to me */
		pmap->pmap_link.prev = (queue_t)fore;		/* My prev points to what the current pointed to */
		ckpmap->pmap_link.prev = (queue_t)pmap;		/* Current's prev points to me */

		physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF);	/* Get the physical address of the pmap */

		pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap;	/* Make V to R translation mask */

		pmapTrans[pmap->space].pmapPAddr = physpmap;	/* Set translate table physical to point to us */
		pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap);	/* Set translate table virtual to point to us */
	}

	pmap->pmapVmmExt = 0;					/* Clear VMM extension block vaddr */
	pmap->pmapVmmExtPhys = 0;				/*  and the paddr, too */
	pmap->pmapFlags = pmapKeyDef;				/* Set default key */
	pmap->pmapCCtl = pmapCCtlVal;				/* Initialize cache control */
	pmap->ref_count = 1;
	pmap->stats.resident_count = 0;
	pmap->stats.wired_count = 0;
	pmap->pmapSCSubTag = 0x0000000000000000ULL;		/* Make sure this is clean and tidy */
	simple_unlock(&free_pmap_lock);

	splx(s);
	return(pmap);
}
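
/*
 * Illustrative note on the VSID search above (space numbers hypothetical):
 * in-use pmaps are kept in a circular list ordered by spaceNum.  If the list
 * holds ...5, 6, 8... and the cursor sits at 6, candidate 7 is compared with
 * the next in-use pmap (spaceNum 8); the mismatch means 7 is unused, and the
 * new pmap is linked in just ahead of the 8, keeping the list in sequence
 * order for future searches.
 */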

/*
 *	pmap_destroy
 *
 *	Gives up a reference to the specified pmap.  When the reference count
 *	reaches zero the pmap structure is added to the pmap free list.
 *
 *	Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int ref_count;
	spl_t s;
	pmap_t fore, aft;

	if (pmap == PMAP_NULL)
		return;

	ref_count = hw_atomic_sub(&pmap->ref_count, 1);		/* Back off the count */
	if(ref_count > 0) return;				/* Still more users, leave now... */

	if(ref_count < 0)					/* Did we go too far? */
		panic("pmap_destroy(): ref_count < 0");

	if (!(pmap->pmapFlags & pmapVMgsaa)) {			/* Don't try this for a shadow assist guest */
		pmap_unmap_sharedpage(pmap);			/* Remove any mapping of page -1 */
	}

#ifdef notdef
	if(pmap->stats.resident_count != 0)
		panic("PMAP_DESTROY: pmap not empty");
#else
	if(pmap->stats.resident_count != 0) {
		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
	}
#endif

	/*
	 * Add the pmap to the pmap free list.
	 */

	s = splhigh();
	simple_lock(&free_pmap_lock);

	if (free_pmap_count <= free_pmap_max) {			/* Do we have enough spares? */

		pmap->freepmap = free_pmap_list;		/* Queue in front */
		free_pmap_list = pmap;
		free_pmap_count++;
		simple_unlock(&free_pmap_lock);

	} else {
		if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev;	/* If we are releasing the cursor, back up */
		fore = (pmap_t)pmap->pmap_link.prev;
		aft  = (pmap_t)pmap->pmap_link.next;
		fore->pmap_link.next = pmap->pmap_link.next;	/* My previous's next is my next */
		aft->pmap_link.prev = pmap->pmap_link.prev;	/* My next's previous is my previous */
		simple_unlock(&free_pmap_lock);
		pmapTrans[pmap->space].pmapPAddr = -1;		/* Invalidate the translate table physical */
		pmapTrans[pmap->space].pmapVAddr = -1;		/* Invalidate the translate table virtual */
		zfree(pmap_zone, pmap);
	}
	splx(s);
}

/*
 *	pmap_reference(pmap)
 *	gains a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1);	/* Bump the count */
}

/*
 *	pmap_remove_some_phys
 *
 *	Removes mappings of the associated page from the specified pmap.
 */
void pmap_remove_some_phys(
	pmap_t pmap,
	vm_offset_t pa)
{
	register struct phys_entry *pp;
	register struct mapping *mp;
	unsigned int pindex;

	if (pmap == PMAP_NULL) {				/* This should never be called with a null pmap */
		panic("pmap_remove_some_phys: null pmap\n");
	}

	pp = mapping_phys_lookup(pa, &pindex);			/* Get physical entry */
	if (pp == 0) return;					/* Leave if not in physical RAM */

	do {							/* Keep going until we toss all pages from this pmap */
		if (pmap->pmapFlags & pmapVMhost) {
			mp = hw_purge_phys(pp);			/* Toss a map */
			switch ((unsigned int)mp & mapRetCode) {
				case mapRtOK:
					mapping_free(mp);	/* Return mapping to free inventory */
					break;
				case mapRtGuest:
					break;			/* Don't try to return a guest mapping */
				case mapRtEmpty:
					break;			/* Physent chain empty, we're done */
				case mapRtNotFnd:
					break;			/* Mapping disappeared on us, retry */
				default:
					panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
						pp, pmap, mp);	/* Handle failure with our usual lack of tact */
			}
		} else {
			mp = hw_purge_space(pp, pmap);		/* Toss a map */
			switch ((unsigned int)mp & mapRetCode) {
				case mapRtOK:
					mapping_free(mp);	/* Return mapping to free inventory */
					break;
				case mapRtEmpty:
					break;			/* Physent chain empty, we're done */
				case mapRtNotFnd:
					break;			/* Mapping disappeared on us, retry */
				default:
					panic("pmap_remove_some_phys: hw_purge_space failed - pp = %08X, pmap = %08X, code = %08X\n",
						pp, pmap, mp);	/* Handle failure with our usual lack of tact */
			}
		}
	} while (mapRtEmpty != ((unsigned int)mp & mapRetCode));

#if DEBUG
	if ((pmap->pmapFlags & pmapVMhost) && !pmap_verify_free(pa))
		panic("pmap_remove_some_phys: cruft left behind - pa = %08X, pmap = %08X\n", pa, pmap);
#endif

	return;							/* Leave... */
}

/*
 *	pmap_remove(pmap, s, e)
 *	unmaps all virtual addresses v in the virtual address
 *	range determined by [s, e) and pmap.
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 *
 *	Note that pmap_remove does not remove any mappings in nested pmaps.  We just
 *	skip those segments.
 */
void
pmap_remove(
	pmap_t pmap,
	addr64_t sva,
	addr64_t eva)
{
	addr64_t va, endva;

	if (pmap == PMAP_NULL) return;				/* Leave if software pmap */


	/* It is just possible that eva might have wrapped around to zero,
	 * and sometimes we get asked to liberate something of size zero
	 * even though it's dumb (eg. after zero length read_overwrites)
	 */
	assert(eva >= sva);

	/* If these are not page aligned the loop might not terminate */
	assert((sva == trunc_page_64(sva)) && (eva == trunc_page_64(eva)));

	va = sva & -4096LL;					/* Round start down to a page */
	endva = eva & -4096LL;					/* Round end down to a page */

	while(1) {						/* Go until we finish the range */
		va = mapping_remove(pmap, va);			/* Remove the mapping and see what's next */
		va = va & -4096LL;				/* Make sure the "not found" indication is clear */
		if((va == 0) || (va >= endva)) break;		/* End loop if we finish range or run off the end */
	}

}
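
/*
 * Illustrative note: as the comments above indicate, mapping_remove() hands
 * back the virtual address of the next mapping (0 if none), with the low
 * 12 bits possibly carrying a "not found" indication.  A sparse range is
 * therefore torn down by hopping from mapping to mapping rather than probing
 * every page in [sva, eva).
 */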

/*
 *	Routine:
 *		pmap_page_protect
 *
 *	Function:
 *		Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(
	ppnum_t pa,
	vm_prot_t prot)
{
	register struct phys_entry *pp;
	boolean_t remove;
	unsigned int pindex;
	mapping_t *mp;


	switch (prot) {
		case VM_PROT_READ:
		case VM_PROT_READ|VM_PROT_EXECUTE:
			remove = FALSE;
			break;
		case VM_PROT_ALL:
			return;
		default:
			remove = TRUE;
			break;
	}


	pp = mapping_phys_lookup(pa, &pindex);			/* Get physical entry */
	if (pp == 0) return;					/* Leave if not in physical RAM */

	if (remove) {						/* If the protection was set to none, we'll remove all mappings */

		do {						/* Keep going until we toss all pages from this physical page */
			mp = hw_purge_phys(pp);			/* Toss a map */
			switch ((unsigned int)mp & mapRetCode) {
				case mapRtOK:
					mapping_free(mp);	/* Return mapping to free inventory */
					break;
				case mapRtGuest:
					break;			/* Don't try to return a guest mapping */
				case mapRtNotFnd:
					break;			/* Mapping disappeared on us, retry */
				case mapRtEmpty:
					break;			/* Physent chain empty, we're done */
				default: panic("pmap_page_protect: hw_purge_phys failed - pp = %08X, code = %08X\n",
						pp, mp);	/* Handle failure with our usual lack of tact */
			}
		} while (mapRtEmpty != ((unsigned int)mp & mapRetCode));

#if DEBUG
		if (!pmap_verify_free(pa))
			panic("pmap_page_protect: cruft left behind - pa = %08X\n", pa);
#endif

		return;						/* Leave... */
	}

	/* When we get here, it means that we are to change the protection for a
	 * physical page.
	 */

	mapping_protect_phys(pa, prot & VM_PROT_ALL);		/* Change protection of all mappings to page. */

}

/*
 *	Routine:
 *		pmap_disconnect
 *
 *	Function:
 *		Disconnect all mappings for this page and return reference and change status
 *		in generic format.
 */
unsigned int pmap_disconnect(
	ppnum_t pa)
{
	register struct phys_entry *pp;
	unsigned int pindex;
	mapping_t *mp;

	pp = mapping_phys_lookup(pa, &pindex);			/* Get physical entry */
	if (pp == 0) return (0);				/* Return null ref and chg if not in physical RAM */
	do {							/* Iterate until all mappings are dead and gone */
		mp = hw_purge_phys(pp);				/* Disconnect a mapping */
		if (!mp) break;					/* All mappings are gone, leave the loop */
		switch ((unsigned int)mp & mapRetCode) {
			case mapRtOK:
				mapping_free(mp);		/* Return mapping to free inventory */
				break;
			case mapRtGuest:
				break;				/* Don't try to return a guest mapping */
			case mapRtNotFnd:
				break;				/* Mapping disappeared on us, retry */
			case mapRtEmpty:
				break;				/* Physent chain empty, we're done */
			default: panic("pmap_disconnect: hw_purge_phys failed - pp = %08X, code = %08X\n",
					pp, mp);		/* Handle failure with our usual lack of tact */
		}
	} while (mapRtEmpty != ((unsigned int)mp & mapRetCode));

#if DEBUG
	if (!pmap_verify_free(pa))
		panic("pmap_disconnect: cruft left behind - pa = %08X\n", pa);
#endif

	return (mapping_tst_refmod(pa));			/* Return page ref and chg in generic format */
}

/*
 *	pmap_protect(pmap, s, e, prot)
 *	changes the protection on all virtual addresses v in the
 *	virtual address range determined by [s, e] and pmap to prot.
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 *
 *	Note that any requests to change the protection of a nested pmap are
 *	ignored.  Those changes MUST be done by calling this with the correct pmap.
 */
void pmap_protect(
	pmap_t pmap,
	vm_map_offset_t sva,
	vm_map_offset_t eva,
	vm_prot_t prot)
{

	addr64_t va, endva;

	if (pmap == PMAP_NULL) return;				/* Do nothing if no pmap */

	if (prot == VM_PROT_NONE) {				/* Should we kill the address range?? */
		pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva);	/* Yeah, dump 'em */
		return;						/* Leave... */
	}

	va = sva & -4096LL;					/* Round start down to a page */
	endva = eva & -4096LL;					/* Round end down to a page */

	while(1) {						/* Go until we finish the range */
		mapping_protect(pmap, va, prot & VM_PROT_ALL, &va);	/* Change the protection and see what's next */
		if((va == 0) || (va >= endva)) break;		/* End loop if we finish range or run off the end */
	}

}


/*
 *	pmap_enter
 *
 *	Create a translation for the virtual address (virt) to the physical
 *	address (phys) in the pmap with the protection requested.  If the
 *	translation is wired then we can not allow a full page fault, i.e.,
 *	the mapping control block is not eligible to be stolen in a low memory
 *	condition.
 *
 *	NB: This is the only routine which MAY NOT lazy-evaluate
 *	    or lose information.  That is, this routine must actually
 *	    insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
		unsigned int flags, __unused boolean_t wired)
{
	unsigned int mflags;
	addr64_t colva;

	if (pmap == PMAP_NULL) return;				/* Leave if software pmap */

	mflags = 0;						/* Make sure this is initialized to nothing special */
	if(!(flags & VM_WIMG_USE_DEFAULT)) {			/* Are they supplying the attributes? */
		mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
	}

	/*
	 * It is possible to hang here if another processor is remapping and removing
	 * the pages we collide with.
	 */

	while(1) {						/* Keep trying the enter until it goes in */

		colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL);	/* Enter the mapping into the pmap */

		if(!colva) break;				/* If there were no collisions, we are done... */

		mapping_remove(pmap, colva);			/* Remove the mapping that collided */
	}
}
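
/*
 * Illustrative note on the retry loop above: mapping_make() returns 0 on
 * success or the virtual address of a colliding mapping.  Since pmap_enter()
 * may not lose information, it evicts the collider with mapping_remove() and
 * retries until the one-page mapping is actually entered.
 */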

/*
 *	Enters translations for odd-sized V=F blocks.
 *
 *	The higher level VM map should be locked to ensure that we don't have a
 *	double diddle here.
 *
 *	We panic if we get a block that overlaps with another.  We do not merge adjacent
 *	blocks because removing any address within a block removes the entire block, and it
 *	would really mess things up if we trashed too much.
 *
 *	Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
 *	not be changed.  The block must be unmapped and then remapped with the new stuff.
 *	We also do not keep track of reference or change flags.
 *
 *	Any block that is larger than 256MB must be a multiple of 32MB.  We panic if it is not.
 *
 *	Note that pmap_map_block_rc is the same but doesn't panic if collision.
 */

void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

	unsigned int mflags;
	addr64_t colva;


	if (pmap == PMAP_NULL) {				/* Did they give us a pmap? */
		panic("pmap_map_block: null pmap\n");		/* No, like that's dumb... */
	}

//	kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n",	/* (BRINGUP) */
//		current_thread(), va, pa, size, prot, attr, flags);	/* (BRINGUP) */

	mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
	if(flags) mflags |= mmFlgPerm;				/* Mark permanent if requested */

	colva = mapping_make(pmap, va, pa, mflags, size, prot);	/* Enter the mapping into the pmap */

	if(colva) {						/* If there was a collision, panic */
		panic("pmap_map_block: mapping error %d, pmap = %08X, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
	}

	return;							/* Return */
}

int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

	unsigned int mflags;
	addr64_t colva;


	if (pmap == PMAP_NULL) {				/* Did they give us a pmap? */
		panic("pmap_map_block_rc: null pmap\n");	/* No, like that's dumb... */
	}

	mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
	if(flags) mflags |= mmFlgPerm;				/* Mark permanent if requested */

	colva = mapping_make(pmap, va, pa, mflags, size, prot);	/* Enter the mapping into the pmap */

	if(colva) return 0;					/* If there was a collision, fail */

	return 1;						/* Return true if we worked */
}
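
/*
 * Usage sketch (hypothetical values): a caller that can recover from overlap
 * uses the _rc variant and checks the result instead of panicking:
 *
 *	if (!pmap_map_block_rc(kernel_pmap, va, pa, pages,
 *			       VM_PROT_READ | VM_PROT_WRITE,
 *			       VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED, 0))
 *		(handle the collision instead of panicking)
 */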

/*
 *	pmap_extract(pmap, va)
 *	returns the physical address corresponding to the
 *	virtual address specified by pmap and va if the
 *	virtual address is mapped and 0 if it is not.
 *	Note: we assume nothing is ever mapped to phys 0.
 *
 *	NOTE: This call always will fail for physical addresses greater than 0xFFFFF000.
 */
vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va) {

	spl_t spl;
	register struct mapping *mp;
	register vm_offset_t pa;
	addr64_t nextva;
	ppnum_t ppoffset;
	unsigned int gva;

#ifdef BOGUSCOMPAT
	panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n");	/* Don't use this */
#else

	gva = (unsigned int)va;					/* Make sure we don't have a sign */

	spl = splhigh();					/* We can't allow any loss of control here */

	mp = mapping_find(pmap, (addr64_t)gva, &nextva, 1);	/* Find the mapping for this address */

	if(!mp) {						/* Is the page mapped? */
		splx(spl);					/* Enable interrupts */
		return 0;					/* Pass back 0 if not found */
	}

	ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12);	/* Get offset from va to base va */


	pa = mp->mpPAddr + ppoffset;				/* Remember ppage because mapping may vanish after drop call */

	mapping_drop_busy(mp);					/* We have everything we need from the mapping */
	splx(spl);						/* Restore 'rupts */

	if(pa > maxPPage32) return 0;				/* Force large addresses to fail */

	pa = (pa << 12) | (va & 0xFFF);				/* Convert physical page number to address */

#endif
	return pa;						/* Return physical address or 0 */
}

/*
 *	ppnum_t pmap_find_phys(pmap, addr64_t va)
 *		returns the physical page corresponding to the
 *		virtual address specified by pmap and va if the
 *		virtual address is mapped and 0 if it is not.
 *	Note: we assume nothing is ever mapped to phys 0.
 *
 */
ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {

	spl_t spl;
	register struct mapping *mp;
	ppnum_t pa, ppoffset;
	addr64_t nextva;

	spl = splhigh();	/* We can't allow any loss of control here */

	mp = mapping_find(pmap, va, &nextva, 1);	/* Find the mapping for this address */

	if(!mp) {	/* Is the page mapped? */
		splx(spl);	/* Enable interrupts */
		return 0;	/* Pass back 0 if not found */
	}

	ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12);	/* Get offset from va to base va */

	pa = mp->mpPAddr + ppoffset;	/* Get the actual physical address */

	mapping_drop_busy(mp);	/* We have everything we need from the mapping */

	splx(spl);	/* Restore 'rupts */
	return pa;	/* Return physical address or 0 */
}
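
/*
 * Illustrative sketch, not part of the original source: unlike pmap_extract()
 * above, pmap_find_phys() returns a page number rather than an address, so it
 * works for physical pages above 4GB.  A caller wanting a full 64-bit physical
 * address can rebuild it from the page number and the in-page offset.  The
 * helper name is hypothetical.
 */
static addr64_t example_find_phys_addr(pmap_t pmap, addr64_t va) {

	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);	/* 0 means the page is not mapped */
	if(!pp) return 0;

	return (((addr64_t)pp << 12) | (va & 0xFFF));	/* Page number back to byte address */
}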


/*
 * pmap_attribute:
 *
 *	Set/Get special memory attributes; not implemented.
 *
 *	Note: 'VAL_GET_INFO' is used to return info about a page.
 *	  If less than 1 page is specified, return the physical page
 *	  mapping and a count of the number of mappings to that page.
 *	  If more than one page is specified, return the number
 *	  of resident pages and the number of shared (more than
 *	  one mapping) pages in the range.
 *
 */
kern_return_t
pmap_attribute(
	__unused pmap_t				pmap,
	__unused vm_map_offset_t		address,
	__unused vm_map_size_t			size,
	__unused vm_machine_attribute_t		attribute,
	__unused vm_machine_attribute_val_t*	value)
{
	return KERN_INVALID_ARGUMENT;
}

/*
 * pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page
 */

kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size,
				__unused vm_machine_attribute_t attribute,
				__unused vm_machine_attribute_val_t* value) {

	spl_t s;
	unsigned int i, npages;

	npages = round_page(size) >> 12;	/* Get the number of pages to do */

	for(i = 0; i < npages; i++) {	/* Do all requested pages */
		s = splhigh();	/* No interruptions here */
		sync_ppage(pp + i);	/* Go flush data cache and invalidate icache */
		splx(s);	/* Allow interruptions */
	}

	return KERN_SUCCESS;
}
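
/*
 * Illustrative sketch, not part of the original source: a typical use of the
 * routine above is making freshly written instructions visible after code has
 * been modified, e.g. when a debugger plants a breakpoint.  The helper name is
 * hypothetical; the attribute arguments are unused by the implementation.
 */
static void example_sync_code_page(ppnum_t pp) {

	(void)pmap_attribute_cache_sync(pp, PAGE_SIZE, 0, NULL);	/* One page: push dcache, kill icache */
}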

/*
 * pmap_sync_page_data_phys(ppnum_t pa)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page
 */

void pmap_sync_page_data_phys(ppnum_t pa) {

	spl_t s;

	s = splhigh();	/* No interruptions here */
	sync_ppage(pa);	/* Sync up dem caches */
	splx(s);	/* Allow interruptions */
	return;
}

void
pmap_sync_page_attributes_phys(ppnum_t pa)
{
	pmap_sync_page_data_phys(pa);
}

/*
 * pmap_collect
 *
 * Garbage collects the physical map system for pages that are no longer used.
 * It isn't implemented or needed or wanted.
 */
void
pmap_collect(__unused pmap_t pmap)
{
	return;
}

/*
 *	Routine:	pmap_activate
 *	Function:
 *		Binds the given physical map to the given
 *		processor, and returns a hardware map description.
 *		It isn't implemented or needed or wanted.
 */
void
pmap_activate(
	__unused pmap_t pmap,
	__unused thread_t th,
	__unused int which_cpu)
{
	return;
}
/*
 * pmap_deactivate:
 * It isn't implemented or needed or wanted.
 */
void
pmap_deactivate(
	__unused pmap_t pmap,
	__unused thread_t th,
	__unused int which_cpu)
{
	return;
}


/*
 * pmap_pageable(pmap, s, e, pageable)
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter()
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 *
 *	(called from vm/vm_fault.c).
 */
void
pmap_pageable(
	__unused pmap_t			pmap,
	__unused vm_map_offset_t	start,
	__unused vm_map_offset_t	end,
	__unused boolean_t		pageable)
{
	return;	/* This is not used... */
}
/*
 *	Routine:	pmap_change_wiring
 *	NOT USED ANYMORE.
 */
void
pmap_change_wiring(
	__unused pmap_t			pmap,
	__unused vm_map_offset_t	va,
	__unused boolean_t		wired)
{
	return;	/* This is not used... */
}

/*
 * pmap_modify_pages(pmap, s, e)
 *	sets the modified bit on all virtual addresses v in the
 *	virtual address range determined by [s, e] and pmap,
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 *
 *	Note that this function will not descend nested pmaps.
 */
void
pmap_modify_pages(
	pmap_t pmap,
	vm_map_offset_t sva,
	vm_map_offset_t eva)
{
	spl_t spl;
	mapping_t *mp;
	ppnum_t pa;
	addr64_t va, endva;
	unsigned int savetype;

	if (pmap == PMAP_NULL) return;	/* If no pmap, can't do it... */

	va = sva & -4096;	/* Round to page */
	endva = eva & -4096;	/* Round to page */

	while (va < endva) {	/* Walk through all pages */

		spl = splhigh();	/* We can't allow any loss of control here */

		mp = mapping_find(pmap, (addr64_t)va, &va, 0);	/* Find the mapping for this address */

		if(!mp) {	/* Is the page mapped? */
			splx(spl);	/* Page not mapped, restore interruptions */
			if((va == 0) || (va >= endva)) break;	/* We are done if there are no more or we hit the end... */
			continue;	/* We are not done and there is more to check... */
		}

		savetype = mp->mpFlags & mpType;	/* Remember the type */
		pa = mp->mpPAddr;	/* Remember ppage because mapping may vanish after drop call */

		mapping_drop_busy(mp);	/* We have everything we need from the mapping */

		splx(spl);	/* Restore 'rupts */

		if(savetype != mpNormal) continue;	/* Can't mess around with these guys... */

		mapping_set_mod(pa);	/* Set the modified bit for this page */

		if(va == 0) break;	/* We hit the end of the pmap, might as well leave now... */
	}
	return;	/* Leave... */
}
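
/*
 * Illustrative sketch, not part of the original source: the loop above shows
 * the general pattern for walking a pmap with mapping_find().  The out-parameter
 * advances the cursor past the current mapping (or past a hole when nothing is
 * found), and 0 signals that we ran off the end of the pmap.  The walker below
 * is a hypothetical skeleton of that pattern.
 */
static void example_walk_pmap(pmap_t pmap, addr64_t va, addr64_t endva) {

	mapping_t *mp;
	spl_t spl;

	while(va < endva) {	/* Walk through all pages in the range */
		spl = splhigh();	/* Mapping will be busy; no interruptions */
		mp = mapping_find(pmap, va, &va, 0);	/* va now points past this mapping */
		if(!mp) {	/* Hole or end? */
			splx(spl);
			if((va == 0) || (va >= endva)) break;	/* Off the end of the pmap or range */
			continue;	/* Just a hole; keep going */
		}

		/* ...inspect mp here; it is held busy and cannot vanish... */

		mapping_drop_busy(mp);	/* Release before enabling interruptions */
		splx(spl);
		if(va == 0) break;	/* Wrapped; end of pmap */
	}
}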

/*
 * pmap_clear_modify(phys)
 *	clears the hardware modified ("dirty") bit for one
 *	machine independent page starting at the given
 *	physical address.  phys must be aligned on a machine
 *	independent page boundary.
 */
void
pmap_clear_modify(ppnum_t pa)
{
	mapping_clr_mod(pa);	/* Clear all change bits for physical page */
}

/*
 * pmap_is_modified(phys)
 *	returns TRUE if the given physical page has been modified
 *	since the last call to pmap_clear_modify().
 */
boolean_t
pmap_is_modified(register ppnum_t pa)
{
	return mapping_tst_mod(pa);	/* Check for modified */
}

/*
 * pmap_clear_reference(phys)
 *	clears the hardware referenced bit in the given machine
 *	independent physical page.
 *
 */
void
pmap_clear_reference(ppnum_t pa)
{
	mapping_clr_ref(pa);	/* Clear the reference bit */
}

/*
 * pmap_is_referenced(phys)
 *	returns TRUE if the given physical page has been referenced
 *	since the last call to pmap_clear_reference().
 */
boolean_t
pmap_is_referenced(ppnum_t pa)
{
	return mapping_tst_ref(pa);	/* Check for referenced */
}

/*
 * pmap_get_refmod(phys)
 *	returns the referenced and modified bits of the specified
 *	physical page.
 */
unsigned int
pmap_get_refmod(ppnum_t pa)
{
	return (mapping_tst_refmod(pa));
}

/*
 * pmap_clear_refmod(phys, mask)
 *	clears the referenced and modified bits as specified by the mask
 *	of the specified physical page.
 */
void
pmap_clear_refmod(ppnum_t pa, unsigned int mask)
{
	mapping_clr_refmod(pa, mask);
}
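
/*
 * Illustrative sketch, not part of the original source: the VM layer typically
 * reads and clears both bits in one pass.  The mask names VM_MEM_REFERENCED and
 * VM_MEM_MODIFIED are assumed from the Mach VM headers; the helper name is
 * hypothetical.
 */
static boolean_t example_test_and_clear_dirty(ppnum_t pa) {

	unsigned int bits;

	bits = pmap_get_refmod(pa);	/* Read both bits in one call */
	pmap_clear_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);	/* Clear both */
	return ((bits & VM_MEM_MODIFIED) != 0);	/* Was the page dirty? */
}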

/*
 * pmap_eligible_for_execute(ppnum_t pa)
 *	return true if physical address is eligible to contain executable code;
 *	otherwise, return false
 */
boolean_t
pmap_eligible_for_execute(ppnum_t pa)
{
	phys_entry_t *physent;
	unsigned int pindex;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */

	if((!physent) || (physent->ppLink & ppG))
		return 0;	/* If there is no physical entry or it is marked guarded,
				   the page is not eligible for execute */

	return 1;	/* Otherwise, the page is eligible for execute */
}

#if	MACH_VM_DEBUG
int
pmap_list_resident_pages(
	__unused pmap_t		pmap,
	__unused vm_offset_t	*listp,
	__unused int		space)
{
	return 0;
}
#endif	/* MACH_VM_DEBUG */

/*
 * Locking:
 *	spl: VM
 */
void
pmap_copy_part_page(
	vm_offset_t	src,
	vm_offset_t	src_offset,
	vm_offset_t	dst,
	vm_offset_t	dst_offset,
	vm_size_t	len)
{
	addr64_t fsrc, fdst;

	/* The copy must stay within one page on each side */
	assert((((dst << 12) & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
	assert((((src << 12) & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

	fsrc = ((addr64_t)src << 12) + src_offset;
	fdst = ((addr64_t)dst << 12) + dst_offset;

	phys_copy(fsrc, fdst, len);	/* Copy the stuff physically */
}

void
pmap_zero_part_page(
	__unused vm_offset_t	p,
	__unused vm_offset_t	offset,
	__unused vm_size_t	len)
{
	panic("pmap_zero_part_page");
}

boolean_t pmap_verify_free(ppnum_t pa) {

	struct phys_entry *pp;
	unsigned int pindex;

	pp = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if (pp == 0) return FALSE;	/* If there isn't one, show no mapping... */

	if(pp->ppLink & ~(ppLock | ppFlags)) return FALSE;	/* We have at least one mapping */
	return TRUE;	/* No mappings */
}


/* Determine if we need to switch space and set up for it if so */

void pmap_switch(pmap_t map)
{
	hw_blow_seg(lowGlo.lgUMWvaddr);	/* Blow off the first segment */
	hw_blow_seg(lowGlo.lgUMWvaddr + 0x10000000ULL);	/* Blow off the second segment */

	/* when changing to kernel space, don't bother
	 * doing anything, the kernel is mapped from here already.
	 */
	if (map->space == PPC_SID_KERNEL) {	/* Are we switching into kernel space? */
		return;	/* If so, we don't do anything... */
	}

	hw_set_user_space(map);	/* Indicate if we need to load the SRs or not */
	return;	/* Bye, bye, butterfly... */
}

/*
 *	kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
 *
 *	grand  = the pmap that we will nest subord into
 *	subord = the pmap that goes into the grand
 *	vstart = start of range in the grand pmap to be inserted
 *	nstart = start of range in the nested pmap
 *	size   = size of nest area (up to 2TB)
 *
 *	Inserts a pmap into another.  This is used to implement shared segments.
 *	On the current PPC processors, this is limited to segment (256MB) aligned
 *	segment sized ranges.
 *
 *	We actually kinda allow recursive nests.  The gating factor is that we do not allow
 *	nesting on top of something that is already mapped, i.e., the range must be empty.
 *
 *	Note that we depend upon higher level VM locks to ensure that things don't change while
 *	we are doing this.  For example, VM should not be doing any pmap enters while it is nesting
 *	or doing 2 nests at once.
 */

kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {

	addr64_t vend, colladdr;
	unsigned int msize;
	int nlists;
	mapping_t *mp;

	if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this for multiples of 256MB */
	if((size >> 25) > 65536) return KERN_INVALID_VALUE;	/* Max size we can nest is 2TB */
	if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */
	if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */

	if(size == 0) {	/* Is the size valid? */
		panic("pmap_nest: size is invalid - %016llX\n", size);
	}

	msize = (size >> 25) - 1;	/* Change size to blocks of 32MB */

	nlists = mapSetLists(grand);	/* Set number of lists this will be on */

	mp = mapping_alloc(nlists);	/* Get a spare mapping block */

	mp->mpFlags = 0x01000000 | mpNest | mpPerm | mpBSu | nlists;	/* Make this a permanent nested pmap with a 32MB basic size unit */
							/* Set the flags. Make sure busy count is 1 */
	mp->mpSpace = subord->space;	/* Set the address space/pmap lookup ID */
	mp->u.mpBSize = msize;	/* Set the size */
	mp->mpPte = 0;	/* Set the PTE invalid */
	mp->mpPAddr = 0;	/* Set the physical page number */
	mp->mpVAddr = vstart;	/* Set the address */
	mp->mpNestReloc = nstart - vstart;	/* Set grand to nested vaddr relocation value */

	colladdr = hw_add_map(grand, mp);	/* Go add the mapping to the pmap */

	if(colladdr) {	/* Did it collide? */
		vend = vstart + size - 4096;	/* Point to the last page we would cover in nest */
		panic("pmap_nest: attempt to nest into a non-empty range - pmap = %08X, start = %016llX, end = %016llX\n",
			grand, vstart, vend);
	}

	return KERN_SUCCESS;
}
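
/*
 * Illustrative sketch, not part of the original source: nesting one 256MB
 * segment of a shared pmap into a task's pmap.  Both addresses and the size
 * must be 256MB (0x10000000) aligned; the addresses here are made up.
 */
static void example_nest_shared_segment(pmap_t grand, pmap_t subord) {

	kern_return_t kr;

	kr = pmap_nest(grand, subord,
		0x0000000090000000ULL,	/* vstart: where the nest appears in grand */
		0x0000000000000000ULL,	/* nstart: where the range lives in subord */
		0x0000000010000000ULL);	/* size: exactly one 256MB segment */
	if(kr != KERN_SUCCESS) {
		/* range was misaligned or already occupied */
	}
}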

/*
 *	kern_return_t pmap_unnest(grand, vaddr)
 *
 *	grand = the pmap that the nested pmap was nested into
 *	vaddr = start of range in pmap to be unnested
 *
 *	Removes a pmap from another.  This is used to implement shared segments.
 *	On the current PPC processors, this is limited to segment (256MB) aligned
 *	segment sized ranges.
 */

kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {

	unsigned int tstamp, i, mycpu;
	addr64_t nextva;
	spl_t s;
	mapping_t *mp;

	s = splhigh();	/* Make sure interruptions are disabled */

	mp = mapping_find(grand, vaddr, &nextva, 0);	/* Find the nested map */

	if(((unsigned int)mp & mapRetCode) != mapRtOK) {	/* See if it was even nested */
		panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr);
	}

	if((mp->mpFlags & mpType) != mpNest) {	/* Did we find something other than a nest? */
		panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr);
	}

	if(mp->mpVAddr != vaddr) {	/* Make sure the address is the same */
		panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr);
	}

	(void)hw_atomic_and(&mp->mpFlags, ~mpPerm);	/* Show that this mapping is now removable */

	mapping_drop_busy(mp);	/* Go ahead and release the mapping now */

	splx(s);	/* Restore 'rupts */

	(void)mapping_remove(grand, vaddr);	/* Toss the nested pmap mapping */

	invalidateSegs(grand);	/* Invalidate the pmap segment cache */

	/*
	 *	Note that the following will force the segment registers to be reloaded
	 *	on all processors (if they are using the pmap we just changed) before returning.
	 *
	 *	This is needed.  The reason is that until the segment register is
	 *	reloaded, another thread in the same task on a different processor will
	 *	be able to access memory that it isn't allowed to anymore.  That can happen
	 *	because access to the subordinate pmap is being removed, but the pmap is still
	 *	valid.
	 *
	 *	Note that we only kick the other processor if we see that it was using the pmap while we
	 *	were changing it.
	 */

	for(i = 0; i < real_ncpus; i++) {	/* Cycle through processors */
		disable_preemption();
		mycpu = cpu_number();	/* Who am I? Am I just a dream? */
		if((unsigned int)grand == PerProcTable[i].ppe_vaddr->ppUserPmapVirt) {	/* Is this guy using the changed pmap? */

			PerProcTable[i].ppe_vaddr->ppInvSeg = 1;	/* Show that we need to invalidate the segments */

			if(i != mycpu) {

				tstamp = PerProcTable[i].ppe_vaddr->ruptStamp[1];	/* Save the processor's last interrupt time stamp */
				if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) == KERN_SUCCESS) {	/* Make sure we see the pmap change */
					if(!hw_cpu_wcng(&PerProcTable[i].ppe_vaddr->ruptStamp[1], tstamp, LockTimeOut)) {	/* Wait for the other processor to take the interruption */
						panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
					}
				}
			}
		}
		enable_preemption();
	}

	return KERN_SUCCESS;	/* Bye, bye, butterfly... */
}


/*
 *	void MapUserMemoryWindowInit(void)
 *
 *	Initialize anything we need to in order to map user address space slices into
 *	the kernel.  Primarily used for copy in/out.
 *
 *	Currently we only support one 512MB slot for this purpose.  There are two special
 *	mappings defined for the purpose: the special pmap nest, and linkage mapping.
 *
 *	The special pmap nest (which is allocated in this function) is used as a placeholder
 *	in the kernel's pmap search list.  It is 512MB long and covers the address range
 *	starting at lgUMWvaddr.  It points to no actual memory and when the fault handler
 *	hits in it, it knows to look in the per_proc and start using the linkage
 *	mapping contained therein.
 *
 *	The linkage mapping is used to glue the user address space slice into the
 *	kernel.  It contains the relocation information used to transform the faulting
 *	kernel address into the user address space.  It also provides the link to the
 *	user's pmap.  This is pointed to by the per_proc and is switched in and out
 *	whenever there is a context switch.
 *
 */

void MapUserMemoryWindowInit(void) {

	addr64_t colladdr;
	int nlists;
	mapping_t *mp;

	nlists = mapSetLists(kernel_pmap);	/* Set number of lists this will be on */

	mp = mapping_alloc(nlists);	/* Get a spare mapping block */

	mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists;	/* Make this a permanent nested pmap with a 32MB basic size unit */
							/* Set the flags. Make sure busy count is 1 */
	mp->mpSpace = kernel_pmap->space;	/* Set the address space/pmap lookup ID */
	mp->u.mpBSize = 15;	/* Set the size to 2 segments in 32MB chunks - 1 */
	mp->mpPte = 0;	/* Means nothing */
	mp->mpPAddr = 0;	/* Means nothing */
	mp->mpVAddr = lowGlo.lgUMWvaddr;	/* Set the address range we cover */
	mp->mpNestReloc = 0;	/* Means nothing */

	colladdr = hw_add_map(kernel_pmap, mp);	/* Go add the mapping to the pmap */

	if(colladdr) {	/* Did it collide? */
		panic("MapUserMemoryWindowInit: MapUserMemoryWindow range already mapped\n");
	}

	return;
}

/*
 *	addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va)
 *
 *	map = the vm_map that we are mapping into the kernel
 *	va  = start of the address range we are mapping
 *	Note that we do not test validity, we chose to trust our fellows...
 *
 *	Maps a 512M slice of a user address space into a predefined kernel range
 *	on a per-thread basis.  We map only the first 256M segment, allowing the
 *	second 256M segment to fault in as needed.  This allows our clients to access
 *	an arbitrarily aligned operand up to 256M in size.
 *
 *	In the future, the restriction of a predefined range may be loosened.
 *
 *	Builds the proper linkage map to map the user range.
 *	We will round this down to the previous segment boundary and calculate
 *	the relocation to the kernel slot.
 *
 *	We always make a segment table entry here if we need to.  This is mainly because of
 *	copyin/out and if we don't, there will be multiple segment faults for
 *	each system call.  I have seen upwards of 30000 per second.
 *
 *	We do check, however, to see if the slice is already mapped and if so,
 *	we just exit.  This is done for performance reasons.  It was found that
 *	there was a considerable boost in copyin/out performance if we did not
 *	invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
 *	restriction that you had to bracket MapUserMemoryWindow.  Further, there
 *	is a yet further boost if you didn't need to map it each time.  The theory
 *	behind this is that many times copies are to or from the same segment and
 *	done multiple times within the same system call.  To take advantage of that,
 *	we check umwSpace and umwRelo to see if we've already got it.
 *
 *	We also need to half-invalidate the slice when we context switch or go
 *	back to user state.  A half-invalidate does not clear the actual mapping,
 *	but it does force the MapUserMemoryWindow function to reload the segment
 *	register/SLBE.  If this is not done, we can end up with some pretty severe
 *	performance penalties.  If we map a slice, and the cached space/relocation is
 *	the same, we won't reload the segment registers.  However, since we ran someone else,
 *	our SR is cleared and we will take a fault.  This is reasonable if we block
 *	while copying (e.g., we took a page fault), but it is not reasonable when we
 *	just start.  For this reason, we half-invalidate to make sure that the SR is
 *	explicitly reloaded.
 *
 *	Note that we do not go to the trouble of making a pmap segment cache
 *	entry for these guys because they are very short term -- 99.99% of the time
 *	they will be unmapped before the next context switch.
 *
 */

addr64_t MapUserMemoryWindow(
	vm_map_t map,
	addr64_t va) {

	addr64_t baddrs, reladd;
	thread_t thread;
	mapping_t *mp;

	baddrs = va & 0xFFFFFFFFF0000000ULL;	/* Isolate the segment */
	thread = current_thread();	/* Remember our activation */

	reladd = baddrs - lowGlo.lgUMWvaddr;	/* Get the relocation from user to kernel */

	if((thread->machine.umwSpace == map->pmap->space) && (thread->machine.umwRelo == reladd)) {	/* Already mapped? */
		return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr);	/* Pass back the kernel address we are to use */
	}

	disable_preemption();	/* Don't move... */

	mp = (mapping_t *)&(getPerProc()->ppUMWmp);	/* Make up for C */
	thread->machine.umwRelo = reladd;	/* Relocation from user to kernel */
	mp->mpNestReloc = reladd;	/* Relocation from user to kernel */

	thread->machine.umwSpace = map->pmap->space;	/* Set the address space/pmap lookup ID */
	mp->mpSpace = map->pmap->space;	/* Set the address space/pmap lookup ID */

	/*
	 *	Here we make an assumption that we are going to be using the base pmap's address space.
	 *	If we are wrong, and that would be very, very, very rare, the fault handler will fix us up.
	 */

	hw_map_seg(map->pmap, lowGlo.lgUMWvaddr, baddrs);	/* Make the entry for the first segment */

	enable_preemption();	/* Let's move */
	return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr);	/* Pass back the kernel address we are to use */
}
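
/*
 * Illustrative sketch, not part of the original source: how a copyin-style
 * path might use the window.  The bcopy stands in for the real copy loop;
 * an actual copyin path also handles faults and recovery, which is omitted
 * here, and this assumes the operand stays within the 256M limit described
 * above.  The helper name is hypothetical.
 */
static void example_copy_via_window(vm_map_t map, addr64_t uaddr, void *kbuf, vm_size_t len) {

	addr64_t kaddr;

	kaddr = MapUserMemoryWindow(map, uaddr);	/* Map the user slice into the kernel window */
	bcopy((void *)(uintptr_t)kaddr, kbuf, len);	/* Placeholder for the fault-guarded copy */
}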


/*
 *	kern_return_t pmap_boot_map(size)
 *
 *	size = size of virtual address range to be mapped
 *
 *	This function is used to assign a range of virtual addresses before VM is
 *	initialized.  It starts at VM_MAX_KERNEL_ADDRESS and works downward.
 *	The variable vm_last_addr contains the current highest possible VM
 *	assignable address.  It is a panic to attempt to call this after VM has
 *	started up.  The only problem is that we may not have the serial or
 *	framebuffer mapped, so we'll never know we died.........
 */

vm_offset_t pmap_boot_map(vm_size_t size) {

	if(kernel_map != VM_MAP_NULL) {	/* Has VM already started? */
		panic("pmap_boot_map: VM started\n");
	}

	size = round_page(size);	/* Make sure this is in pages */
	vm_last_addr = vm_last_addr - size;	/* Allocate the memory */
	return (vm_last_addr + 1);	/* Return the vaddr we just allocated */
}
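
/*
 * Illustrative sketch, not part of the original source: early boot code can
 * reserve kernel virtual space this way before kmem is up.  The range grows
 * downward from VM_MAX_KERNEL_ADDRESS; the page count here is arbitrary and
 * the helper name is hypothetical.
 */
static vm_offset_t example_boot_reserve(void) {

	return pmap_boot_map(3 * PAGE_SIZE);	/* Reserve three pages of boot-time VA */
}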


/*
 *	void pmap_init_sharedpage(vm_offset_t cpg);
 *
 *	Hack map for the 64-bit commpage
 */

void pmap_init_sharedpage(vm_offset_t cpg) {

	addr64_t cva, cpoff;
	ppnum_t cpphys;

	sharedPmap = pmap_create(0);	/* Get a pmap to hold the common segment */
	if(!sharedPmap) {	/* Check for errors */
		panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
	}

	for(cpoff = 0; cpoff < _COMM_PAGE_AREA_USED; cpoff += 4096) {	/* Step along now */

		cpphys = pmap_find_phys(kernel_pmap, (addr64_t)cpg + cpoff);
		if(!cpphys) {
			panic("pmap_init_sharedpage: compage %08X not mapped in kernel\n", cpg + cpoff);
		}

		cva = mapping_make(sharedPmap, (addr64_t)((uint32_t)_COMM_PAGE_BASE_ADDRESS) + cpoff,
			cpphys, mmFlgPerm, 1, VM_PROT_READ);	/* Map the page read only */
		if(cva) {	/* Check for errors */
			panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva);
		}

	}

	return;
}

/*
 *	void pmap_map_sharedpage(task_t task, pmap_t pmap);
 *
 *	Maps the last segment in a 64-bit address space
 *
 */

void pmap_map_sharedpage(task_t task, pmap_t pmap) {

	kern_return_t ret;

	if(task_has_64BitAddr(task) || _cpu_capabilities & k64Bit) {	/* Should we map the 64-bit page -1? */
		ret = pmap_nest(pmap, sharedPmap, 0xFFFFFFFFF0000000ULL, 0x00000000F0000000ULL,
			0x0000000010000000ULL);	/* Nest the highest possible segment to map comm page */
		if(ret != KERN_SUCCESS) {	/* Did it work? */
			panic("pmap_map_sharedpage: couldn't nest shared page - ret = %08X\n", ret);
		}
	}

	return;
}
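
/*
 * Illustrative note, not part of the original source: the constants in the
 * call above satisfy pmap_nest()'s rules -- 0xFFFFFFFFF0000000 (vstart) and
 * 0x00000000F0000000 (nstart) are both 0x10000000 (256MB) aligned, and the
 * size 0x0000000010000000 is exactly one 256MB segment, i.e. the last
 * segment of a 64-bit address space.
 */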

/*
 *	void pmap_unmap_sharedpage(pmap_t pmap);
 *
 *	Unmaps the last segment in a 64-bit address space
 *
 */

void pmap_unmap_sharedpage(pmap_t pmap) {

	kern_return_t ret;
	mapping_t *mp;
	boolean_t inter;
	int gotnest;
	addr64_t nextva;

	if(BootProcInfo.pf.Available & pf64Bit) {	/* Are we on a 64-bit machine? */

		inter = ml_set_interrupts_enabled(FALSE);	/* Disable interruptions for now */
		mp = hw_find_map(pmap, 0xFFFFFFFFF0000000ULL, &nextva);	/* Find the mapping for this address */
		if((unsigned int)mp == mapRtBadLk) {	/* Did we lock up ok? */
			panic("pmap_unmap_sharedpage: mapping lock failure - rc = %08X, pmap = %08X\n", mp, pmap);	/* Die... */
		}

		gotnest = 0;	/* Assume nothing here */
		if(mp) {
			gotnest = ((mp->mpFlags & mpType) == mpNest);	/* Remember if we have a nest here */
			mapping_drop_busy(mp);	/* We have everything we need from the mapping */
		}
		ml_set_interrupts_enabled(inter);	/* Put interrupts back to what they were */

		if(!gotnest) return;	/* Leave if there isn't any nesting here */

		ret = pmap_unnest(pmap, 0xFFFFFFFFF0000000ULL);	/* Unnest the max 64-bit page */

		if(ret != KERN_SUCCESS) {	/* Did it work? */
			panic("pmap_unmap_sharedpage: couldn't unnest shared page - ret = %08X\n", ret);
		}
	}

	return;
}


/* temporary workaround */
boolean_t
coredumpok(
	__unused vm_map_t map,
	__unused vm_offset_t va)
{
	return TRUE;
}