]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/pmap.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / ppc / pmap.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1990,1991,1992 The University of Utah and
29 * the Center for Software Science (CSS).
30 * Copyright (c) 1991,1987 Carnegie Mellon University.
31 * All rights reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation,
38 * and that all advertising materials mentioning features or use of
39 * this software display the following acknowledgement: ``This product
40 * includes software developed by the Center for Software Science at
41 * the University of Utah.''
42 *
43 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
44 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
45 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
46 * THIS SOFTWARE.
47 *
48 * CSS requests users of this software to return to css-dist@cs.utah.edu any
49 * improvements that they make and grant CSS redistribution rights.
50 *
51 * Carnegie Mellon requests users of this software to return to
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 * any improvements or extensions that they make and grant Carnegie Mellon
57 * the rights to redistribute these changes.
58 *
59 * Utah $Hdr: pmap.c 1.28 92/06/23$
60 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
61 */
62
63 /*
64 * Manages physical address maps for powerpc.
65 *
66 * In addition to hardware address maps, this
67 * module is called upon to provide software-use-only
68 * maps which may or may not be stored in the same
69 * form as hardware maps. These pseudo-maps are
70 * used to store intermediate results from copy
71 * operations to and from address spaces.
72 *
73 * Since the information managed by this module is
74 * also stored by the logical address mapping module,
75 * this module may throw away valid virtual-to-physical
76 * mappings at almost any time. However, invalidations
77 * of virtual-to-physical mappings must be done as
78 * requested.
79 *
80 * In order to cope with hardware architectures which
81 * make virtual-to-physical map invalidates expensive,
82 * this module may delay invalidate or reduced protection
83 * operations until such time as they are actually
84 * necessary. This module is given full information to
85 * when physical maps must be made correct.
86 *
87 */
88
89 #include <zone_debug.h>
90 #include <debug.h>
91 #include <mach_kgdb.h>
92 #include <mach_vm_debug.h>
93 #include <db_machine_commands.h>
94
95 #include <kern/thread.h>
96 #include <kern/simple_lock.h>
97 #include <mach/vm_attributes.h>
98 #include <mach/vm_param.h>
99 #include <vm/vm_kern.h>
100 #include <kern/spl.h>
101
102 #include <kern/misc_protos.h>
103 #include <ppc/misc_protos.h>
104 #include <ppc/proc_reg.h>
105
106 #include <vm/pmap.h>
107 #include <vm/vm_map.h>
108 #include <vm/vm_page.h>
109
110 #include <ppc/pmap.h>
111 #include <ppc/mem.h>
112 #include <ppc/mappings.h>
113
114 #include <ppc/new_screen.h>
115 #include <ppc/Firmware.h>
116 #include <ppc/savearea.h>
117 #include <ppc/cpu_internal.h>
118 #include <ppc/exception.h>
119 #include <ppc/low_trace.h>
120 #include <ppc/lowglobals.h>
121 #include <ddb/db_output.h>
122 #include <machine/cpu_capabilities.h>
123
124 #include <vm/vm_protos.h> /* must be last */
125
126
extern unsigned int avail_remaining;			/* Pages still handed out by pmap_next_page; totaled in pmap_bootstrap */
unsigned int debugbackpocket;				/* (TEST/DEBUG) */

vm_offset_t first_free_virt;				/* First virtual address past everything pmap_bootstrap allocated */
int current_free_region;				/* Used in pmap_next_page: index of bank currently being allocated from */

pmapTransTab *pmapTrans;				/* Point to the hash to pmap translations */
struct phys_entry *phys_table;

/* forward */
static void pmap_map_physical(void);
static void pmap_map_iohole(addr64_t paddr, addr64_t size);
void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);

extern void hw_hash_init(void);

/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */

extern struct pmap kernel_pmap_store;
pmap_t kernel_pmap;					/* Pointer to kernel pmap and anchor for in-use pmaps */
addr64_t kernel_pmap_phys;				/* Pointer to kernel pmap and anchor for in-use pmaps, physical address */
pmap_t cursor_pmap;					/* Pointer to last pmap allocated or previous if removed from in-use list */
pmap_t sharedPmap;					/* Pointer to common pmap for 64-bit address spaces */
struct zone *pmap_zone;					/* zone of pmap structures */
boolean_t pmap_initialized = FALSE;			/* Set TRUE by pmap_init() once the VM system is up */

int ppc_max_pmaps;					/* Maximum number of concurrent address spaces allowed. This is machine dependent */
addr64_t vm_max_address;				/* Maximum effective address supported */
addr64_t vm_max_physical;				/* Maximum physical address supported */

/*
 * Physical-to-virtual translations are handled by inverted page table
 * structures, phys_tables. Multiple mappings of a single page are handled
 * by linking the affected mapping structures. We initialise one region
 * for phys_tables of the physical memory we know about, but more may be
 * added as it is discovered (eg. by drivers).
 */

/*
 * Free pmap list: caches up to free_pmap_max pmaps that are freed up,
 * so pmap_create can recycle them without a fresh VSID search.
 */
int free_pmap_max = 32;					/* Bound on the number of cached free pmaps */
int free_pmap_count;					/* Current number of pmaps on the free list */
pmap_t free_pmap_list;					/* Head of free list (chained through the freepmap field) */
decl_simple_lock_data(,free_pmap_lock)			/* Protects the free list and the in-use pmap chain */
173
174 /*
175 * Function to get index into phys_table for a given physical address
176 */
177
178 struct phys_entry *pmap_find_physentry(ppnum_t pa)
179 {
180 int i;
181 unsigned int entry;
182
183 for (i = pmap_mem_regions_count - 1; i >= 0; i--) {
184 if (pa < pmap_mem_regions[i].mrStart) continue; /* See if we fit in this region */
185 if (pa > pmap_mem_regions[i].mrEnd) continue; /* Check the end too */
186
187 entry = (unsigned int)pmap_mem_regions[i].mrPhysTab + ((pa - pmap_mem_regions[i].mrStart) * sizeof(phys_entry_t));
188 return (struct phys_entry *)entry;
189 }
190 // kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa);
191 return 0;
192 }
193
194 /*
195 * kern_return_t
196 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
197 * boolean_t available, unsigned int attr)
198 *
199 * THIS IS NOT SUPPORTED
200 */
201 kern_return_t
202 pmap_add_physical_memory(
203 __unused vm_offset_t spa,
204 __unused vm_offset_t epa,
205 __unused boolean_t available,
206 __unused unsigned int attr)
207 {
208
209 panic("Forget it! You can't map no more memory, you greedy puke!\n");
210 return KERN_SUCCESS;
211 }
212
213 /*
214 * pmap_map(va, spa, epa, prot)
215 * is called during boot to map memory in the kernel's address map.
216 * A virtual address range starting at "va" is mapped to the physical
217 * address range "spa" to "epa" with machine independent protection
218 * "prot".
219 *
220 * "va", "spa", and "epa" are byte addresses and must be on machine
221 * independent page boundaries.
222 *
223 * Pages with a contiguous virtual address range, the same protection, and attributes.
224 * therefore, we map it with a single block.
225 *
226 * Note that this call will only map into 32-bit space
227 *
228 */
229
230 vm_offset_t
231 pmap_map(
232 vm_offset_t va,
233 vm_offset_t spa,
234 vm_offset_t epa,
235 vm_prot_t prot)
236 {
237
238 addr64_t colladr;
239
240 if (spa == epa) return(va);
241
242 assert(epa > spa);
243
244 colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL);
245
246 if(colladr) { /* Was something already mapped in the range? */
247 panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
248 va, spa, epa, colladr);
249 }
250 return(va);
251 }
252
253 /*
254 * pmap_map_physical()
255 * Maps physical memory into the kernel's address map beginning at lgPMWvaddr, the
256 * physical memory window.
257 *
258 */
259 void
260 pmap_map_physical()
261 {
262 unsigned region;
263 uint64_t msize, size;
264 addr64_t paddr, vaddr, colladdr;
265
266 /* Iterate over physical memory regions, block mapping each into the kernel's address map */
267 for (region = 0; region < (unsigned)pmap_mem_regions_count; region++) {
268 paddr = ((addr64_t)pmap_mem_regions[region].mrStart << 12); /* Get starting physical address */
269 size = (((addr64_t)pmap_mem_regions[region].mrEnd + 1) << 12) - paddr;
270
271 vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
272
273 while (size > 0) {
274
275 msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
276
277 colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
278 (mmFlgBlock | mmFlgPerm), (msize >> 12),
279 (VM_PROT_READ | VM_PROT_WRITE));
280 if (colladdr) {
281 panic ("pmap_map_physical: mapping failure - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
282 vaddr, (paddr >> 12), (msize >> 12), colladdr);
283 }
284
285 vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
286 paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
287 size -= msize;
288 }
289 }
290 }
291
292 /*
293 * pmap_map_iohole(addr64_t paddr, addr64_t size)
294 * Maps an I/O hole into the kernel's address map at its proper offset in
295 * the physical memory window.
296 *
297 */
298 void
299 pmap_map_iohole(addr64_t paddr, addr64_t size)
300 {
301
302 addr64_t vaddr, colladdr, msize;
303 uint32_t psize;
304
305 vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
306
307 while (size > 0) {
308
309 msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
310
311 colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
312 (mmFlgBlock | mmFlgPerm | mmFlgGuarded | mmFlgCInhib), (msize >> 12),
313 (VM_PROT_READ | VM_PROT_WRITE));
314 if (colladdr) {
315 panic ("pmap_map_iohole: mapping failed - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
316 vaddr, (paddr >> 12), (msize >> 12), colladdr);
317 }
318
319 vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
320 paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
321 size -= msize;
322 }
323 }
324
/*
 * pmap_bootstrap(msize, first_avail, kmapsize)
 *
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Called with mapping done by BATs. Page_size must already be set.
 *
 * Carves the hash table and PCA out of the TOP of physical memory, then
 * allocates (bottom-up, starting at *first_avail): saveareas, the trace
 * table, the pmap translate table, the physical entry tables, and the
 * initial mapping blocks.  On 64-bit machines it also block-maps physical
 * memory and the I/O hole into the kernel's physical memory window.
 *
 * Parameters:
 *	msize:		Total memory present
 *	first_avail:	In/out: first virtual address available; updated past
 *			everything allocated here (page rounded)
 *	kmapsize:	Size of kernel text and data
 */
void
pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
{
	vm_offset_t addr;
	vm_size_t size;
	unsigned int i, num, mapsize, vmpagesz, vmmapsz, nbits;
	signed bank;
	uint64_t tmemsize;
	uint_t htslop;
	vm_offset_t first_used_addr, PCAsize;
	struct phys_entry *phys_entry;

	*first_avail = round_page(*first_avail);		/* Make sure we start out on a page boundary */
	vm_last_addr = VM_MAX_KERNEL_ADDRESS;			/* Set the highest address known to VM */

	/*
	 * Initialize kernel pmap
	 */
	kernel_pmap = &kernel_pmap_store;
	kernel_pmap_phys = (addr64_t)&kernel_pmap_store;	/* V=R at this point, so vaddr is also the paddr */
	cursor_pmap = &kernel_pmap_store;

	kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;	/* Set up anchor forward */
	kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;	/* Set up anchor reverse */
	kernel_pmap->ref_count = 1;
	kernel_pmap->pmapFlags = pmapKeyDef;			/* Set the default keys */
	kernel_pmap->pmapCCtl = pmapCCtlVal;			/* Initialize cache control */
	kernel_pmap->space = PPC_SID_KERNEL;
	kernel_pmap->pmapvr = 0;				/* Virtual = Real */

	/*
	 * IBM's recommended hash table size is one PTEG for every 2 physical pages.
	 * However, we have found that OSX rarely uses more than 4 PTEs in a PTEG
	 * with this size table. Therefore, by default we allocate a hash table
	 * one half IBM's recommended size, ie one PTEG per 4 pages. The "ht_shift" boot-arg
	 * can be used to override the default hash table size.
	 * We will allocate the hash table in physical RAM, outside of kernel virtual memory,
	 * at the top of the highest bank that will contain it.
	 * Note that "bank" doesn't refer to a physical memory slot here, it is a range of
	 * physically contiguous memory.
	 *
	 * The PCA will go there as well, immediately before the hash table.
	 */

	nbits = cntlzw(((msize << 1) - 1) >> 32);		/* Get first bit in upper half */
	if (nbits == 32)					/* If upper half was empty, find bit in bottom half */
		nbits = nbits + cntlzw((uint_t)((msize << 1) - 1));
	tmemsize = 0x8000000000000000ULL >> nbits;		/* Get memory size rounded up to power of 2 */

	/* Calculate hash table size: First, make sure we don't overflow 32-bit arithmetic. */
	if (tmemsize > 0x0000002000000000ULL)
		tmemsize = 0x0000002000000000ULL;

	/* Second, calculate IBM recommended hash table size, ie one PTEG per 2 physical pages */
	hash_table_size = (uint_t)(tmemsize >> 13) * PerProcTable[0].ppe_vaddr->pf.pfPTEG;

	/* Third, cut this in half to produce the OSX default, ie one PTEG per 4 physical pages */
	hash_table_size >>= 1;

	/* Fourth, adjust default size per "ht_shift" boot arg */
	if (hash_table_shift >= 0)				/* if positive, make size bigger */
		hash_table_size <<= hash_table_shift;
	else							/* if "ht_shift" is negative, make smaller */
		hash_table_size >>= (-hash_table_shift);

	/* Fifth, make sure we are at least minimum size */
	if (hash_table_size < (256 * 1024))
		hash_table_size = (256 * 1024);

	while(1) {						/* Try to fit hash table in PCA into contiguous memory */

		if(hash_table_size < (256 * 1024)) {		/* Have we dropped too short? This should never, ever happen */
			panic("pmap_bootstrap: Can't find space for hash table\n");	/* This will never print, system isn't up far enough... */
		}

		PCAsize = (hash_table_size / PerProcTable[0].ppe_vaddr->pf.pfPTEG) * sizeof(PCA_t);	/* Get total size of PCA table */
		PCAsize = round_page(PCAsize);			/* Make sure it is at least a page long */

		for(bank = pmap_mem_regions_count - 1; bank >= 0; bank--) {	/* Search backwards through banks */

			hash_table_base = ((addr64_t)pmap_mem_regions[bank].mrEnd << 12) - hash_table_size + PAGE_SIZE;	/* Get tentative address */

			htslop = hash_table_base & (hash_table_size - 1);	/* Get the extra that we will round down when we align */
			hash_table_base = hash_table_base & -(addr64_t)hash_table_size;	/* Round down to correct boundary (table must be size-aligned) */

			if((hash_table_base - round_page(PCAsize)) >= ((addr64_t)pmap_mem_regions[bank].mrStart << 12)) break;	/* Leave if we fit */
		}

		if(bank >= 0) break;				/* We are done if we found a suitable bank */

		hash_table_size = hash_table_size >> 1;		/* Try the next size down */
	}

	if(htslop) {						/* If there was slop (i.e., wasted pages for alignment) add a new region */
		/* NOTE(review): "i" is unsigned, so if bank == 0 the loop condition
		   i >= (unsigned)bank never goes false and i-- wraps at 0 — confirm
		   bank > 0 is guaranteed whenever htslop is nonzero. */
		for(i = pmap_mem_regions_count - 1; i >= (unsigned)bank; i--) {	/* Copy from end to our bank, including our bank */
			pmap_mem_regions[i + 1].mrStart  = pmap_mem_regions[i].mrStart;	/* Set the start of the bank */
			pmap_mem_regions[i + 1].mrAStart = pmap_mem_regions[i].mrAStart;	/* Set the start of allocatable area */
			pmap_mem_regions[i + 1].mrEnd    = pmap_mem_regions[i].mrEnd;	/* Set the end address of bank */
			pmap_mem_regions[i + 1].mrAEnd   = pmap_mem_regions[i].mrAEnd;	/* Set the end address of allocatable area */
		}

		/* Here i == bank - 1, so i + 1 indexes the original bank's slot */
		pmap_mem_regions[i + 1].mrStart  = (hash_table_base + hash_table_size) >> 12;	/* Set the start of the next bank to the start of the slop area */
		pmap_mem_regions[i + 1].mrAStart = (hash_table_base + hash_table_size) >> 12;	/* Set the start of allocatable area to the start of the slop area */
		pmap_mem_regions[i].mrEnd = (hash_table_base + hash_table_size - 4096) >> 12;	/* Set the end of our bank to the end of the hash table */

	}

	pmap_mem_regions[bank].mrAEnd = (hash_table_base - PCAsize - 4096) >> 12;	/* Set the maximum allocatable in this bank (below the PCA) */

	hw_hash_init();						/* Initialize the hash table and PCA */
	hw_setup_trans();					/* Set up hardware registers needed for translation */

	/*
	 * The hash table is now all initialized and so is the PCA. Go on to do the rest of it.
	 * This allocation is from the bottom up.
	 */

	num = atop_64(msize);					/* Get number of pages in all of memory */

	/* Figure out how much we need to allocate */

	size = (vm_size_t) (
		(InitialSaveBloks * PAGE_SIZE) +		/* Allow space for the initial context saveareas */
		(BackPocketSaveBloks * PAGE_SIZE) +		/* For backpocket saveareas */
		trcWork.traceSize +				/* Size of trace table */
		((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) +	/* Size of pmap translate table, page rounded */
		(((num * sizeof(struct phys_entry)) + 4095) & -4096)	/* For the physical entries, page rounded */
		);

	mapsize = size = round_page(size);			/* Get size of area to map that we just calculated */
	mapsize = mapsize + kmapsize;				/* Account for the kernel text size */

	vmpagesz = round_page(num * sizeof(struct vm_page));	/* Allow for all vm_pages needed to map physical mem */
	vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry));	/* Allow for vm_maps */

	mapsize = mapsize + vmpagesz + vmmapsz;			/* Add the VM system estimates into the grand total */

	mapsize = mapsize + (4 * 1024 * 1024);			/* Allow for 4 meg of extra mappings */
	mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks of mappings we need */
	mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);	/* Account for the mappings themselves */

	size = size + (mapsize * PAGE_SIZE);			/* Get the true size we need */

	/* hash table must be aligned to its size */

	addr = *first_avail;					/* Set the address to start allocations */
	first_used_addr = addr;					/* Remember where we started */

	bzero((char *)addr, size);				/* Clear everything that we are allocating */

	savearea_init(addr);					/* Initialize the savearea chains and data */

	addr = (vm_offset_t)((unsigned int)addr + ((InitialSaveBloks + BackPocketSaveBloks) * PAGE_SIZE));	/* Point past saveareas */

	trcWork.traceCurr = (unsigned int)addr;			/* Set first trace slot to use */
	trcWork.traceStart = (unsigned int)addr;		/* Set start of trace table */
	trcWork.traceEnd = (unsigned int)addr + trcWork.traceSize;	/* Set end of trace table */

	addr = (vm_offset_t)trcWork.traceEnd;			/* Set next allocatable location */

	pmapTrans = (pmapTransTab *)addr;			/* Point to the pmap to hash translation table */

	pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap);	/* Initialize the kernel pmap in the translate table (still V=R) */
	pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap);	/* Initialize the kernel pmap in the translate table */

	addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096);	/* Point past pmap translate table */

	/* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */

	phys_entry = (struct phys_entry *) addr;		/* Get pointer to physical table */

	for (bank = 0; bank < pmap_mem_regions_count; bank++) {	/* Set pointer and initialize all banks of ram */

		pmap_mem_regions[bank].mrPhysTab = phys_entry;	/* Set pointer to the physical table for this bank */

		phys_entry = phys_entry + (pmap_mem_regions[bank].mrEnd - pmap_mem_regions[bank].mrStart + 1);	/* Point to the next */
	}

	addr += (((num * sizeof(struct phys_entry)) + 4095) & -4096);	/* Step on past the physical entries */

	/*
	 * Remaining space is for mapping entries. Tell the initializer routine that
	 * the mapping system can't release this block because it's permanently assigned
	 */

	mapping_init();						/* Initialize the mapping tables */

	for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) {	/* Add initial mapping blocks */
		mapping_free_init(i, 1, 0);			/* Pass block address and say that this one is not releasable */
	}
	mapCtl.mapcmin = MAPPERBLOK;				/* Make sure we only adjust one at a time */

	/* Map V=R the page tables */
	pmap_map(first_used_addr, first_used_addr,
		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);

	*first_avail = round_page(first_used_addr + size);	/* Set next available page */
	first_free_virt = *first_avail;				/* Ditto */

	/* For 64-bit machines, block map physical memory and the I/O hole into kernel space */
	if(BootProcInfo.pf.Available & pf64Bit) {		/* Are we on a 64-bit machine? */
		lowGlo.lgPMWvaddr = PHYS_MEM_WINDOW_VADDR;	/* Initialize the physical memory window's virtual address */

		pmap_map_physical();				/* Block map physical memory into the window */

		pmap_map_iohole(IO_MEM_WINDOW_VADDR, IO_MEM_WINDOW_SIZE);
								/* Block map the I/O hole */
	}

	/* All the rest of memory is free - add it to the free
	 * regions so that it can be allocated by pmap_steal
	 */

	pmap_mem_regions[0].mrAStart = (*first_avail >> 12);	/* Set up the free area to start allocations (always in the first bank) */

	current_free_region = 0;				/* Set that we will start allocating in bank 0 */
	avail_remaining = 0;					/* Clear free page count */
	for(bank = 0; bank < pmap_mem_regions_count; bank++) {	/* Total up all of the pages in the system that are available */
		avail_remaining += (pmap_mem_regions[bank].mrAEnd - pmap_mem_regions[bank].mrAStart) + 1;	/* Add in allocatable pages in this bank */
	}


}
558
559 /*
560 * pmap_init(spa, epa)
561 * finishes the initialization of the pmap module.
562 * This procedure is called from vm_mem_init() in vm/vm_init.c
563 * to initialize any remaining data structures that the pmap module
564 * needs to map virtual memory (VM is already ON).
565 *
566 * Note that the pmap needs to be sized and aligned to
567 * a power of two. This is because it is used both in virtual and
568 * real so it can't span a page boundary.
569 */
570
571 void
572 pmap_init(void)
573 {
574
575 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
576 #if ZONE_DEBUG
577 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
578 #endif /* ZONE_DEBUG */
579
580 pmap_initialized = TRUE;
581
582 /*
583 * Initialize list of freed up pmaps
584 */
585 free_pmap_list = 0; /* Set that there are no free pmaps */
586 free_pmap_count = 0;
587 simple_lock_init(&free_pmap_lock, 0);
588
589 }
590
591 unsigned int pmap_free_pages(void)
592 {
593 return avail_remaining;
594 }
595
596 /*
597 * This function allocates physical pages.
598 */
599
600 /* Non-optimal, but only used for virtual memory startup.
601 * Allocate memory from a table of free physical addresses
602 * If there are no more free entries, too bad.
603 */
604
605 boolean_t pmap_next_page(ppnum_t *addrp)
606 {
607 int i;
608
609 if(current_free_region >= pmap_mem_regions_count) return FALSE; /* Return failure if we have used everything... */
610
611 for(i = current_free_region; i < pmap_mem_regions_count; i++) { /* Find the next bank with free pages */
612 if(pmap_mem_regions[i].mrAStart <= pmap_mem_regions[i].mrAEnd) break; /* Found one */
613 }
614
615 current_free_region = i; /* Set our current bank */
616 if(i >= pmap_mem_regions_count) return FALSE; /* Couldn't find a free page */
617
618 *addrp = pmap_mem_regions[i].mrAStart; /* Allocate the page */
619 pmap_mem_regions[i].mrAStart = pmap_mem_regions[i].mrAStart + 1; /* Set the next one to go */
620 avail_remaining--; /* Drop free count */
621
622 return TRUE;
623 }
624
625 void pmap_virtual_space(
626 vm_offset_t *startp,
627 vm_offset_t *endp)
628 {
629 *startp = round_page(first_free_virt);
630 *endp = vm_last_addr;
631 }
632
/*
 * pmap_create
 *
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * A pmap is either in the free list or in the in-use list. The only use
 * of the in-use list (aside from debugging) is to handle the VSID wrap situation:
 * whenever a new pmap is allocated (i.e., not recovered from the free list), the
 * in-use list is searched until a hole in the VSID sequence is found. (Note
 * that the in-use pmaps are queued in VSID sequence order.) This is all done
 * while free_pmap_lock is held.
 *
 * If the size specified is non-zero, the map will be used in software
 * only, and is bounded by that size.
 */
pmap_t
pmap_create(vm_map_size_t size)
{
	pmap_t pmap, ckpmap, fore;
	int s;
	unsigned int currSID;
	addr64_t physpmap;

	/*
	 * A software use-only map doesn't even need a pmap structure.
	 */
	if (size)
		return(PMAP_NULL);

	/*
	 * If there is a pmap in the pmap free list, reuse it.
	 * Note that we use free_pmap_list for all chaining of pmaps, both to
	 * the free list and the in use chain (anchored from kernel_pmap).
	 */
	s = splhigh();
	simple_lock(&free_pmap_lock);

	if(free_pmap_list) {					/* Any free? */
		pmap = free_pmap_list;				/* Yes, allocate it */
		free_pmap_list = (pmap_t)pmap->freepmap;	/* Dequeue this one (we chain free ones through freepmap) */
		free_pmap_count--;
		/* NOTE(review): a recycled pmap keeps its previous space/spaceNum
		   and pmapTrans entries; only a brand-new allocation runs the
		   VSID search below — confirm this is the intended invariant. */
	}
	else {							/* Nothing cached; build a brand new one */
		simple_unlock(&free_pmap_lock);			/* Unlock just in case (zalloc may block) */
		splx(s);

		pmap = (pmap_t) zalloc(pmap_zone);		/* Get one */
		if (pmap == PMAP_NULL) return(PMAP_NULL);	/* Handle out-of-memory condition */

		bzero((char *)pmap, pmapSize);			/* Clean up the pmap */

		s = splhigh();
		simple_lock(&free_pmap_lock);			/* Lock it back up */

		ckpmap = cursor_pmap;				/* Get starting point for free ID search */
		currSID = ckpmap->spaceNum;			/* Get the actual space ID number */

		while(1) {					/* Keep trying until something happens */

			currSID = (currSID + 1) & (maxAdrSp - 1);	/* Get the next in the sequence */
			if(((currSID * incrVSID) & (maxAdrSp - 1)) == invalSpace) continue;	/* Skip the space we have reserved */
			ckpmap = (pmap_t)ckpmap->pmap_link.next;	/* On to the next in-use pmap */

			if(ckpmap->spaceNum != currSID) break;	/* If we are out of sequence, this is free */

			if(ckpmap == cursor_pmap) {		/* See if we have 2^20 already allocated */
				panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp);	/* Die pig dog */
			}
		}

		pmap->space = (currSID * incrVSID) & (maxAdrSp - 1);	/* Calculate the actual VSID */
		pmap->spaceNum = currSID;			/* Set the space ID number */

		/*
		 * Now we link into the chain just before the out of sequence guy,
		 * keeping the in-use list in VSID sequence order.
		 */
		fore = (pmap_t)ckpmap->pmap_link.prev;		/* Get the current's previous */
		pmap->pmap_link.next = (queue_t)ckpmap;		/* My next points to the current */
		fore->pmap_link.next = (queue_t)pmap;		/* Current's previous's next points to me */
		pmap->pmap_link.prev = (queue_t)fore;		/* My prev points to what the current pointed to */
		ckpmap->pmap_link.prev = (queue_t)pmap;		/* Current's prev points to me */

		physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF);	/* Get the physical address of the pmap */

		pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap;	/* Make V to R translation mask */

		pmapTrans[pmap->space].pmapPAddr = physpmap;	/* Set translate table physical to point to us */
		pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap);	/* Set translate table virtual to point to us */
	}

	/* Common (re)initialization for both recycled and newly built pmaps */
	pmap->pmapVmmExt = 0;					/* Clear VMM extension block vaddr */
	pmap->pmapVmmExtPhys = 0;				/* and the paddr, too */
	pmap->pmapFlags = pmapKeyDef;				/* Set default key */
	pmap->pmapCCtl = pmapCCtlVal;				/* Initialize cache control */
	pmap->ref_count = 1;
	pmap->stats.resident_count = 0;
	pmap->stats.wired_count = 0;
	pmap->pmapSCSubTag = 0x0000000000000000ULL;		/* Make sure this is clean and tidy */
	simple_unlock(&free_pmap_lock);

	splx(s);
	return(pmap);
}
739
/*
 * pmap_destroy
 *
 * Gives up a reference to the specified pmap. When the reference count
 * reaches zero the pmap is either cached on the pmap free list or, if the
 * free list is already full, unlinked from the in-use chain and returned
 * to the zone.
 *
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int ref_count;
	spl_t s;
	pmap_t fore, aft;

	if (pmap == PMAP_NULL)
		return;

	ref_count=hw_atomic_sub(&pmap->ref_count, 1);		/* Back off the count */
	if(ref_count>0) return;					/* Still more users, leave now... */

	if(ref_count < 0)					/* Did we go too far? */
		panic("pmap_destroy(): ref_count < 0");

	if (!(pmap->pmapFlags & pmapVMgsaa)) {			/* Don't try this for a shadow assist guest */
		pmap_unmap_sharedpage(pmap);			/* Remove any mapping of page -1 */
	}

#ifdef notdef
	if(pmap->stats.resident_count != 0)
		panic("PMAP_DESTROY: pmap not empty");
#else
	if(pmap->stats.resident_count != 0) {			/* Instead of panicking, tear down whatever is left */
		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
	}
#endif

	s = splhigh();
	/*
	 * Add the pmap to the pmap free list, or really destroy it
	 * if the free list already holds enough spares.
	 */
	simple_lock(&free_pmap_lock);

	if (free_pmap_count <= free_pmap_max) {			/* Do we have enough spares? */
								/* NOTE(review): "<=" lets the list reach free_pmap_max + 1
								   entries — confirm whether the bound is meant to be inclusive */
		pmap->freepmap = free_pmap_list;		/* Queue in front */
		free_pmap_list = pmap;
		free_pmap_count++;
		simple_unlock(&free_pmap_lock);
		/* NOTE(review): a cached pmap stays linked on the in-use chain and
		   keeps its pmapTrans entries until pmap_create recycles it */
	} else {						/* Free list full: unlink from the in-use chain and free */
		if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev;	/* If we are releasing the cursor, back up */
		fore = (pmap_t)pmap->pmap_link.prev;
		aft = (pmap_t)pmap->pmap_link.next;
		fore->pmap_link.next = pmap->pmap_link.next;	/* My previous's next is my next */
		aft->pmap_link.prev = pmap->pmap_link.prev;	/* My next's previous is my previous */
		simple_unlock(&free_pmap_lock);
		pmapTrans[pmap->space].pmapPAddr = -1;		/* Invalidate the translate table physical */
		pmapTrans[pmap->space].pmapVAddr = -1;		/* Invalidate the translate table virtual */
		zfree(pmap_zone, pmap);
	}
	splx(s);
}
807
808 /*
809 * pmap_reference(pmap)
810 * gains a reference to the specified pmap.
811 */
812 void
813 pmap_reference(pmap_t pmap)
814 {
815 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
816 }
817
818 /*
819 * pmap_remove_some_phys
820 *
821 * Removes mappings of the associated page from the specified pmap
822 *
823 */
824 void pmap_remove_some_phys(
825 pmap_t pmap,
826 vm_offset_t pa)
827 {
828 register struct phys_entry *pp;
829 register struct mapping *mp;
830 unsigned int pindex;
831
832 if (pmap == PMAP_NULL) { /* This should never be called with a null pmap */
833 panic("pmap_remove_some_phys: null pmap\n");
834 }
835
836 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
837 if (pp == 0) return; /* Leave if not in physical RAM */
838
839 do { /* Keep going until we toss all pages from this pmap */
840 if (pmap->pmapFlags & pmapVMhost) {
841 mp = hw_purge_phys(pp); /* Toss a map */
842 switch ((unsigned int)mp & mapRetCode) {
843 case mapRtOK:
844 mapping_free(mp); /* Return mapping to free inventory */
845 break;
846 case mapRtGuest:
847 break; /* Don't try to return a guest mapping */
848 case mapRtEmpty:
849 break; /* Physent chain empty, we're done */
850 case mapRtNotFnd:
851 break; /* Mapping disappeared on us, retry */
852 default:
853 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
854 pp, pmap, mp); /* Handle failure with our usual lack of tact */
855 }
856 } else {
857 mp = hw_purge_space(pp, pmap); /* Toss a map */
858 switch ((unsigned int)mp & mapRetCode) {
859 case mapRtOK:
860 mapping_free(mp); /* Return mapping to free inventory */
861 break;
862 case mapRtEmpty:
863 break; /* Physent chain empty, we're done */
864 case mapRtNotFnd:
865 break; /* Mapping disappeared on us, retry */
866 default:
867 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
868 pp, pmap, mp); /* Handle failure with our usual lack of tact */
869 }
870 }
871 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
872
873 #if DEBUG
874 if ((pmap->pmapFlags & pmapVMhost) && !pmap_verify_free(pa))
875 panic("pmap_remove_some_phys: cruft left behind - pa = %08X, pmap = %08X\n", pa, pmap);
876 #endif
877
878 return; /* Leave... */
879 }
880
881 /*
882 * pmap_remove(pmap, s, e)
883 * unmaps all virtual addresses v in the virtual address
884 * range determined by [s, e) and pmap.
885 * s and e must be on machine independent page boundaries and
886 * s must be less than or equal to e.
887 *
888 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
889 * skip those segments.
890 */
891 void
892 pmap_remove(
893 pmap_t pmap,
894 addr64_t sva,
895 addr64_t eva)
896 {
897 addr64_t va, endva;
898
899 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
900
901
902 /* It is just possible that eva might have wrapped around to zero,
903 * and sometimes we get asked to liberate something of size zero
904 * even though it's dumb (eg. after zero length read_overwrites)
905 */
906 assert(eva >= sva);
907
908 /* If these are not page aligned the loop might not terminate */
909 assert((sva == trunc_page_64(sva)) && (eva == trunc_page_64(eva)));
910
911 va = sva & -4096LL; /* Round start down to a page */
912 endva = eva & -4096LL; /* Round end down to a page */
913
914 while(1) { /* Go until we finish the range */
915 va = mapping_remove(pmap, va); /* Remove the mapping and see what's next */
916 va = va & -4096LL; /* Make sure the "not found" indication is clear */
917 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
918 }
919
920 }
921
/*
 *	Routine:
 *		pmap_page_protect
 *
 *	Function:
 *		Lower the permission for all mappings to a given physical page.
 *		VM_PROT_NONE (and any other write-capable downgrade target)
 *		removes the mappings outright; read/execute merely reprotects.
 */
void
pmap_page_protect(
	ppnum_t pa,
	vm_prot_t prot)
{
	register struct phys_entry *pp;
	boolean_t remove;
	unsigned int pindex;
	mapping_t *mp;


	switch (prot) {					/* Decide: reprotect in place, or remove entirely */
		case VM_PROT_READ:
		case VM_PROT_READ|VM_PROT_EXECUTE:
			remove = FALSE;
			break;
		case VM_PROT_ALL:
			return;				/* Nothing is being taken away; done */
		default:
			remove = TRUE;
			break;
	}


	pp = mapping_phys_lookup(pa, &pindex);		/* Get physical entry */
	if (pp == 0) return;				/* Leave if not in physical RAM */

	if (remove) {					/* If the protection was set to none, we'll remove all mappings */

		do {					/* Keep going until we toss all pages from this physical page */
			mp = hw_purge_phys(pp);		/* Toss a map */
			switch ((unsigned int)mp & mapRetCode) {
				case mapRtOK:
					mapping_free(mp);	/* Return mapping to free inventory */
					break;
				case mapRtGuest:
					break;		/* Don't try to return a guest mapping */
				case mapRtNotFnd:
					break;		/* Mapping disappeared on us, retry */
				case mapRtEmpty:
					break;		/* Physent chain empty, we're done */
				default: panic("pmap_page_protect: hw_purge_phys failed - pp = %08X, code = %08X\n",
						pp, mp);	/* Handle failure with our usual lack of tact */
			}
		} while (mapRtEmpty != ((unsigned int)mp & mapRetCode));

#if DEBUG
		if (!pmap_verify_free(pa))
			panic("pmap_page_protect: cruft left behind - pa = %08X\n", pa);
#endif

		return;					/* Leave... */
	}

	/* When we get here, it means that we are to change the protection for a
	 * physical page.
	 */

	mapping_protect_phys(pa, prot & VM_PROT_ALL);	/* Change protection of all mappings to page. */

}
990
991 /*
992 * Routine:
993 * pmap_disconnect
994 *
995 * Function:
996 * Disconnect all mappings for this page and return reference and change status
997 * in generic format.
998 *
999 */
1000 unsigned int pmap_disconnect(
1001 ppnum_t pa)
1002 {
1003 register struct phys_entry *pp;
1004 unsigned int pindex;
1005 mapping_t *mp;
1006
1007 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1008 if (pp == 0) return (0); /* Return null ref and chg if not in physical RAM */
1009 do { /* Iterate until all mappings are dead and gone */
1010 mp = hw_purge_phys(pp); /* Disconnect a mapping */
1011 if (!mp) break; /* All mappings are gone, leave the loop */
1012 switch ((unsigned int)mp & mapRetCode) {
1013 case mapRtOK:
1014 mapping_free(mp); /* Return mapping to free inventory */
1015 break;
1016 case mapRtGuest:
1017 break; /* Don't try to return a guest mapping */
1018 case mapRtNotFnd:
1019 break; /* Mapping disappeared on us, retry */
1020 case mapRtEmpty:
1021 break; /* Physent chain empty, we're done */
1022 default: panic("hw_purge_phys: hw_purge_phys failed - pp = %08X, code = %08X\n",
1023 pp, mp); /* Handle failure with our usual lack of tact */
1024 }
1025 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
1026
1027 #if DEBUG
1028 if (!pmap_verify_free(pa))
1029 panic("pmap_disconnect: cruft left behind - pa = %08X\n", pa);
1030 #endif
1031
1032 return (mapping_tst_refmod(pa)); /* Return page ref and chg in generic format */
1033 }
1034
1035 /*
1036 * pmap_protect(pmap, s, e, prot)
1037 * changes the protection on all virtual addresses v in the
1038 * virtual address range determined by [s, e] and pmap to prot.
1039 * s and e must be on machine independent page boundaries and
1040 * s must be less than or equal to e.
1041 *
1042 * Note that any requests to change the protection of a nested pmap are
1043 * ignored. Those changes MUST be done by calling this with the correct pmap.
1044 */
1045 void pmap_protect(
1046 pmap_t pmap,
1047 vm_map_offset_t sva,
1048 vm_map_offset_t eva,
1049 vm_prot_t prot)
1050 {
1051
1052 addr64_t va, endva;
1053
1054 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1055
1056 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1057 pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva); /* Yeah, dump 'em */
1058 return; /* Leave... */
1059 }
1060
1061 va = sva & -4096LL; /* Round start down to a page */
1062 endva = eva & -4096LL; /* Round end down to a page */
1063
1064 while(1) { /* Go until we finish the range */
1065 mapping_protect(pmap, va, prot & VM_PROT_ALL, &va); /* Change the protection and see what's next */
1066 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
1067 }
1068
1069 }
1070
1071
1072
1073 /*
1074 * pmap_enter
1075 *
1076 * Create a translation for the virtual address (virt) to the physical
1077 * address (phys) in the pmap with the protection requested. If the
1078 * translation is wired then we can not allow a full page fault, i.e.,
1079 * the mapping control block is not eligible to be stolen in a low memory
1080 * condition.
1081 *
1082 * NB: This is the only routine which MAY NOT lazy-evaluate
1083 * or lose information. That is, this routine must actually
1084 * insert this page into the given map NOW.
1085 */
1086 void
1087 pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
1088 unsigned int flags, __unused boolean_t wired)
1089 {
1090 unsigned int mflags;
1091 addr64_t colva;
1092
1093 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
1094
1095 mflags = 0; /* Make sure this is initialized to nothing special */
1096 if(!(flags & VM_WIMG_USE_DEFAULT)) { /* Are they supplying the attributes? */
1097 mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1098 }
1099
1100 /*
1101 * It is possible to hang here if another processor is remapping any pages we collide with and are removing
1102 */
1103
1104 while(1) { /* Keep trying the enter until it goes in */
1105
1106 colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL); /* Enter the mapping into the pmap */
1107
1108 if(!colva) break; /* If there were no collisions, we are done... */
1109
1110 mapping_remove(pmap, colva); /* Remove the mapping that collided */
1111 }
1112 }
1113
1114 /*
1115 * Enters translations for odd-sized V=F blocks.
1116 *
1117 * The higher level VM map should be locked to insure that we don't have a
1118 * double diddle here.
1119 *
1120 * We panic if we get a block that overlaps with another. We do not merge adjacent
1121 * blocks because removing any address within a block removes the entire block and if
1122 * would really mess things up if we trashed too much.
1123 *
1124 * Once a block is mapped, it is unmutable, that is, protection, catch mode, etc. can
1125 * not be changed. The block must be unmapped and then remapped with the new stuff.
1126 * We also do not keep track of reference or change flags.
1127 *
1128 * Any block that is larger than 256MB must be a multiple of 32MB. We panic if it is not.
1129 *
1130 * Note that pmap_map_block_rc is the same but doesn't panic if collision.
1131 *
1132 */
1133
1134 void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1135
1136 unsigned int mflags;
1137 addr64_t colva;
1138
1139
1140 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1141 panic("pmap_map_block: null pmap\n"); /* No, like that's dumb... */
1142 }
1143
1144 // kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n", /* (BRINGUP) */
1145 // current_thread(), va, pa, size, prot, attr, flags); /* (BRINGUP) */
1146
1147 mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1148 if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
1149
1150 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1151
1152 if(colva) { /* If there was a collision, panic */
1153 panic("pmap_map_block: mapping error %d, pmap = %08X, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
1154 }
1155
1156 return; /* Return */
1157 }
1158
1159 int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1160
1161 unsigned int mflags;
1162 addr64_t colva;
1163
1164
1165 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1166 panic("pmap_map_block_rc: null pmap\n"); /* No, like that's dumb... */
1167 }
1168
1169 mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1170 if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
1171
1172 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1173
1174 if(colva) return 0; /* If there was a collision, fail */
1175
1176 return 1; /* Return true of we worked */
1177 }
1178
/*
 *	pmap_extract(pmap, va)
 *		returns the physical address corresponding to the
 *		virtual address specified by pmap and va if the
 *		virtual address is mapped and 0 if it is not.
 *		Note: we assume nothing is ever mapped to phys 0.
 *
 *	NOTE: This call always will fail for physical addresses greater
 *	than 0xFFFFF000, and the virtual address is truncated to 32 bits.
 */
vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va) {

	spl_t spl;
	register struct mapping *mp;
	register vm_offset_t pa;
	addr64_t nextva;
	ppnum_t ppoffset;
	unsigned int gva;

#ifdef BOGUSCOMPAT
	panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n");	/* Don't use this */
#else

	gva = (unsigned int)va;		/* Make sure we don't have a sign; 64-bit va is deliberately truncated to 32 bits */

	spl = splhigh();		/* We can't allow any loss of control here */

	mp = mapping_find(pmap, (addr64_t)gva, &nextva,1);	/* Find the mapping for this address */

	if(!mp) {			/* Is the page mapped? */
		splx(spl);		/* Enable interrupts */
		return 0;		/* Pass back 0 if not found */
	}

	/* The mapping may span multiple pages (e.g. a block); compute the page offset within it */
	ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12);	/* Get offset from va to base va */


	pa = mp->mpPAddr + ppoffset;	/* Remember ppage because mapping may vanish after drop call */

	mapping_drop_busy(mp);		/* We have everything we need from the mapping */
	splx(spl);			/* Restore 'rupts */

	if(pa > maxPPage32) return 0;	/* Force large addresses to fail */

	pa = (pa << 12) | (va & 0xFFF);	/* Convert physical page number to address */

#endif
	return pa;			/* Return physical address or 0 */
}
1227
1228 /*
1229 * ppnum_t pmap_find_phys(pmap, addr64_t va)
1230 * returns the physical page corrsponding to the
1231 * virtual address specified by pmap and va if the
1232 * virtual address is mapped and 0 if it is not.
1233 * Note: we assume nothing is ever mapped to phys 0.
1234 *
1235 */
1236 ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {
1237
1238 spl_t spl;
1239 register struct mapping *mp;
1240 ppnum_t pa, ppoffset;
1241 addr64_t nextva;
1242
1243 spl = splhigh(); /* We can't allow any loss of control here */
1244
1245 mp = mapping_find(pmap, va, &nextva, 1); /* Find the mapping for this address */
1246
1247 if(!mp) { /* Is the page mapped? */
1248 splx(spl); /* Enable interrupts */
1249 return 0; /* Pass back 0 if not found */
1250 }
1251
1252
1253 ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */
1254
1255 pa = mp->mpPAddr + ppoffset; /* Get the actual physical address */
1256
1257 mapping_drop_busy(mp); /* We have everything we need from the mapping */
1258
1259 splx(spl); /* Restore 'rupts */
1260 return pa; /* Return physical address or 0 */
1261 }
1262
1263
1264 /*
1265 * pmap_attributes:
1266 *
1267 * Set/Get special memory attributes; not implemented.
1268 *
1269 * Note: 'VAL_GET_INFO' is used to return info about a page.
1270 * If less than 1 page is specified, return the physical page
1271 * mapping and a count of the number of mappings to that page.
1272 * If more than one page is specified, return the number
1273 * of resident pages and the number of shared (more than
1274 * one mapping) pages in the range;
1275 *
1276 *
1277 */
1278 kern_return_t
1279 pmap_attribute(
1280 __unused pmap_t pmap,
1281 __unused vm_map_offset_t address,
1282 __unused vm_map_size_t size,
1283 __unused vm_machine_attribute_t attribute,
1284 __unused vm_machine_attribute_val_t* value)
1285 {
1286
1287 return KERN_INVALID_ARGUMENT;
1288
1289 }
1290
1291 /*
1292 * pmap_attribute_cache_sync(vm_offset_t pa)
1293 *
1294 * Invalidates all of the instruction cache on a physical page and
1295 * pushes any dirty data from the data cache for the same physical page
1296 */
1297
1298 kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size,
1299 __unused vm_machine_attribute_t attribute,
1300 __unused vm_machine_attribute_val_t* value) {
1301
1302 spl_t s;
1303 unsigned int i, npages;
1304
1305 npages = round_page(size) >> 12; /* Get the number of pages to do */
1306
1307 for(i = 0; i < npages; i++) { /* Do all requested pages */
1308 s = splhigh(); /* No interruptions here */
1309 sync_ppage(pp + i); /* Go flush data cache and invalidate icache */
1310 splx(s); /* Allow interruptions */
1311 }
1312
1313 return KERN_SUCCESS;
1314 }
1315
1316 /*
1317 * pmap_sync_page_data_phys(ppnum_t pa)
1318 *
1319 * Invalidates all of the instruction cache on a physical page and
1320 * pushes any dirty data from the data cache for the same physical page
1321 */
1322
1323 void pmap_sync_page_data_phys(ppnum_t pa) {
1324
1325 spl_t s;
1326
1327 s = splhigh(); /* No interruptions here */
1328 sync_ppage(pa); /* Sync up dem caches */
1329 splx(s); /* Allow interruptions */
1330 return;
1331 }
1332
1333 void
1334 pmap_sync_page_attributes_phys(ppnum_t pa)
1335 {
1336 pmap_sync_page_data_phys(pa);
1337 }
1338
1339 /*
1340 * pmap_collect
1341 *
1342 * Garbage collects the physical map system for pages that are no longer used.
1343 * It isn't implemented or needed or wanted.
1344 */
1345 void
1346 pmap_collect(__unused pmap_t pmap)
1347 {
1348 return;
1349 }
1350
1351 /*
1352 * Routine: pmap_activate
1353 * Function:
1354 * Binds the given physical map to the given
1355 * processor, and returns a hardware map description.
1356 * It isn't implemented or needed or wanted.
1357 */
1358 void
1359 pmap_activate(
1360 __unused pmap_t pmap,
1361 __unused thread_t th,
1362 __unused int which_cpu)
1363 {
1364 return;
1365 }
1366 /*
1367 * pmap_deactivate:
1368 * It isn't implemented or needed or wanted.
1369 */
1370 void
1371 pmap_deactivate(
1372 __unused pmap_t pmap,
1373 __unused thread_t th,
1374 __unused int which_cpu)
1375 {
1376 return;
1377 }
1378
1379
1380 /*
1381 * pmap_pageable(pmap, s, e, pageable)
1382 * Make the specified pages (by pmap, offset)
1383 * pageable (or not) as requested.
1384 *
1385 * A page which is not pageable may not take
1386 * a fault; therefore, its page table entry
1387 * must remain valid for the duration.
1388 *
1389 * This routine is merely advisory; pmap_enter()
1390 * will specify that these pages are to be wired
1391 * down (or not) as appropriate.
1392 *
1393 * (called from vm/vm_fault.c).
1394 */
1395 void
1396 pmap_pageable(
1397 __unused pmap_t pmap,
1398 __unused vm_map_offset_t start,
1399 __unused vm_map_offset_t end,
1400 __unused boolean_t pageable)
1401 {
1402
1403 return; /* This is not used... */
1404
1405 }
1406 /*
1407 * Routine: pmap_change_wiring
1408 * NOT USED ANYMORE.
1409 */
1410 void
1411 pmap_change_wiring(
1412 __unused pmap_t pmap,
1413 __unused vm_map_offset_t va,
1414 __unused boolean_t wired)
1415 {
1416 return; /* This is not used... */
1417 }
1418
/*
 *	pmap_modify_pages(pmap, s, e)
 *		sets the modified bit on all virtual addresses v in the
 *		virtual address range determined by [s, e] and pmap,
 *		s and e must be on machine independent page boundaries and
 *		s must be less than or equal to e.
 *
 *	Note that this function will not descend nested pmaps, and only
 *	normal (mpNormal) mappings are touched.
 */
void
pmap_modify_pages(
	     pmap_t pmap,
	     vm_map_offset_t sva,
	     vm_map_offset_t eva)
{
	spl_t		spl;
	mapping_t	*mp;
	ppnum_t		pa;
	addr64_t		va, endva;
	unsigned int	savetype;

	if (pmap == PMAP_NULL) return;			/* If no pmap, can't do it... */

	va = sva & -4096;				/* Round to page */
	endva = eva & -4096;				/* Round to page */

	while (va < endva) {				/* Walk through all pages */

		spl = splhigh();			/* We can't allow any loss of control here */

		mp = mapping_find(pmap, (addr64_t)va, &va, 0);	/* Find the mapping for this address; va advances to the next address */

		if(!mp) {				/* Is the page mapped? */
			splx(spl);			/* Page not mapped, restore interruptions */
			if((va == 0) || (va >= endva)) break;	/* We are done if there are no more or we hit the end... */
			continue;			/* We are not done and there is more to check... */
		}

		savetype = mp->mpFlags & mpType;	/* Remember the type */
		pa = mp->mpPAddr;			/* Remember ppage because mapping may vanish after drop call */

		mapping_drop_busy(mp);			/* We have everything we need from the mapping */

		splx(spl);				/* Restore 'rupts */

		if(savetype != mpNormal) continue;	/* Can't mess around with these guys... */

		mapping_set_mod(pa);			/* Set the modified bit for this page */

		if(va == 0) break;			/* We hit the end of the pmap, might as well leave now... */
	}
	return;						/* Leave... */
}
1472
1473 /*
1474 * pmap_clear_modify(phys)
1475 * clears the hardware modified ("dirty") bit for one
1476 * machine independant page starting at the given
1477 * physical address. phys must be aligned on a machine
1478 * independant page boundary.
1479 */
1480 void
1481 pmap_clear_modify(ppnum_t pa)
1482 {
1483
1484 mapping_clr_mod(pa); /* Clear all change bits for physical page */
1485
1486 }
1487
1488 /*
1489 * pmap_is_modified(phys)
1490 * returns TRUE if the given physical page has been modified
1491 * since the last call to pmap_clear_modify().
1492 */
1493 boolean_t
1494 pmap_is_modified(register ppnum_t pa)
1495 {
1496 return mapping_tst_mod(pa); /* Check for modified */
1497
1498 }
1499
1500 /*
1501 * pmap_clear_reference(phys)
1502 * clears the hardware referenced bit in the given machine
1503 * independant physical page.
1504 *
1505 */
1506 void
1507 pmap_clear_reference(ppnum_t pa)
1508 {
1509 mapping_clr_ref(pa); /* Check for modified */
1510 }
1511
1512 /*
1513 * pmap_is_referenced(phys)
1514 * returns TRUE if the given physical page has been referenced
1515 * since the last call to pmap_clear_reference().
1516 */
1517 boolean_t
1518 pmap_is_referenced(ppnum_t pa)
1519 {
1520 return mapping_tst_ref(pa); /* Check for referenced */
1521 }
1522
1523 /*
1524 * pmap_get_refmod(phys)
1525 * returns the referenced and modified bits of the specified
1526 * physical page.
1527 */
1528 unsigned int
1529 pmap_get_refmod(ppnum_t pa)
1530 {
1531 return (mapping_tst_refmod(pa));
1532 }
1533
1534 /*
1535 * pmap_clear_refmod(phys, mask)
1536 * clears the referenced and modified bits as specified by the mask
1537 * of the specified physical page.
1538 */
1539 void
1540 pmap_clear_refmod(ppnum_t pa, unsigned int mask)
1541 {
1542 mapping_clr_refmod(pa, mask);
1543 }
1544
1545 /*
1546 * pmap_eligible_for_execute(ppnum_t pa)
1547 * return true if physical address is eligible to contain executable code;
1548 * otherwise, return false
1549 */
1550 boolean_t
1551 pmap_eligible_for_execute(ppnum_t pa)
1552 {
1553 phys_entry_t *physent;
1554 unsigned int pindex;
1555
1556 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1557
1558 if((!physent) || (physent->ppLink & ppG))
1559 return 0; /* If there is no physical entry or marked guarded,
1560 the entry is not eligible for execute */
1561
1562 return 1; /* Otherwise, entry is eligible for execute */
1563 }
1564
#if MACH_VM_DEBUG
int
pmap_list_resident_pages(
	__unused pmap_t pmap,
	__unused vm_offset_t *listp,
	__unused int space)
{
	return 0;	/* Resident-page listing is not supported on this platform */
}
#endif /* MACH_VM_DEBUG */
1575
1576 /*
1577 * Locking:
1578 * spl: VM
1579 */
1580 void
1581 pmap_copy_part_page(
1582 vm_offset_t src,
1583 vm_offset_t src_offset,
1584 vm_offset_t dst,
1585 vm_offset_t dst_offset,
1586 vm_size_t len)
1587 {
1588 addr64_t fsrc, fdst;
1589
1590 assert(((dst <<12) & PAGE_MASK+dst_offset+len) <= PAGE_SIZE);
1591 assert(((src <<12) & PAGE_MASK+src_offset+len) <= PAGE_SIZE);
1592
1593 fsrc = ((addr64_t)src << 12) + src_offset;
1594 fdst = ((addr64_t)dst << 12) + dst_offset;
1595
1596 phys_copy(fsrc, fdst, len); /* Copy the stuff physically */
1597 }
1598
1599 void
1600 pmap_zero_part_page(
1601 __unused vm_offset_t p,
1602 __unused vm_offset_t offset,
1603 __unused vm_size_t len)
1604 {
1605 panic("pmap_zero_part_page");
1606 }
1607
1608 boolean_t pmap_verify_free(ppnum_t pa) {
1609
1610 struct phys_entry *pp;
1611 unsigned int pindex;
1612
1613 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1614 if (pp == 0) return FALSE; /* If there isn't one, show no mapping... */
1615
1616 if(pp->ppLink & ~(ppLock | ppFlags)) return FALSE; /* We have at least one mapping */
1617 return TRUE; /* No mappings */
1618 }
1619
1620
1621 /* Determine if we need to switch space and set up for it if so */
1622
1623 void pmap_switch(pmap_t map)
1624 {
1625 hw_blow_seg(lowGlo.lgUMWvaddr); /* Blow off the first segment */
1626 hw_blow_seg(lowGlo.lgUMWvaddr + 0x10000000ULL); /* Blow off the second segment */
1627
1628 /* when changing to kernel space, don't bother
1629 * doing anything, the kernel is mapped from here already.
1630 */
1631 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
1632 return; /* If so, we don't do anything... */
1633 }
1634
1635 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
1636 return; /* Bye, bye, butterfly... */
1637 }
1638
/*
 *	kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
 *
 *	grand  = the pmap that we will nest subord into
 *	subord = the pmap that goes into the grand
 *	vstart = start of range in grand pmap to be inserted
 *	nstart = start of the corresponding range in the nested pmap
 *	size   = size of nest area (up to 2TB)
 *
 *	Inserts a pmap into another.  This is used to implement shared
 *	segments.  On the current PPC processors, this is limited to
 *	segment (256MB) aligned segment sized ranges.
 *
 *	We actually kinda allow recursive nests.  The gating factor is that
 *	we do not allow nesting on top of something that is already mapped,
 *	i.e., the range must be empty.
 *
 *	Note that we depend upon higher level VM locks to insure that things
 *	don't change while we are doing this.  For example, VM should not be
 *	doing any pmap enters while it is nesting or do 2 nests at once.
 */

kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {

	addr64_t vend, colladdr;
	unsigned int msize;
	int nlists;
	mapping_t *mp;

	if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this for multiples of 256MB */
	if((size >> 25) > 65536)  return KERN_INVALID_VALUE;	/* Max size we can nest is 2TB */
	if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */
	if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */

	if(size == 0) {						/* Is the size valid? */
		panic("pmap_nest: size is invalid - %016llX\n", size);
	}

	msize = (size >> 25) - 1;				/* Change size to blocks of 32MB, stored minus one */

	nlists = mapSetLists(grand);				/* Set number of lists this will be on */

	mp = mapping_alloc(nlists);				/* Get a spare mapping block; NOTE(review): result is not checked — presumably it cannot fail, confirm */

	mp->mpFlags = 0x01000000 | mpNest | mpPerm | mpBSu | nlists;	/* Make this a permanent nested pmap with a 32MB basic size unit */
								/* Set the flags. Make sure busy count is 1 */
	mp->mpSpace = subord->space;				/* Set the address space/pmap lookup ID */
	mp->u.mpBSize = msize;					/* Set the size */
	mp->mpPte = 0;						/* Set the PTE invalid */
	mp->mpPAddr = 0;					/* Set the physical page number */
	mp->mpVAddr = vstart;					/* Set the address */
	mp->mpNestReloc = nstart - vstart;			/* Set grand to nested vaddr relocation value */

	colladdr = hw_add_map(grand, mp);			/* Go add the mapping to the pmap */

	if(colladdr) {						/* Did it collide? */
		vend = vstart + size - 4096;			/* Point to the last page we would cover in nest */
		panic("pmap_nest: attempt to nest into a non-empty range - pmap = %08X, start = %016llX, end = %016llX\n",
			grand, vstart, vend);
	}

	return KERN_SUCCESS;
}
1701
/*
 *	kern_return_t pmap_unnest(grand, vaddr)
 *
 *	grand = the pmap from which a nested pmap is removed
 *	vaddr = start of range in pmap to be unnested
 *
 *	Removes a pmap from another.  This is used to implement shared
 *	segments.  On the current PPC processors, this is limited to
 *	segment (256MB) aligned segment sized ranges.
 */

kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {

	unsigned int tstamp, i, mycpu;
	addr64_t nextva;
	spl_t s;
	mapping_t *mp;

	s = splhigh();						/* Make sure interruptions are disabled */

	mp = mapping_find(grand, vaddr, &nextva, 0);		/* Find the nested map */

	if(((unsigned int)mp & mapRetCode) != mapRtOK) {	/* See if it was even nested */
		panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr);
	}

	if((mp->mpFlags & mpType) != mpNest) {			/* Did we find something other than a nest? */
		panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr);
	}

	if(mp->mpVAddr != vaddr) {				/* Make sure the address is the same */
		panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr);
	}

	(void)hw_atomic_and(&mp->mpFlags, ~mpPerm);		/* Show that this mapping is now removable */

	mapping_drop_busy(mp);					/* Go ahead and release the mapping now */

	splx(s);						/* Restore 'rupts */

	(void)mapping_remove(grand, vaddr);			/* Toss the nested pmap mapping */

	invalidateSegs(grand);					/* Invalidate the pmap segment cache */

	/*
	 * Note that the following will force the segment registers to be reloaded
	 * on all processors (if they are using the pmap we just changed) before returning.
	 *
	 * This is needed.  The reason is that until the segment register is
	 * reloaded, another thread in the same task on a different processor will
	 * be able to access memory that it isn't allowed to anymore.  That can happen
	 * because access to the subordinate pmap is being removed, but the pmap is still
	 * valid.
	 *
	 * Note that we only kick the other processor if we see that it was using the pmap while we
	 * were changing it.
	 */


	for(i=0; i < real_ncpus; i++) {				/* Cycle through processors */
		disable_preemption();				/* Keep ourselves pinned while we poke this CPU */
		mycpu = cpu_number();				/* Who am I? Am I just a dream? */
		if((unsigned int)grand == PerProcTable[i].ppe_vaddr->ppUserPmapVirt) {	/* Is this guy using the changed pmap? */

			PerProcTable[i].ppe_vaddr->ppInvSeg = 1;	/* Show that we need to invalidate the segments */

			if(i != mycpu) {			/* Remote CPUs must be signalled; we handle ourselves via ppInvSeg */

				tstamp = PerProcTable[i].ppe_vaddr->ruptStamp[1];	/* Save the processor's last interrupt time stamp */
				if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) == KERN_SUCCESS) {	/* Make sure we see the pmap change */
					if(!hw_cpu_wcng(&PerProcTable[i].ppe_vaddr->ruptStamp[1], tstamp, LockTimeOut)) {	/* Wait for the target to take an interruption */
						panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
					}
				}
			}
		}
		enable_preemption();
	}

	return KERN_SUCCESS;					/* Bye, bye, butterfly... */
}
1783
1784
1785 /*
1786 * void MapUserMemoryWindowInit(void)
1787 *
1788 * Initialize anything we need to in order to map user address space slices into
1789 * the kernel. Primarily used for copy in/out.
1790 *
1791 * Currently we only support one 512MB slot for this purpose. There are two special
1792 * mappings defined for the purpose: the special pmap nest, and linkage mapping.
1793 *
1794 * The special pmap nest (which is allocated in this function) is used as a place holder
1795 * in the kernel's pmap search list. It is 512MB long and covers the address range
1796 * starting at lgUMWvaddr. It points to no actual memory and when the fault handler
1797 * hits in it, it knows to look in the per_proc and start using the linkage
1798  * mapping contained therein.
1799 *
1800 * The linkage mapping is used to glue the user address space slice into the
1801 * kernel. It contains the relocation information used to transform the faulting
1802 * kernel address into the user address space. It also provides the link to the
1803 * user's pmap. This is pointed to by the per_proc and is switched in and out
1804 * whenever there is a context switch.
1805 *
1806 */
1807
1808 void MapUserMemoryWindowInit(void) {
1809
1810 addr64_t colladdr;
1811 int nlists;
1812 mapping_t *mp;
1813
1814 nlists = mapSetLists(kernel_pmap); /* Set number of lists this will be on */
1815
1816 mp = mapping_alloc(nlists); /* Get a spare mapping block */
1817
1818 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
1819 /* Set the flags. Make sure busy count is 1 */
1820 mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
1821 mp->u.mpBSize = 15; /* Set the size to 2 segments in 32MB chunks - 1 */
1822 mp->mpPte = 0; /* Means nothing */
1823 mp->mpPAddr = 0; /* Means nothing */
1824 mp->mpVAddr = lowGlo.lgUMWvaddr; /* Set the address range we cover */
1825 mp->mpNestReloc = 0; /* Means nothing */
1826
1827 colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */
1828
1829 if(colladdr) { /* Did it collide? */
1830 panic("MapUserMemoryWindowInit: MapUserMemoryWindow range already mapped\n");
1831 }
1832
1833 return;
1834 }
1835
1836 /*
1837  * addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va)
1838 *
1839 * map = the vm_map that we are mapping into the kernel
1840 * va = start of the address range we are mapping
1841  * Note that we do not test validity; we choose to trust our fellows...
1842 *
1843 * Maps a 512M slice of a user address space into a predefined kernel range
1844 * on a per-thread basis. We map only the first 256M segment, allowing the
1845 * second 256M segment to fault in as needed. This allows our clients to access
1846 * an arbitrarily aligned operand up to 256M in size.
1847 *
1848 * In the future, the restriction of a predefined range may be loosened.
1849 *
1850 * Builds the proper linkage map to map the user range
1851 * We will round this down to the previous segment boundary and calculate
1852 * the relocation to the kernel slot
1853 *
1854 * We always make a segment table entry here if we need to. This is mainly because of
1855 * copyin/out and if we don't, there will be multiple segment faults for
1856 * each system call. I have seen upwards of 30000 per second.
1857 *
1858 * We do check, however, to see if the slice is already mapped and if so,
1859 * we just exit. This is done for performance reasons. It was found that
1860 * there was a considerable boost in copyin/out performance if we did not
1861 * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
1862 * restriction that you had to bracket MapUserMemoryWindow. Further, there
1863 * is a yet further boost if you didn't need to map it each time. The theory
1864 * behind this is that many times copies are to or from the same segment and
1865 * done multiple times within the same system call. To take advantage of that,
1866 * we check umwSpace and umwRelo to see if we've already got it.
1867 *
1868 * We also need to half-invalidate the slice when we context switch or go
1869 * back to user state. A half-invalidate does not clear the actual mapping,
1870 * but it does force the MapUserMemoryWindow function to reload the segment
1871  * register/SLBE. If this is not done, we can end up with some pretty severe
1872 * performance penalties. If we map a slice, and the cached space/relocation is
1873  * the same, we won't reload the segment registers. However, since we ran someone else,
1874 * our SR is cleared and we will take a fault. This is reasonable if we block
1875 * while copying (e.g., we took a page fault), but it is not reasonable when we
1876 * just start. For this reason, we half-invalidate to make sure that the SR is
1877 * explicitly reloaded.
1878 *
1879 * Note that we do not go to the trouble of making a pmap segment cache
1880 * entry for these guys because they are very short term -- 99.99% of the time
1881 * they will be unmapped before the next context switch.
1882 *
1883 */
1884
addr64_t MapUserMemoryWindow(
	vm_map_t map,
	addr64_t va) {

	addr64_t baddrs, reladd;
	thread_t thread;
	mapping_t *mp;

	baddrs = va & 0xFFFFFFFFF0000000ULL;					/* Isolate the segment the user address lives in */
	thread = current_thread();								/* Remember our activation */

	reladd = baddrs - lowGlo.lgUMWvaddr;					/* Get the relocation from user to kernel */

	/* Fast path: if the cached space and relocation already match, the slice is
	   mapped and the segment register was loaded — just translate and return. */
	if((thread->machine.umwSpace == map->pmap->space) && (thread->machine.umwRelo == reladd)) {	/* Already mapped? */
		return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr);	/* Pass back the kernel address we are to use */
	}

	disable_preemption();									/* Don't move... per-proc linkage mapping below is CPU-local */

	mp = (mapping_t *)&(getPerProc()->ppUMWmp);				/* Address of this CPU's linkage mapping (cast makes up for C) */

	/*
	 * NOTE(review): the relocation fields are stored before the space fields,
	 * apparently so that a fault taken mid-update sees a non-matching space
	 * rather than a stale relocation — preserve this statement order.
	 * (Inferred from the surrounding comments; confirm against the fault handler.)
	 */
	thread->machine.umwRelo = reladd;						/* Relocation from user to kernel */
	mp->mpNestReloc = reladd;								/* Relocation from user to kernel */

	thread->machine.umwSpace = map->pmap->space;			/* Set the address space/pmap lookup ID */
	mp->mpSpace = map->pmap->space;							/* Set the address space/pmap lookup ID */

/*
 *	Here we make an assumption that we are going to be using the base pmap's address space.
 *	If we are wrong, and that would be very, very, very rare, the fault handler will fix us up.
 */

	hw_map_seg(map->pmap,  lowGlo.lgUMWvaddr, baddrs);		/* Make the entry for the first segment */

	enable_preemption();									/* Let's move */
	return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr);		/* Pass back the kernel address we are to use */
}
1921
1922
1923 /*
1924 * kern_return_t pmap_boot_map(size)
1925 *
1926 * size = size of virtual address range to be mapped
1927 *
1928  * This function is used to assign a range of virtual addresses before VM is
1929 * initialized. It starts at VM_MAX_KERNEL_ADDRESS and works downward.
1930 * The variable vm_last_addr contains the current highest possible VM
1931 * assignable address. It is a panic to attempt to call this after VM has
1932  * started up. The only problem is that we may not have the serial or
1933 * framebuffer mapped, so we'll never know we died.........
1934 */
1935
1936 vm_offset_t pmap_boot_map(vm_size_t size) {
1937
1938 if(kernel_map != VM_MAP_NULL) { /* Has VM already started? */
1939 panic("pmap_boot_map: VM started\n");
1940 }
1941
1942 size = round_page(size); /* Make sure this is in pages */
1943 vm_last_addr = vm_last_addr - size; /* Allocate the memory */
1944 return (vm_last_addr + 1); /* Return the vaddr we just allocated */
1945
1946 }
1947
1948
1949 /*
1950 * void pmap_init_sharedpage(void);
1951 *
1952 * Hack map for the 64-bit commpage
1953 */
1954
1955 void pmap_init_sharedpage(vm_offset_t cpg){
1956
1957 addr64_t cva, cpoff;
1958 ppnum_t cpphys;
1959
1960 sharedPmap = pmap_create(0); /* Get a pmap to hold the common segment */
1961 if(!sharedPmap) { /* Check for errors */
1962 panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
1963 }
1964
1965 for(cpoff = 0; cpoff < _COMM_PAGE_AREA_USED; cpoff += 4096) { /* Step along now */
1966
1967 cpphys = pmap_find_phys(kernel_pmap, (addr64_t)cpg + cpoff);
1968 if(!cpphys) {
1969 panic("pmap_init_sharedpage: compage %08X not mapped in kernel\n", cpg + cpoff);
1970 }
1971
1972 cva = mapping_make(sharedPmap, (addr64_t)((uint32_t)_COMM_PAGE_BASE_ADDRESS) + cpoff,
1973 cpphys, mmFlgPerm, 1, VM_PROT_READ); /* Map the page read only */
1974 if(cva) { /* Check for errors */
1975 panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva);
1976 }
1977
1978 }
1979
1980 return;
1981 }
1982
1983
1984 /*
1985 * void pmap_map_sharedpage(pmap_t pmap);
1986 *
1987 * Maps the last segment in a 64-bit address space
1988 *
1989 *
1990 */
1991
1992 void pmap_map_sharedpage(task_t task, pmap_t pmap){
1993
1994 kern_return_t ret;
1995
1996 if(task_has_64BitAddr(task) || _cpu_capabilities & k64Bit) { /* Should we map the 64-bit page -1? */
1997 ret = pmap_nest(pmap, sharedPmap, 0xFFFFFFFFF0000000ULL, 0x00000000F0000000ULL,
1998 0x0000000010000000ULL); /* Nest the highest possible segment to map comm page */
1999 if(ret != KERN_SUCCESS) { /* Did it work? */
2000 panic("pmap_map_sharedpage: couldn't nest shared page - ret = %08X\n", ret);
2001 }
2002 }
2003
2004 return;
2005 }
2006
2007
2008 /*
2009 * void pmap_unmap_sharedpage(pmap_t pmap);
2010 *
2011 * Unmaps the last segment in a 64-bit address space
2012 *
2013 */
2014
void pmap_unmap_sharedpage(pmap_t pmap){

	kern_return_t ret;
	mapping_t *mp;
	boolean_t inter;
	int gotnest;
	addr64_t nextva;

	if(BootProcInfo.pf.Available & pf64Bit) {		/* Are we on a 64-bit machine? (Only then was the commpage nested.) */

		inter = ml_set_interrupts_enabled(FALSE);	/* Disable interruptions while we probe the mapping */
		mp = hw_find_map(pmap, 0xFFFFFFFFF0000000ULL, &nextva);	/* Find the mapping for the top segment; returns it busy */
		if((unsigned int)mp == mapRtBadLk) {		/* hw_find_map signals a lock timeout via this sentinel value */
			panic("pmap_unmap_sharedpage: mapping lock failure - rc = %08X, pmap = %08X\n", mp, pmap);	/* Die... */
		}

		gotnest = 0;								/* Assume nothing here */
		if(mp) {
			/* Remember whether the mapping found is actually a nested pmap */
			gotnest = ((mp->mpFlags & mpType) == mpNest);
			mapping_drop_busy(mp);					/* We have everything we need from the mapping; release it */
		}
		ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

		if(!gotnest) return;						/* Leave if there isn't any nesting here */

		ret = pmap_unnest(pmap, 0xFFFFFFFFF0000000ULL);	/* Unnest the max 64-bit page */

		if(ret != KERN_SUCCESS) {					/* Did it work? */
			panic("pmap_unmap_sharedpage: couldn't unnest shared page - ret = %08X\n", ret);
		}
	}

	return;
}
2050
2051
2052 /* temporary workaround */
2053 boolean_t
2054 coredumpok(
2055 __unused vm_map_t map,
2056 __unused vm_offset_t va)
2057 {
2058 return TRUE;
2059 }