1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1990,1991,1992 The University of Utah and
28 * the Center for Software Science (CSS).
29 * Copyright (c) 1991,1987 Carnegie Mellon University.
30 * All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software and its
33 * documentation is hereby granted, provided that both the copyright
34 * notice and this permission notice appear in all copies of the
35 * software, derivative works or modified versions, and any portions
36 * thereof, and that both notices appear in supporting documentation,
37 * and that all advertising materials mentioning features or use of
38 * this software display the following acknowledgement: ``This product
39 * includes software developed by the Center for Software Science at
40 * the University of Utah.''
41 *
42 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
43 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
44 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
45 * THIS SOFTWARE.
46 *
47 * CSS requests users of this software to return to css-dist@cs.utah.edu any
48 * improvements that they make and grant CSS redistribution rights.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 *
58 * Utah $Hdr: pmap.c 1.28 92/06/23$
59 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
60 */
61
62 /*
63 * Manages physical address maps for powerpc.
64 *
65 * In addition to hardware address maps, this
66 * module is called upon to provide software-use-only
67 * maps which may or may not be stored in the same
68 * form as hardware maps. These pseudo-maps are
69 * used to store intermediate results from copy
70 * operations to and from address spaces.
71 *
72 * Since the information managed by this module is
73 * also stored by the logical address mapping module,
74 * this module may throw away valid virtual-to-physical
75 * mappings at almost any time. However, invalidations
76 * of virtual-to-physical mappings must be done as
77 * requested.
78 *
79 * In order to cope with hardware architectures which
80 * make virtual-to-physical map invalidates expensive,
81 * this module may delay invalidation or reduced-protection
82 * operations until such time as they are actually
83 * necessary. This module is given full information as to
84 * when physical maps must be made correct.
85 *
86 */
87
88 #include <zone_debug.h>
89 #include <debug.h>
90 #include <mach_kgdb.h>
91 #include <mach_vm_debug.h>
92 #include <db_machine_commands.h>
93
94 #include <kern/thread.h>
95 #include <kern/simple_lock.h>
96 #include <mach/vm_attributes.h>
97 #include <mach/vm_param.h>
98 #include <vm/vm_kern.h>
99 #include <kern/spl.h>
100
101 #include <kern/misc_protos.h>
102 #include <ppc/misc_protos.h>
103 #include <ppc/proc_reg.h>
104
105 #include <vm/pmap.h>
106 #include <vm/vm_map.h>
107 #include <vm/vm_page.h>
108
109 #include <ppc/pmap.h>
110 #include <ppc/mem.h>
111 #include <ppc/mappings.h>
112
113 #include <ppc/new_screen.h>
114 #include <ppc/Firmware.h>
115 #include <ppc/savearea.h>
116 #include <ppc/cpu_internal.h>
117 #include <ppc/exception.h>
118 #include <ppc/low_trace.h>
119 #include <ppc/lowglobals.h>
120 #include <ddb/db_output.h>
121 #include <machine/cpu_capabilities.h>
122
123 #include <vm/vm_protos.h> /* must be last */
124
125
126 extern unsigned int avail_remaining;
127 unsigned int debugbackpocket; /* (TEST/DEBUG) */
128
129 vm_offset_t first_free_virt;
130 int current_free_region; /* Used in pmap_next_page */
131
132 pmapTransTab *pmapTrans; /* Point to the hash to pmap translations */
133 struct phys_entry *phys_table;
134
135 /* forward */
136 static void pmap_map_physical(void);
137 static void pmap_map_iohole(addr64_t paddr, addr64_t size);
138 void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
139 void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
140
141 extern void hw_hash_init(void);
142
143 /* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
144
145 extern struct pmap kernel_pmap_store;
146 pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
147 addr64_t kernel_pmap_phys; /* Pointer to kernel pmap and anchor for in-use pmaps, physical address */
148 pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
149 pmap_t sharedPmap; /* Pointer to common pmap for 64-bit address spaces */
150 struct zone *pmap_zone; /* zone of pmap structures */
151 boolean_t pmap_initialized = FALSE;
152
153 int ppc_max_pmaps; /* Maximum number of concurrent address spaces allowed. This is machine dependent */
154 addr64_t vm_max_address; /* Maximum effective address supported */
155 addr64_t vm_max_physical; /* Maximum physical address supported */
156
157 /*
158 * Physical-to-virtual translations are handled by inverted page table
159 * structures, phys_tables. Multiple mappings of a single page are handled
160 * by linking the affected mapping structures. We initialise one region
161 * for phys_tables of the physical memory we know about, but more may be
162 * added as it is discovered (eg. by drivers).
163 */
164
165 /*
166 * free pmap list. caches the first free_pmap_max pmaps that are freed up
167 */
168 int free_pmap_max = 32;
169 int free_pmap_count;
170 pmap_t free_pmap_list;
171 decl_simple_lock_data(,free_pmap_lock)
172
173 /*
174 * Function to get index into phys_table for a given physical address
175 */
176
177 struct phys_entry *pmap_find_physentry(ppnum_t pa)
178 {
179 int i;
180 unsigned int entry;
181
182 for (i = pmap_mem_regions_count - 1; i >= 0; i--) {
183 if (pa < pmap_mem_regions[i].mrStart) continue; /* See if we fit in this region */
184 if (pa > pmap_mem_regions[i].mrEnd) continue; /* Check the end too */
185
186 entry = (unsigned int)pmap_mem_regions[i].mrPhysTab + ((pa - pmap_mem_regions[i].mrStart) * sizeof(phys_entry_t));
187 return (struct phys_entry *)entry;
188 }
189 // kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa);
190 return 0;
191 }
192
193 /*
194 * kern_return_t
195 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
196 * boolean_t available, unsigned int attr)
197 *
198 * THIS IS NOT SUPPORTED
199 */
200 kern_return_t
201 pmap_add_physical_memory(
202 __unused vm_offset_t spa,
203 __unused vm_offset_t epa,
204 __unused boolean_t available,
205 __unused unsigned int attr)
206 {
207
208 panic("Forget it! You can't map no more memory, you greedy puke!\n");
209 return KERN_SUCCESS;
210 }
211
212 /*
213 * pmap_map(va, spa, epa, prot)
214 * is called during boot to map memory in the kernel's address map.
215 * A virtual address range starting at "va" is mapped to the physical
216 * address range "spa" to "epa" with machine independent protection
217 * "prot".
218 *
219 * "va", "spa", and "epa" are byte addresses and must be on machine
220 * independent page boundaries.
221 *
222 * The pages span a contiguous virtual address range with the same protection and attributes;
223 * therefore, we map them with a single block.
224 *
225 * Note that this call will only map into 32-bit space
226 *
227 */
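/*
 * Illustrative use (taken from pmap_bootstrap() below): the boot-time page
 * tables are mapped V=R with a single call,
 *
 *	pmap_map(first_used_addr, first_used_addr,
 *		 round_page(first_used_addr + size),
 *		 VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT);
 *
 * which block-maps [first_used_addr, first_used_addr + size) one-to-one into
 * the kernel pmap with the default WIMG attributes.
 */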
228
229 vm_offset_t
230 pmap_map(
231 vm_offset_t va,
232 vm_offset_t spa,
233 vm_offset_t epa,
234 vm_prot_t prot,
235 unsigned int flags)
236 {
237 unsigned int mflags;
238 mflags = 0; /* Make sure this is initialized to nothing special */
239 if(!(flags & VM_WIMG_USE_DEFAULT)) { /* Are they supplying the attributes? */
240 mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
241 }
242
243 addr64_t colladr;
244
245 if (spa == epa) return(va);
246
247 assert(epa > spa);
248
249 colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12),
250 (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, (prot & VM_PROT_ALL) );
251
252 if(colladr) { /* Was something already mapped in the range? */
253 panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
254 va, spa, epa, colladr);
255 }
256 return(va);
257 }
258
259 /*
260 * pmap_map_physical()
261 * Maps physical memory into the kernel's address map beginning at lgPMWvaddr, the
262 * physical memory window.
263 *
264 */
265 void
266 pmap_map_physical()
267 {
268 unsigned region;
269 uint64_t msize, size;
270 addr64_t paddr, vaddr, colladdr;
271
272 /* Iterate over physical memory regions, block mapping each into the kernel's address map */
273 for (region = 0; region < (unsigned)pmap_mem_regions_count; region++) {
274 paddr = ((addr64_t)pmap_mem_regions[region].mrStart << 12); /* Get starting physical address */
275 size = (((addr64_t)pmap_mem_regions[region].mrEnd + 1) << 12) - paddr;
276
277 vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
278
279 while (size > 0) {
280
281 msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
282
283 colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
284 (mmFlgBlock | mmFlgPerm), (msize >> 12),
285 (VM_PROT_READ | VM_PROT_WRITE));
286 if (colladdr) {
287 panic ("pmap_map_physical: mapping failure - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
288 vaddr, (paddr >> 12), (msize >> 12), colladdr);
289 }
290
291 vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
292 paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
293 size -= msize;
294 }
295 }
296 }
297
298 /*
299 * pmap_map_iohole(addr64_t paddr, addr64_t size)
300 * Maps an I/O hole into the kernel's address map at its proper offset in
301 * the physical memory window.
302 *
303 */
304 void
305 pmap_map_iohole(addr64_t paddr, addr64_t size)
306 {
307
308 addr64_t vaddr, colladdr, msize;
309 uint32_t psize;
310
311 vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
312
313 while (size > 0) {
314
315 msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
316
317 colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
318 (mmFlgBlock | mmFlgPerm | mmFlgGuarded | mmFlgCInhib), (msize >> 12),
319 (VM_PROT_READ | VM_PROT_WRITE));
320 if (colladdr) {
321 panic ("pmap_map_iohole: mapping failed - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
322 vaddr, (paddr >> 12), (msize >> 12), colladdr);
323 }
324
325 vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
326 paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
327 size -= msize;
328 }
329 }
330
331 /*
332 * Bootstrap the system enough to run with virtual memory.
333 * Map the kernel's code and data, and allocate the system page table.
334 * Called with mapping done by BATs. Page_size must already be set.
335 *
336 * Parameters:
337 * msize: Total memory present
338 * first_avail: First virtual address available
339 * kmapsize: Size of kernel text and data
340 */
341 void
342 pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
343 {
344 vm_offset_t addr;
345 vm_size_t size;
346 unsigned int i, num, mapsize, vmpagesz, vmmapsz, nbits;
347 signed bank;
348 uint64_t tmemsize;
349 uint_t htslop;
350 vm_offset_t first_used_addr, PCAsize;
351 struct phys_entry *phys_entry;
352
353 *first_avail = round_page(*first_avail); /* Make sure we start out on a page boundary */
354 vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address known to VM */
355
356 /*
357 * Initialize kernel pmap
358 */
359 kernel_pmap = &kernel_pmap_store;
360 kernel_pmap_phys = (addr64_t)&kernel_pmap_store;
361 cursor_pmap = &kernel_pmap_store;
362
363 kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
364 kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
365 kernel_pmap->ref_count = 1;
366 kernel_pmap->pmapFlags = pmapKeyDef; /* Set the default keys */
367 kernel_pmap->pmapFlags |= pmapNXdisabled;
368 kernel_pmap->pmapCCtl = pmapCCtlVal; /* Initialize cache control */
369 kernel_pmap->space = PPC_SID_KERNEL;
370 kernel_pmap->pmapvr = 0; /* Virtual = Real */
371
372 /*
373 * IBM's recommended hash table size is one PTEG for every 2 physical pages.
374 * However, we have found that OSX rarely uses more than 4 PTEs in a PTEG
375 * with this size table. Therefore, by default we allocate a hash table
376 * one half IBM's recommended size, ie one PTEG per 4 pages. The "ht_shift" boot-arg
377 * can be used to override the default hash table size.
378 * We will allocate the hash table in physical RAM, outside of kernel virtual memory,
379 * at the top of the highest bank that will contain it.
380 * Note that "bank" doesn't refer to a physical memory slot here, it is a range of
381 * physically contiguous memory.
382 *
383 * The PCA will go there as well, immediately before the hash table.
384 */
385
386 nbits = cntlzw(((msize << 1) - 1) >> 32); /* Get first bit in upper half */
387 if (nbits == 32) /* If upper half was empty, find bit in bottom half */
388 nbits = nbits + cntlzw((uint_t)((msize << 1) - 1));
389 tmemsize = 0x8000000000000000ULL >> nbits; /* Get memory size rounded up to power of 2 */
390
391 /* Calculate hash table size: First, make sure we don't overflow 32-bit arithmetic. */
392 if (tmemsize > 0x0000002000000000ULL)
393 tmemsize = 0x0000002000000000ULL;
394
395 /* Second, calculate IBM recommended hash table size, ie one PTEG per 2 physical pages */
396 hash_table_size = (uint_t)(tmemsize >> 13) * PerProcTable[0].ppe_vaddr->pf.pfPTEG;
397
398 /* Third, cut this in half to produce the OSX default, ie one PTEG per 4 physical pages */
399 hash_table_size >>= 1;
400
401 /* Fourth, adjust default size per "ht_shift" boot arg */
402 if (hash_table_shift >= 0) /* if positive, make size bigger */
403 hash_table_size <<= hash_table_shift;
404 else /* if "ht_shift" is negative, make smaller */
405 hash_table_size >>= (-hash_table_shift);
406
407 /* Fifth, make sure we are at least minimum size */
408 if (hash_table_size < (256 * 1024))
409 hash_table_size = (256 * 1024);
410
411 while(1) { /* Try to fit hash table and PCA into contiguous memory */
412
413 if(hash_table_size < (256 * 1024)) { /* Have we dropped too short? This should never, ever happen */
414 panic("pmap_bootstrap: Can't find space for hash table\n"); /* This will never print, system isn't up far enough... */
415 }
416
417 PCAsize = (hash_table_size / PerProcTable[0].ppe_vaddr->pf.pfPTEG) * sizeof(PCA_t); /* Get total size of PCA table */
418 PCAsize = round_page(PCAsize); /* Make sure it is at least a page long */
419
420 for(bank = pmap_mem_regions_count - 1; bank >= 0; bank--) { /* Search backwards through banks */
421
422 hash_table_base = ((addr64_t)pmap_mem_regions[bank].mrEnd << 12) - hash_table_size + PAGE_SIZE; /* Get tentative address */
423
424 htslop = hash_table_base & (hash_table_size - 1); /* Get the extra that we will round down when we align */
425 hash_table_base = hash_table_base & -(addr64_t)hash_table_size; /* Round down to correct boundary */
426
427 if((hash_table_base - round_page(PCAsize)) >= ((addr64_t)pmap_mem_regions[bank].mrStart << 12)) break; /* Leave if we fit */
428 }
429
430 if(bank >= 0) break; /* We are done if we found a suitable bank */
431
432 hash_table_size = hash_table_size >> 1; /* Try the next size down */
433 }
434
435 if(htslop) { /* If there was slop (i.e., wasted pages for alignment) add a new region */
436 for(i = pmap_mem_regions_count - 1; i >= (unsigned)bank; i--) { /* Copy from end to our bank, including our bank */
437 pmap_mem_regions[i + 1].mrStart = pmap_mem_regions[i].mrStart; /* Set the start of the bank */
438 pmap_mem_regions[i + 1].mrAStart = pmap_mem_regions[i].mrAStart; /* Set the start of allocatable area */
439 pmap_mem_regions[i + 1].mrEnd = pmap_mem_regions[i].mrEnd; /* Set the end address of bank */
440 pmap_mem_regions[i + 1].mrAEnd = pmap_mem_regions[i].mrAEnd; /* Set the end address of allocatable area */
441 }
442
443 pmap_mem_regions[i + 1].mrStart = (hash_table_base + hash_table_size) >> 12; /* Set the start of the next bank to the start of the slop area */
444 pmap_mem_regions[i + 1].mrAStart = (hash_table_base + hash_table_size) >> 12; /* Set the start of allocatable area to the start of the slop area */
445 pmap_mem_regions[i].mrEnd = (hash_table_base + hash_table_size - 4096) >> 12; /* Set the end of our bank to the end of the hash table */
446
447 }
448
449 pmap_mem_regions[bank].mrAEnd = (hash_table_base - PCAsize - 4096) >> 12; /* Set the maximum allocatable in this bank */
450
451 hw_hash_init(); /* Initialize the hash table and PCA */
452 hw_setup_trans(); /* Set up hardware registers needed for translation */
453
454 /*
455 * The hash table is now all initialized and so is the PCA. Go on to do the rest of it.
456 * This allocation is from the bottom up.
457 */
458
459 num = atop_64(msize); /* Get number of pages in all of memory */
460
461 /* Figure out how much we need to allocate */
462
463 size = (vm_size_t) (
464 (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */
465 (BackPocketSaveBloks * PAGE_SIZE) + /* For backpocket saveareas */
466 trcWork.traceSize + /* Size of trace table */
467 ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) + /* Size of pmap translate table */
468 (((num * sizeof(struct phys_entry)) + 4095) & -4096) /* For the physical entries */
469 );
470
471 mapsize = size = round_page(size); /* Get size of area to map that we just calculated */
472 mapsize = mapsize + kmapsize; /* Account for the kernel text size */
473
474 vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
475 vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */
476
477 mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */
478
479 mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
480 mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */
481 mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */
482
483 size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */
484
485 /* hash table must be aligned to its size */
486
487 addr = *first_avail; /* Set the address to start allocations */
488 first_used_addr = addr; /* Remember where we started */
489
490 bzero((char *)addr, size); /* Clear everything that we are allocating */
491
492 savearea_init(addr); /* Initialize the savearea chains and data */
493
494 addr = (vm_offset_t)((unsigned int)addr + ((InitialSaveBloks + BackPocketSaveBloks) * PAGE_SIZE)); /* Point past saveareas */
495
496 trcWork.traceCurr = (unsigned int)addr; /* Set first trace slot to use */
497 trcWork.traceStart = (unsigned int)addr; /* Set start of trace table */
498 trcWork.traceEnd = (unsigned int)addr + trcWork.traceSize; /* Set end of trace table */
499
500 addr = (vm_offset_t)trcWork.traceEnd; /* Set next allocatable location */
501
502 pmapTrans = (pmapTransTab *)addr; /* Point to the pmap to hash translation table */
503
504 pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap); /* Initialize the kernel pmap in the translate table */
505 pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap); /* Initialize the kernel pmap in the translate table */
506
507 addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096); /* Point past pmap translate table */
508
509 /* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */
510
511 phys_entry = (struct phys_entry *) addr; /* Get pointer to physical table */
512
513 for (bank = 0; bank < pmap_mem_regions_count; bank++) { /* Set pointer and initialize all banks of ram */
514
515 pmap_mem_regions[bank].mrPhysTab = phys_entry; /* Set pointer to the physical table for this bank */
516
517 phys_entry = phys_entry + (pmap_mem_regions[bank].mrEnd - pmap_mem_regions[bank].mrStart + 1); /* Point to the next */
518 }
519
520 addr += (((num * sizeof(struct phys_entry)) + 4095) & -4096); /* Step on past the physical entries */
521
522 /*
523 * Remaining space is for mapping entries. Tell the initializer routine that
524 * the mapping system can't release this block because it's permanently assigned
525 */
526
527 mapping_init(); /* Initialize the mapping tables */
528
529 for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */
530 mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */
531 }
532 mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */
533
534 /* Map V=R the page tables */
535 pmap_map(first_used_addr, first_used_addr,
536 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT);
537
538 *first_avail = round_page(first_used_addr + size); /* Set next available page */
539 first_free_virt = *first_avail; /* Ditto */
540
541 /* For 64-bit machines, block map physical memory and the I/O hole into kernel space */
542 if(BootProcInfo.pf.Available & pf64Bit) { /* Are we on a 64-bit machine? */
543 lowGlo.lgPMWvaddr = PHYS_MEM_WINDOW_VADDR; /* Initialize the physical memory window's virtual address */
544
545 pmap_map_physical(); /* Block map physical memory into the window */
546
547 pmap_map_iohole(IO_MEM_WINDOW_VADDR, IO_MEM_WINDOW_SIZE);
548 /* Block map the I/O hole */
549 }
550
551 /* All the rest of memory is free - add it to the free
552 * regions so that it can be allocated by pmap_steal
553 */
554
555 pmap_mem_regions[0].mrAStart = (*first_avail >> 12); /* Set up the free area to start allocations (always in the first bank) */
556
557 current_free_region = 0; /* Set that we will start allocating in bank 0 */
558 avail_remaining = 0; /* Clear free page count */
559 for(bank = 0; bank < pmap_mem_regions_count; bank++) { /* Total up all of the pages in the system that are available */
560 avail_remaining += (pmap_mem_regions[bank].mrAEnd - pmap_mem_regions[bank].mrAStart) + 1; /* Add in allocatable pages in this bank */
561 }
562
563
564 }
565
566 /*
567 * pmap_init(spa, epa)
568 * finishes the initialization of the pmap module.
569 * This procedure is called from vm_mem_init() in vm/vm_init.c
570 * to initialize any remaining data structures that the pmap module
571 * needs to map virtual memory (VM is already ON).
572 *
573 * Note that the pmap needs to be sized and aligned to
574 * a power of two. This is because it is used both in virtual and
575 * real so it can't span a page boundary.
576 */
577
578 void
579 pmap_init(void)
580 {
581
582 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
583 #if ZONE_DEBUG
584 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
585 #endif /* ZONE_DEBUG */
586
587 pmap_initialized = TRUE;
588
589 /*
590 * Initialize list of freed up pmaps
591 */
592 free_pmap_list = 0; /* Set that there are no free pmaps */
593 free_pmap_count = 0;
594 simple_lock_init(&free_pmap_lock, 0);
595
596 }
597
598 unsigned int pmap_free_pages(void)
599 {
600 return avail_remaining;
601 }
602
603 /*
604 * This function allocates physical pages.
605 */
606
607 /* Non-optimal, but only used for virtual memory startup.
608 * Allocate memory from a table of free physical addresses
609 * If there are no more free entries, too bad.
610 */
611
612 boolean_t pmap_next_page(ppnum_t *addrp)
613 {
614 int i;
615
616 if(current_free_region >= pmap_mem_regions_count) return FALSE; /* Return failure if we have used everything... */
617
618 for(i = current_free_region; i < pmap_mem_regions_count; i++) { /* Find the next bank with free pages */
619 if(pmap_mem_regions[i].mrAStart <= pmap_mem_regions[i].mrAEnd) break; /* Found one */
620 }
621
622 current_free_region = i; /* Set our current bank */
623 if(i >= pmap_mem_regions_count) return FALSE; /* Couldn't find a free page */
624
625 *addrp = pmap_mem_regions[i].mrAStart; /* Allocate the page */
626 pmap_mem_regions[i].mrAStart = pmap_mem_regions[i].mrAStart + 1; /* Set the next one to go */
627 avail_remaining--; /* Drop free count */
628
629 return TRUE;
630 }
631
632 void pmap_virtual_space(
633 vm_offset_t *startp,
634 vm_offset_t *endp)
635 {
636 *startp = round_page(first_free_virt);
637 *endp = vm_last_addr;
638 }
639
640 /*
641 * pmap_create
642 *
643 * Create and return a physical map.
644 *
645 * If the size specified for the map is zero, the map is an actual physical
646 * map, and may be referenced by the hardware.
647 *
648 * A pmap is either in the free list or in the in-use list. The only use
649 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
650 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
651 * in-use list is searched until a hole in the VSID sequence is found. (Note
652 * that the in-use pmaps are queued in VSID sequence order.) This is all done
653 * while free_pmap_lock is held.
654 *
655 * If the size specified is non-zero, the map will be used in software
656 * only, and is bounded by that size.
657 */
658 pmap_t
659 pmap_create(vm_map_size_t size, __unused boolean_t is_64bit)
660 {
661 pmap_t pmap, ckpmap, fore;
662 int s;
663 unsigned int currSID;
664 addr64_t physpmap;
665
666 /*
667 * A software use-only map doesn't even need a pmap structure.
668 */
669 if (size)
670 return(PMAP_NULL);
671
672 /*
673 * If there is a pmap in the pmap free list, reuse it.
674 * Note that we use free_pmap_list for all chaining of pmaps, both to
675 * the free list and the in use chain (anchored from kernel_pmap).
676 */
677 s = splhigh();
678 simple_lock(&free_pmap_lock);
679
680 if(free_pmap_list) { /* Any free? */
681 pmap = free_pmap_list; /* Yes, allocate it */
682 free_pmap_list = (pmap_t)pmap->freepmap; /* Dequeue this one (we chain free ones through freepmap) */
683 free_pmap_count--;
684 }
685 else {
686 simple_unlock(&free_pmap_lock); /* Unlock just in case */
687 splx(s);
688
689 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
690 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
691
692 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
693
694 s = splhigh();
695 simple_lock(&free_pmap_lock); /* Lock it back up */
696
697 ckpmap = cursor_pmap; /* Get starting point for free ID search */
698 currSID = ckpmap->spaceNum; /* Get the actual space ID number */
699
700 while(1) { /* Keep trying until something happens */
701
702 currSID = (currSID + 1) & (maxAdrSp - 1); /* Get the next in the sequence */
703 if(((currSID * incrVSID) & (maxAdrSp - 1)) == invalSpace) continue; /* Skip the space we have reserved */
704 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
705
706 if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */
707
708 if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */
709 panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp); /* Die pig dog */
710 }
711 }
712
713 pmap->space = (currSID * incrVSID) & (maxAdrSp - 1); /* Calculate the actual VSID */
714 pmap->spaceNum = currSID; /* Set the space ID number */
715 /*
716 * Now we link into the chain just before the out of sequence guy.
717 */
718
719 fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */
720 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
721 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
722 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
723 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
724
725 physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF); /* Get the physical address of the pmap */
726
727 pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap; /* Make V to R translation mask */
728
729 pmapTrans[pmap->space].pmapPAddr = physpmap; /* Set translate table physical to point to us */
730 pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap); /* Set translate table virtual to point to us */
731 }
732
733 pmap->pmapVmmExt = 0; /* Clear VMM extension block vaddr */
734 pmap->pmapVmmExtPhys = 0; /* and the paddr, too */
735 pmap->pmapFlags = pmapKeyDef; /* Set default key */
736 pmap->pmapCCtl = pmapCCtlVal; /* Initialize cache control */
737 pmap->ref_count = 1;
738 pmap->stats.resident_count = 0;
739 pmap->stats.wired_count = 0;
740 pmap->pmapSCSubTag = 0x0000000000000000ULL; /* Make sure this is clean and tidy */
741 simple_unlock(&free_pmap_lock);
742
743 splx(s);
744 return(pmap);
745 }
746
747 /*
748 * pmap_destroy
749 *
750 * Gives up a reference to the specified pmap. When the reference count
751 * reaches zero the pmap structure is added to the pmap free list.
752 *
753 * Should only be called if the map contains no valid mappings.
754 */
755 void
756 pmap_destroy(pmap_t pmap)
757 {
758 int ref_count;
759 spl_t s;
760 pmap_t fore, aft;
761
762 if (pmap == PMAP_NULL)
763 return;
764
765 ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */
766 if(ref_count>0) return; /* Still more users, leave now... */
767
768 if(ref_count < 0) /* Did we go too far? */
769 panic("pmap_destroy(): ref_count < 0");
770
771 if (!(pmap->pmapFlags & pmapVMgsaa)) { /* Don't try this for a shadow assist guest */
772 pmap_unmap_sharedpage(pmap); /* Remove any mapping of page -1 */
773 }
774
775 #ifdef notdef
776 if(pmap->stats.resident_count != 0)
777 panic("PMAP_DESTROY: pmap not empty");
778 #else
779 if(pmap->stats.resident_count != 0) {
780 pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
781 }
782 #endif
783
784 /*
785 * Add the pmap to the pmap free list.
786 */
787
788 s = splhigh();
789 /*
790 * Add the pmap to the pmap free list.
791 */
792 simple_lock(&free_pmap_lock);
793
794 if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */
795
796 pmap->freepmap = free_pmap_list; /* Queue in front */
797 free_pmap_list = pmap;
798 free_pmap_count++;
799 simple_unlock(&free_pmap_lock);
800
801 } else {
802 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
803 fore = (pmap_t)pmap->pmap_link.prev;
804 aft = (pmap_t)pmap->pmap_link.next;
805 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
806 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
807 simple_unlock(&free_pmap_lock);
808 pmapTrans[pmap->space].pmapPAddr = -1; /* Invalidate the translate table physical */
809 pmapTrans[pmap->space].pmapVAddr = -1; /* Invalidate the translate table virtual */
810 zfree(pmap_zone, pmap);
811 }
812 splx(s);
813 }
814
815 /*
816 * pmap_reference(pmap)
817 * gains a reference to the specified pmap.
818 */
819 void
820 pmap_reference(pmap_t pmap)
821 {
822 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
823 }
824
825 /*
826 * pmap_remove_some_phys
827 *
828 * Removes mappings of the associated page from the specified pmap
829 *
830 */
831 void pmap_remove_some_phys(
832 pmap_t pmap,
833 vm_offset_t pa)
834 {
835 register struct phys_entry *pp;
836 register struct mapping *mp;
837 unsigned int pindex;
838
839 if (pmap == PMAP_NULL) { /* This should never be called with a null pmap */
840 panic("pmap_remove_some_phys: null pmap\n");
841 }
842
843 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
844 if (pp == 0) return; /* Leave if not in physical RAM */
845
846 do { /* Keep going until we toss all pages from this pmap */
847 if (pmap->pmapFlags & pmapVMhost) {
848 mp = hw_purge_phys(pp); /* Toss a map */
849 switch ((unsigned int)mp & mapRetCode) {
850 case mapRtOK:
851 mapping_free(mp); /* Return mapping to free inventory */
852 break;
853 case mapRtGuest:
854 break; /* Don't try to return a guest mapping */
855 case mapRtEmpty:
856 break; /* Physent chain empty, we're done */
857 case mapRtNotFnd:
858 break; /* Mapping disappeared on us, retry */
859 default:
860 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
861 pp, pmap, mp); /* Handle failure with our usual lack of tact */
862 }
863 } else {
864 mp = hw_purge_space(pp, pmap); /* Toss a map */
865 switch ((unsigned int)mp & mapRetCode) {
866 case mapRtOK:
867 mapping_free(mp); /* Return mapping to free inventory */
868 break;
869 case mapRtEmpty:
870 break; /* Physent chain empty, we're done */
871 case mapRtNotFnd:
872 break; /* Mapping disappeared on us, retry */
873 default:
874 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
875 pp, pmap, mp); /* Handle failure with our usual lack of tact */
876 }
877 }
878 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
879
880 #if DEBUG
881 if ((pmap->pmapFlags & pmapVMhost) && !pmap_verify_free(pa))
882 panic("pmap_remove_some_phys: cruft left behind - pa = %08X, pmap = %08X\n", pa, pmap);
883 #endif
884
885 return; /* Leave... */
886 }
887
888 /*
889 * pmap_remove(pmap, s, e)
890 * unmaps all virtual addresses v in the virtual address
891 * range determined by [s, e) and pmap.
892 * s and e must be on machine independent page boundaries and
893 * s must be less than or equal to e.
894 *
895 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
896 * skip those segments.
897 */
898 void
899 pmap_remove(
900 pmap_t pmap,
901 addr64_t sva,
902 addr64_t eva)
903 {
904 addr64_t va, endva;
905
906 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
907
908
909 /* It is just possible that eva might have wrapped around to zero,
910 * and sometimes we get asked to liberate something of size zero
911 * even though it's dumb (eg. after zero length read_overwrites)
912 */
913 assert(eva >= sva);
914
915 /* If these are not page aligned the loop might not terminate */
916 assert((sva == trunc_page_64(sva)) && (eva == trunc_page_64(eva)));
917
918 va = sva & -4096LL; /* Round start down to a page */
919 endva = eva & -4096LL; /* Round end down to a page */
920
921 while(1) { /* Go until we finish the range */
922 va = mapping_remove(pmap, va); /* Remove the mapping and see what's next */
923 va = va & -4096LL; /* Make sure the "not found" indication is clear */
924 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
925 }
926
927 }
928
929 /*
930 * Routine:
931 * pmap_page_protect
932 *
933 * Function:
934 * Lower the permission for all mappings to a given page.
935 */
936 void
937 pmap_page_protect(
938 ppnum_t pa,
939 vm_prot_t prot)
940 {
941 register struct phys_entry *pp;
942 boolean_t remove;
943 unsigned int pindex;
944 mapping_t *mp;
945
946
947 switch (prot & VM_PROT_ALL) {
948 case VM_PROT_READ:
949 case VM_PROT_READ|VM_PROT_EXECUTE:
950 remove = FALSE;
951 break;
952 case VM_PROT_ALL:
953 return;
954 default:
955 remove = TRUE;
956 break;
957 }
958
959
960 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
961 if (pp == 0) return; /* Leave if not in physical RAM */
962
963 if (remove) { /* If the protection was set to none, we'll remove all mappings */
964
965 do { /* Keep going until we toss all pages from this physical page */
966 mp = hw_purge_phys(pp); /* Toss a map */
967 switch ((unsigned int)mp & mapRetCode) {
968 case mapRtOK:
969 mapping_free(mp); /* Return mapping to free inventory */
970 break;
971 case mapRtGuest:
972 break; /* Don't try to return a guest mapping */
973 case mapRtNotFnd:
974 break; /* Mapping disappeared on us, retry */
975 case mapRtEmpty:
976 break; /* Physent chain empty, we're done */
977 default: panic("pmap_page_protect: hw_purge_phys failed - pp = %08X, code = %08X\n",
978 pp, mp); /* Handle failure with our usual lack of tact */
979 }
980 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
981
982 #if DEBUG
983 if (!pmap_verify_free(pa))
984 panic("pmap_page_protect: cruft left behind - pa = %08X\n", pa);
985 #endif
986
987 return; /* Leave... */
988 }
989
990 /* When we get here, it means that we are to change the protection for a
991 * physical page.
992 */
993
994 mapping_protect_phys(pa, (prot & VM_PROT_ALL) ); /* Change protection of all mappings to page. */
995
996 }
997
998 /*
999 * Routine:
1000 * pmap_disconnect
1001 *
1002 * Function:
1003 * Disconnect all mappings for this page and return reference and change status
1004 * in generic format.
1005 *
1006 */
1007 unsigned int pmap_disconnect(
1008 ppnum_t pa)
1009 {
1010 register struct phys_entry *pp;
1011 unsigned int pindex;
1012 mapping_t *mp;
1013
1014 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1015 if (pp == 0) return (0); /* Return null ref and chg if not in physical RAM */
1016 do { /* Iterate until all mappings are dead and gone */
1017 mp = hw_purge_phys(pp); /* Disconnect a mapping */
1018 if (!mp) break; /* All mappings are gone, leave the loop */
1019 switch ((unsigned int)mp & mapRetCode) {
1020 case mapRtOK:
1021 mapping_free(mp); /* Return mapping to free inventory */
1022 break;
1023 case mapRtGuest:
1024 break; /* Don't try to return a guest mapping */
1025 case mapRtNotFnd:
1026 break; /* Mapping disappeared on us, retry */
1027 case mapRtEmpty:
1028 break; /* Physent chain empty, we're done */
1029 default: panic("hw_purge_phys: hw_purge_phys failed - pp = %08X, code = %08X\n",
1030 pp, mp); /* Handle failure with our usual lack of tact */
1031 }
1032 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
1033
1034 #if DEBUG
1035 if (!pmap_verify_free(pa))
1036 panic("pmap_disconnect: cruft left behind - pa = %08X\n", pa);
1037 #endif
1038
1039 return (mapping_tst_refmod(pa)); /* Return page ref and chg in generic format */
1040 }
1041
1042 /*
1043 * pmap_protect(pmap, s, e, prot)
1044 * changes the protection on all virtual addresses v in the
1045 * virtual address range determined by [s, e] and pmap to prot.
1046 * s and e must be on machine independent page boundaries and
1047 * s must be less than or equal to e.
1048 *
1049 * Note that any requests to change the protection of a nested pmap are
1050 * ignored. Those changes MUST be done by calling this with the correct pmap.
1051 */
1052 void pmap_protect(
1053 pmap_t pmap,
1054 vm_map_offset_t sva,
1055 vm_map_offset_t eva,
1056 vm_prot_t prot)
1057 {
1058
1059 addr64_t va, endva;
1060
1061 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1062
1063 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1064 pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva); /* Yeah, dump 'em */
1065 return; /* Leave... */
1066 }
1067
1068 va = sva & -4096LL; /* Round start down to a page */
1069 endva = eva & -4096LL; /* Round end down to a page */
1070
1071 while(1) { /* Go until we finish the range */
1072 mapping_protect(pmap, va, (prot & VM_PROT_ALL), &va); /* Change the protection and see what's next */
1073 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
1074 }
1075
1076 }
1077
1078
1079
1080 /*
1081 * pmap_enter
1082 *
1083 * Create a translation for the virtual address (virt) to the physical
1084 * address (phys) in the pmap with the protection requested. If the
1085 * translation is wired then we can not allow a full page fault, i.e.,
1086 * the mapping control block is not eligible to be stolen in a low memory
1087 * condition.
1088 *
1089 * NB: This is the only routine which MAY NOT lazy-evaluate
1090 * or lose information. That is, this routine must actually
1091 * insert this page into the given map NOW.
1092 */
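/*
 * Illustrative call (a sketch; "map", "vaddr" and "pn" are hypothetical names,
 * not from this file): entering one resident page with default caching might
 * look like
 *
 *	pmap_enter(map->pmap, vaddr, pn, VM_PROT_READ | VM_PROT_WRITE,
 *		   VM_WIMG_USE_DEFAULT, TRUE);
 *
 * With VM_WIMG_USE_DEFAULT, mflags below stays zero and the default attributes
 * are used; otherwise VM_MEM_GUARDED and VM_MEM_NOT_CACHEABLE in "flags" are
 * folded into the mapping_make() flags.
 */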
1093 void
1094 pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
1095 unsigned int flags, __unused boolean_t wired)
1096 {
1097 unsigned int mflags;
1098 addr64_t colva;
1099
1100 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
1101
1102 mflags = 0; /* Make sure this is initialized to nothing special */
1103 if(!(flags & VM_WIMG_USE_DEFAULT)) { /* Are they supplying the attributes? */
1104 mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1105 }
1106
1107 /*
1108 * It is possible to hang here if another processor is remapping any pages we collide with and are removing
1109 */
1110
1111 while(1) { /* Keep trying the enter until it goes in */
1112
1113 colva = mapping_make(pmap, va, pa, mflags, 1, (prot & VM_PROT_ALL) ); /* Enter the mapping into the pmap */
1114
1115 if(!colva) break; /* If there were no collisions, we are done... */
1116
1117 mapping_remove(pmap, colva); /* Remove the mapping that collided */
1118 }
1119 }
1120
1121 /*
1122 * Enters translations for odd-sized V=F blocks.
1123 *
1124 * The higher level VM map should be locked to insure that we don't have a
1125 * double diddle here.
1126 *
1127 * We panic if we get a block that overlaps with another. We do not merge adjacent
1128 * blocks because removing any address within a block removes the entire block and it
1129 * would really mess things up if we trashed too much.
1130 *
1131 * Once a block is mapped, it is immutable; that is, protection, cache mode, etc. can
1132 * not be changed. The block must be unmapped and then remapped with the new stuff.
1133 * We also do not keep track of reference or change flags.
1134 *
1135 * Any block that is larger than 256MB must be a multiple of 32MB. We panic if it is not.
1136 *
1137 * Note that pmap_map_block_rc is the same but doesn't panic if collision.
1138 *
1139 */
1140
1141 void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1142
1143 unsigned int mflags;
1144 addr64_t colva;
1145
1146
1147 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1148 panic("pmap_map_block: null pmap\n"); /* No, like that's dumb... */
1149 }
1150
1151 // kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n", /* (BRINGUP) */
1152 // current_thread(), va, pa, size, prot, attr, flags); /* (BRINGUP) */
1153
1154 mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1155 if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
1156
1157 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1158
1159 if(colva) { /* If there was a collision, panic */
1160 panic("pmap_map_block: mapping error %d, pmap = %08X, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
1161 }
1162
1163 return; /* Return */
1164 }
1165
1166 int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1167
1168 unsigned int mflags;
1169 addr64_t colva;
1170
1171
1172 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1173 panic("pmap_map_block_rc: null pmap\n"); /* No, like that's dumb... */
1174 }
1175
1176 mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
1177 if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
1178
1179 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1180
1181 if(colva) return 0; /* If there was a collision, fail */
1182
1183 return 1; /* Return true if it worked */
1184 }
1185
1186 /*
1187 * pmap_extract(pmap, va)
1188 * returns the physical address corresponding to the
1189 * virtual address specified by pmap and va if the
1190 * virtual address is mapped and 0 if it is not.
1191 * Note: we assume nothing is ever mapped to phys 0.
1192 *
1193 * NOTE: This call always will fail for physical addresses greater than 0xFFFFF000.
1194 */
1195 vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va) {
1196
1197 spl_t spl;
1198 register struct mapping *mp;
1199 register vm_offset_t pa;
1200 addr64_t nextva;
1201 ppnum_t ppoffset;
1202 unsigned int gva;
1203
1204 #ifdef BOGUSCOMPAT
1205 panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n"); /* Don't use this */
1206 #else
1207
1208 gva = (unsigned int)va; /* Make sure we don't have a sign */
1209
1210 spl = splhigh(); /* We can't allow any loss of control here */
1211
1212 mp = mapping_find(pmap, (addr64_t)gva, &nextva,1); /* Find the mapping for this address */
1213
1214 if(!mp) { /* Is the page mapped? */
1215 splx(spl); /* Enable interrupts */
1216 return 0; /* Pass back 0 if not found */
1217 }
1218
1219 ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */
1220
1221
1222 pa = mp->mpPAddr + ppoffset; /* Remember ppage because mapping may vanish after drop call */
1223
1224 mapping_drop_busy(mp); /* We have everything we need from the mapping */
1225 splx(spl); /* Restore 'rupts */
1226
1227 if(pa > maxPPage32) return 0; /* Force large addresses to fail */
1228
1229 pa = (pa << 12) | (va & 0xFFF); /* Convert physical page number to address */
1230
1231 #endif
1232 return pa; /* Return physical address or 0 */
1233 }
1234
1235 /*
1236 * ppnum_t pmap_find_phys(pmap, addr64_t va)
1237 * returns the physical page corresponding to the
1238 * virtual address specified by pmap and va if the
1239 * virtual address is mapped and 0 if it is not.
1240 * Note: we assume nothing is ever mapped to phys 0.
1241 *
1242 */
1243 ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {
1244
1245 spl_t spl;
1246 register struct mapping *mp;
1247 ppnum_t pa, ppoffset;
1248 addr64_t nextva;
1249
1250 spl = splhigh(); /* We can't allow any loss of control here */
1251
1252 mp = mapping_find(pmap, va, &nextva, 1); /* Find the mapping for this address */
1253
1254 if(!mp) { /* Is the page mapped? */
1255 splx(spl); /* Enable interrupts */
1256 return 0; /* Pass back 0 if not found */
1257 }
1258
1259
1260 ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */
1261
1262 pa = mp->mpPAddr + ppoffset; /* Get the actual physical address */
1263
1264 mapping_drop_busy(mp); /* We have everything we need from the mapping */
1265
1266 splx(spl); /* Restore 'rupts */
1267 return pa; /* Return physical address or 0 */
1268 }
1269
1270
1271 /*
1272 * pmap_attributes:
1273 *
1274 * Set/Get special memory attributes; not implemented.
1275 *
1276 * Note: 'VAL_GET_INFO' is used to return info about a page.
1277 * If less than 1 page is specified, return the physical page
1278 * mapping and a count of the number of mappings to that page.
1279 * If more than one page is specified, return the number
1280 * of resident pages and the number of shared (more than
1281 * one mapping) pages in the range;
1282 *
1283 *
1284 */
1285 kern_return_t
1286 pmap_attribute(
1287 __unused pmap_t pmap,
1288 __unused vm_map_offset_t address,
1289 __unused vm_map_size_t size,
1290 __unused vm_machine_attribute_t attribute,
1291 __unused vm_machine_attribute_val_t* value)
1292 {
1293
1294 return KERN_INVALID_ARGUMENT;
1295
1296 }
1297
1298
1299
1300 unsigned int pmap_cache_attributes(ppnum_t pgn) {
1301
1302 unsigned int flags;
1303 struct phys_entry * pp;
1304
1305 // Find physical address
1306 if ((pp = pmap_find_physentry(pgn))) {
1307 // Use physical attributes as default
1308 // NOTE: DEVICE_PAGER_FLAGS are made to line up
1309 flags = VM_MEM_COHERENT; /* We only support coherent memory */
1310 if (pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */
1311 if (pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */
1312 } else
1313 // If no physical, just hard code attributes
1314 flags = VM_WIMG_IO;
1315
1316 return (flags);
1317 }
1318
1319
1320
1321 /*
1322 * pmap_attribute_cache_sync(vm_offset_t pa)
1323 *
1324 * Invalidates all of the instruction cache on a physical page and
1325 * pushes any dirty data from the data cache for the same physical page
1326 */
1327
1328 kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size,
1329 __unused vm_machine_attribute_t attribute,
1330 __unused vm_machine_attribute_val_t* value) {
1331
1332 spl_t s;
1333 unsigned int i, npages;
1334
1335 npages = round_page(size) >> 12; /* Get the number of pages to do */
1336
1337 for(i = 0; i < npages; i++) { /* Do all requested pages */
1338 s = splhigh(); /* No interruptions here */
1339 sync_ppage(pp + i); /* Go flush data cache and invalidate icache */
1340 splx(s); /* Allow interruptions */
1341 }
1342
1343 return KERN_SUCCESS;
1344 }
1345
1346 /*
1347 * pmap_sync_page_data_phys(ppnum_t pa)
1348 *
1349 * Invalidates all of the instruction cache on a physical page and
1350 * pushes any dirty data from the data cache for the same physical page
1351 */
1352
1353 void pmap_sync_page_data_phys(ppnum_t pa) {
1354
1355 spl_t s;
1356
1357 s = splhigh(); /* No interruptions here */
1358 sync_ppage(pa); /* Sync up dem caches */
1359 splx(s); /* Allow interruptions */
1360 return;
1361 }
1362
1363 void
1364 pmap_sync_page_attributes_phys(ppnum_t pa)
1365 {
1366 pmap_sync_page_data_phys(pa);
1367 }
1368
1369 /*
1370 * pmap_collect
1371 *
1372 * Garbage collects the physical map system for pages that are no longer used.
1373 * It isn't implemented or needed or wanted.
1374 */
1375 void
1376 pmap_collect(__unused pmap_t pmap)
1377 {
1378 return;
1379 }
1380
1381 /*
1382 * Routine: pmap_activate
1383 * Function:
1384 * Binds the given physical map to the given
1385 * processor, and returns a hardware map description.
1386 * It isn't implemented or needed or wanted.
1387 */
1388 void
1389 pmap_activate(
1390 __unused pmap_t pmap,
1391 __unused thread_t th,
1392 __unused int which_cpu)
1393 {
1394 return;
1395 }
1396 /*
1397 * pmap_deactivate:
1398 * It isn't implemented or needed or wanted.
1399 */
1400 void
1401 pmap_deactivate(
1402 __unused pmap_t pmap,
1403 __unused thread_t th,
1404 __unused int which_cpu)
1405 {
1406 return;
1407 }
1408
1409
1410 /*
1411 * pmap_pageable(pmap, s, e, pageable)
1412 * Make the specified pages (by pmap, offset)
1413 * pageable (or not) as requested.
1414 *
1415 * A page which is not pageable may not take
1416 * a fault; therefore, its page table entry
1417 * must remain valid for the duration.
1418 *
1419 * This routine is merely advisory; pmap_enter()
1420 * will specify that these pages are to be wired
1421 * down (or not) as appropriate.
1422 *
1423 * (called from vm/vm_fault.c).
1424 */
1425 void
1426 pmap_pageable(
1427 __unused pmap_t pmap,
1428 __unused vm_map_offset_t start,
1429 __unused vm_map_offset_t end,
1430 __unused boolean_t pageable)
1431 {
1432
1433 return; /* This is not used... */
1434
1435 }
1436 /*
1437 * Routine: pmap_change_wiring
1438 * NOT USED ANYMORE.
1439 */
1440 void
1441 pmap_change_wiring(
1442 __unused pmap_t pmap,
1443 __unused vm_map_offset_t va,
1444 __unused boolean_t wired)
1445 {
1446 return; /* This is not used... */
1447 }
1448
1449 /*
1450 * pmap_modify_pages(pmap, s, e)
1451 * sets the modified bit on all virtual addresses v in the
1452 * virtual address range determined by [s, e] and pmap,
1453 * s and e must be on machine independent page boundaries and
1454 * s must be less than or equal to e.
1455 *
1456 * Note that this function will not descend nested pmaps.
1457 */
1458 void
1459 pmap_modify_pages(
1460 pmap_t pmap,
1461 vm_map_offset_t sva,
1462 vm_map_offset_t eva)
1463 {
1464 spl_t spl;
1465 mapping_t *mp;
1466 ppnum_t pa;
1467 addr64_t va, endva;
1468 unsigned int savetype;
1469
1470 if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */
1471
1472 va = sva & -4096; /* Round to page */
1473 endva = eva & -4096; /* Round to page */
1474
1475 while (va < endva) { /* Walk through all pages */
1476
1477 spl = splhigh(); /* We can't allow any loss of control here */
1478
1479 mp = mapping_find(pmap, (addr64_t)va, &va, 0); /* Find the mapping for this address */
1480
1481 if(!mp) { /* Is the page mapped? */
1482 splx(spl); /* Page not mapped, restore interruptions */
1483 if((va == 0) || (va >= endva)) break; /* We are done if there are no more or we hit the end... */
1484 continue; /* We are not done and there is more to check... */
1485 }
1486
1487 savetype = mp->mpFlags & mpType; /* Remember the type */
1488 pa = mp->mpPAddr; /* Remember ppage because mapping may vanish after drop call */
1489
1490 mapping_drop_busy(mp); /* We have everything we need from the mapping */
1491
1492 splx(spl); /* Restore 'rupts */
1493
1494 if(savetype != mpNormal) continue; /* Can't mess around with these guys... */
1495
1496 mapping_set_mod(pa); /* Set the modified bit for this page */
1497
1498 if(va == 0) break; /* We hit the end of the pmap, might as well leave now... */
1499 }
1500 return; /* Leave... */
1501 }
1502
1503 /*
1504 * pmap_clear_modify(phys)
1505 * clears the hardware modified ("dirty") bit for one
1506 * machine independent page starting at the given
1507 * physical address. phys must be aligned on a machine
1508 * independent page boundary.
1509 */
1510 void
1511 pmap_clear_modify(ppnum_t pa)
1512 {
1513
1514 mapping_clr_mod(pa); /* Clear all change bits for physical page */
1515
1516 }
1517
1518 /*
1519 * pmap_is_modified(phys)
1520 * returns TRUE if the given physical page has been modified
1521 * since the last call to pmap_clear_modify().
1522 */
1523 boolean_t
1524 pmap_is_modified(register ppnum_t pa)
1525 {
1526 return mapping_tst_mod(pa); /* Check for modified */
1527
1528 }
1529
1530 /*
1531 * pmap_clear_reference(phys)
1532 * clears the hardware referenced bit in the given machine
1533 * independent physical page.
1534 *
1535 */
1536 void
1537 pmap_clear_reference(ppnum_t pa)
1538 {
1539 mapping_clr_ref(pa); /* Clear the reference bit for this physical page */
1540 }
1541
1542 /*
1543 * pmap_is_referenced(phys)
1544 * returns TRUE if the given physical page has been referenced
1545 * since the last call to pmap_clear_reference().
1546 */
1547 boolean_t
1548 pmap_is_referenced(ppnum_t pa)
1549 {
1550 return mapping_tst_ref(pa); /* Check for referenced */
1551 }
1552
1553 /*
1554 * pmap_get_refmod(phys)
1555 * returns the referenced and modified bits of the specified
1556 * physical page.
1557 */
1558 unsigned int
1559 pmap_get_refmod(ppnum_t pa)
1560 {
1561 return (mapping_tst_refmod(pa));
1562 }
1563
1564 /*
1565 * pmap_clear_refmod(phys, mask)
1566 * clears the referenced and modified bits as specified by the mask
1567 * of the specified physical page.
1568 */
1569 void
1570 pmap_clear_refmod(ppnum_t pa, unsigned int mask)
1571 {
1572 mapping_clr_refmod(pa, mask);
1573 }
1574
1575 /*
1576 * pmap_eligible_for_execute(ppnum_t pa)
1577 * return true if physical address is eligible to contain executable code;
1578 * otherwise, return false
1579 */
1580 boolean_t
1581 pmap_eligible_for_execute(ppnum_t pa)
1582 {
1583 phys_entry_t *physent;
1584 unsigned int pindex;
1585
1586 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1587
1588 if((!physent) || (physent->ppLink & ppG))
1589 return 0; /* If there is no physical entry or marked guarded,
1590 the entry is not eligible for execute */
1591
1592 return 1; /* Otherwise, entry is eligible for execute */
1593 }
1594
1595 #if MACH_VM_DEBUG
1596 int
1597 pmap_list_resident_pages(
1598 __unused pmap_t pmap,
1599 __unused vm_offset_t *listp,
1600 __unused int space)
1601 {
1602 return 0;
1603 }
1604 #endif /* MACH_VM_DEBUG */
1605
1606 /*
1607 * Locking:
1608 * spl: VM
1609 */
1610 void
1611 pmap_copy_part_page(
1612 vm_offset_t src,
1613 vm_offset_t src_offset,
1614 vm_offset_t dst,
1615 vm_offset_t dst_offset,
1616 vm_size_t len)
1617 {
1618 addr64_t fsrc, fdst;
1619
1620 assert((((dst << 12) & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);	/* Parenthesize so the page offset, not the sum, is masked */
1621 assert((((src << 12) & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
1622
1623 fsrc = ((addr64_t)src << 12) + src_offset;
1624 fdst = ((addr64_t)dst << 12) + dst_offset;
1625
1626 phys_copy(fsrc, fdst, len); /* Copy the stuff physically */
1627 }
1628
1629 void
1630 pmap_zero_part_page(
1631 __unused vm_offset_t p,
1632 __unused vm_offset_t offset,
1633 __unused vm_size_t len)
1634 {
1635 panic("pmap_zero_part_page");
1636 }
1637
1638 boolean_t pmap_verify_free(ppnum_t pa) {
1639
1640 struct phys_entry *pp;
1641 unsigned int pindex;
1642
1643 pp = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1644 if (pp == 0) return FALSE; /* If there isn't one, show no mapping... */
1645
1646 if(pp->ppLink & ~(ppLock | ppFlags)) return FALSE; /* We have at least one mapping */
1647 return TRUE; /* No mappings */
1648 }
1649
1650
1651 /* Determine if we need to switch space and set up for it if so */
1652
1653 void pmap_switch(pmap_t map)
1654 {
1655 hw_blow_seg(lowGlo.lgUMWvaddr); /* Blow off the first segment */
1656 hw_blow_seg(lowGlo.lgUMWvaddr + 0x10000000ULL); /* Blow off the second segment */
1657
1658 /* When switching into kernel space, don't bother
1659 * doing anything; the kernel is already mapped from here.
1660 */
1661 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
1662 return; /* If so, we don't do anything... */
1663 }
1664
1665 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
1666 return; /* Bye, bye, butterfly... */
1667 }
1668
1669 /*
1670 * kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
1671 *
1672 * grand = the pmap that we will nest subord into
1673 * subord = the pmap that is nested into grand
1674 * vstart = start of range in grand to be nested
1675 * nstart = start of the corresponding range in subord
1676 * size = size of nest area (up to 2TB)
1677 *
1678 * Inserts a pmap into another. This is used to implement shared segments.
1679 * On the current PPC processors, this is limited to segment (256MB) aligned
1680 * segment sized ranges.
1681 *
1682 * We actually kinda allow recursive nests. The gating factor is that we do not allow
1683 * nesting on top of something that is already mapped, i.e., the range must be empty.
1684 *
1685 * Note that we depend upon higher level VM locks to ensure that things don't change while
1686 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting,
1687 * nor performing two nests at once.
1688 */
1689
1690 kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {
1691
1692 addr64_t vend, colladdr;
1693 unsigned int msize;
1694 int nlists;
1695 mapping_t *mp;
1696
1697 if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this for multiples of 256MB */
1698 if((size >> 25) > 65536) return KERN_INVALID_VALUE; /* Max size we can nest is 2TB */
1699 if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
1700 if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
1701
1702 if(size == 0) { /* Is the size valid? */
1703 panic("pmap_nest: size is invalid - %016llX\n", size);
1704 }
1705
1706 msize = (size >> 25) - 1; /* Change size to blocks of 32MB */
1707
1708 nlists = mapSetLists(grand); /* Set number of lists this will be on */
1709
1710 mp = mapping_alloc(nlists); /* Get a spare mapping block */
1711
1712 mp->mpFlags = 0x01000000 | mpNest | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
1713 /* Set the flags. Make sure busy count is 1 */
1714 mp->mpSpace = subord->space; /* Set the address space/pmap lookup ID */
1715 mp->u.mpBSize = msize; /* Set the size */
1716 mp->mpPte = 0; /* Set the PTE invalid */
1717 mp->mpPAddr = 0; /* Set the physical page number */
1718 mp->mpVAddr = vstart; /* Set the address */
1719 mp->mpNestReloc = nstart - vstart; /* Set grand to nested vaddr relocation value */
1720
1721 colladdr = hw_add_map(grand, mp); /* Go add the mapping to the pmap */
1722
1723 if(colladdr) { /* Did it collide? */
1724 vend = vstart + size - 4096; /* Point to the last page we would cover in nest */
1725 panic("pmap_nest: attempt to nest into a non-empty range - pmap = %08X, start = %016llX, end = %016llX\n",
1726 grand, vstart, vend);
1727 }
1728
1729 return KERN_SUCCESS;
1730 }
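
/*
 * Editor's sketch (not part of the original source): a caller-side view of the
 * alignment rules pmap_nest() enforces.  task_pmap, shared_pmap, task_va, and
 * shared_va are hypothetical; the real in-tree user is pmap_map_sharedpage()
 * below, which nests a single 256MB segment at the top of the address space.
 */
#if 0	/* illustrative only */
static kern_return_t
example_nest_shared_segment(pmap_t task_pmap, pmap_t shared_pmap,
	addr64_t task_va, addr64_t shared_va)
{
	uint64_t size = 0x10000000ULL;			/* Exactly one 256MB segment */

	if((task_va | shared_va) & 0x0FFFFFFFULL)	/* Both addresses must be 256MB aligned */
		return KERN_INVALID_VALUE;

	return pmap_nest(task_pmap, shared_pmap, task_va, shared_va, size);
}
#endif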
1731
1732 /*
1733 * kern_return_t pmap_unnest(grand, vaddr)
1734 *
1735 * grand = the pmap that contains the nested range to be removed
1736 * vaddr = start of range in pmap to be unnested
1737 *
1738 * Removes a pmap from another. This is used to implement shared segments.
1739 * On the current PPC processors, this is limited to segment (256MB) aligned
1740 * segment sized ranges.
1741 */
1742
1743 kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {
1744
1745 unsigned int tstamp, i, mycpu;
1746 addr64_t nextva;
1747 spl_t s;
1748 mapping_t *mp;
1749
1750 s = splhigh(); /* Make sure interruptions are disabled */
1751
1752 mp = mapping_find(grand, vaddr, &nextva, 0); /* Find the nested map */
1753
1754 if(((unsigned int)mp & mapRetCode) != mapRtOK) { /* See if it was even nested */
1755 panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr);
1756 }
1757
1758 if((mp->mpFlags & mpType) != mpNest) { /* Did we find something other than a nest? */
1759 panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr);
1760 }
1761
1762 if(mp->mpVAddr != vaddr) { /* Make sure the address is the same */
1763 panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr);
1764 }
1765
1766 (void)hw_atomic_and(&mp->mpFlags, ~mpPerm); /* Show that this mapping is now removable */
1767
1768 mapping_drop_busy(mp); /* Go ahead and release the mapping now */
1769
1770 splx(s); /* Restore 'rupts */
1771
1772 (void)mapping_remove(grand, vaddr); /* Toss the nested pmap mapping */
1773
1774 invalidateSegs(grand); /* Invalidate the pmap segment cache */
1775
1776 /*
1777 * Note that the following will force the segment registers to be reloaded
1778 * on all processors (if they are using the pmap we just changed) before returning.
1779 *
1780 * This is needed. The reason is that until the segment register is
1781 * reloaded, another thread in the same task on a different processor will
1782 * be able to access memory that it isn't allowed to anymore. That can happen
1783 * because access to the subordinate pmap is being removed, but the pmap is still
1784 * valid.
1785 *
1786 * Note that we only kick the other processor if we see that it was using the pmap while we
1787 * were changing it.
1788 */
1789
1790
1791 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
1792 disable_preemption();
1793 mycpu = cpu_number(); /* Who am I? Am I just a dream? */
1794 if((unsigned int)grand == PerProcTable[i].ppe_vaddr->ppUserPmapVirt) { /* Is this guy using the changed pmap? */
1795
1796 PerProcTable[i].ppe_vaddr->ppInvSeg = 1; /* Show that we need to invalidate the segments */
1797
1798 if(i != mycpu) {
1799
1800 tstamp = PerProcTable[i].ppe_vaddr->ruptStamp[1]; /* Save the processor's last interrupt time stamp */
1801 if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) == KERN_SUCCESS) { /* Make sure we see the pmap change */
1802 if(!hw_cpu_wcng(&PerProcTable[i].ppe_vaddr->ruptStamp[1], tstamp, LockTimeOut)) { /* Wait for the other processor to take the interruption */
1803 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
1804 }
1805 }
1806 }
1807 }
1808 enable_preemption();
1809 }
1810
1811 return KERN_SUCCESS; /* Bye, bye, butterfly... */
1812 }
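
/*
 * Editor's sketch (not part of the original source): unnesting must use the
 * exact vstart that was passed to pmap_nest(), since pmap_unnest() panics if
 * the address is not the start of the nest.  task_pmap and task_va are
 * hypothetical and mirror the example after pmap_nest() above.
 */
#if 0	/* illustrative only */
static void
example_unnest_shared_segment(pmap_t task_pmap, addr64_t task_va)
{
	kern_return_t ret;

	ret = pmap_unnest(task_pmap, task_va);		/* task_va must be the original nest start */
	assert(ret == KERN_SUCCESS);
}
#endif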
1813
1814
1815 /*
1816 * void MapUserMemoryWindowInit(void)
1817 *
1818 * Initialize anything we need to in order to map user address space slices into
1819 * the kernel. Primarily used for copy in/out.
1820 *
1821 * Currently we only support one 512MB slot for this purpose. There are two special
1822 * mappings defined for it: the special pmap nest and the linkage mapping.
1823 *
1824 * The special pmap nest (which is allocated in this function) is used as a place holder
1825 * in the kernel's pmap search list. It is 512MB long and covers the address range
1826 * starting at lgUMWvaddr. It points to no actual memory and when the fault handler
1827 * hits in it, it knows to look in the per_proc and start using the linkage
1828 * mapping contained therein.
1829 *
1830 * The linkage mapping is used to glue the user address space slice into the
1831 * kernel. It contains the relocation information used to transform the faulting
1832 * kernel address into the user address space. It also provides the link to the
1833 * user's pmap. This is pointed to by the per_proc and is switched in and out
1834 * whenever there is a context switch.
1835 *
1836 */
1837
1838 void MapUserMemoryWindowInit(void) {
1839
1840 addr64_t colladdr;
1841 int nlists;
1842 mapping_t *mp;
1843
1844 nlists = mapSetLists(kernel_pmap); /* Set number of lists this will be on */
1845
1846 mp = mapping_alloc(nlists); /* Get a spare mapping block */
1847
1848 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
1849 /* Set the flags. Make sure busy count is 1 */
1850 mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
1851 mp->u.mpBSize = 15; /* Set the size to 2 segments in 32MB chunks - 1 */
1852 mp->mpPte = 0; /* Means nothing */
1853 mp->mpPAddr = 0; /* Means nothing */
1854 mp->mpVAddr = lowGlo.lgUMWvaddr; /* Set the address range we cover */
1855 mp->mpNestReloc = 0; /* Means nothing */
1856
1857 colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */
1858
1859 if(colladdr) { /* Did it collide? */
1860 panic("MapUserMemoryWindowInit: MapUserMemoryWindow range already mapped\n");
1861 }
1862
1863 return;
1864 }
1865
1866 /*
1867 * addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va)
1868 *
1869 * map = the vm_map that we are mapping into the kernel
1870 * va = start of the address range we are mapping
1871 * Note that we do not test validity; we choose to trust our fellows...
1872 *
1873 * Maps a 512M slice of a user address space into a predefined kernel range
1874 * on a per-thread basis. We map only the first 256M segment, allowing the
1875 * second 256M segment to fault in as needed. This allows our clients to access
1876 * an arbitrarily aligned operand up to 256M in size.
1877 *
1878 * In the future, the restriction of a predefined range may be loosened.
1879 *
1880 * Builds the proper linkage map to map the user range
1881 * We will round this down to the previous segment boundary and calculate
1882 * the relocation to the kernel slot
1883 *
1884 * We always make a segment table entry here if we need to. This is mainly because of
1885 * copyin/out and if we don't, there will be multiple segment faults for
1886 * each system call. I have seen upwards of 30000 per second.
1887 *
1888 * We do check, however, to see if the slice is already mapped and if so,
1889 * we just exit. This is done for performance reasons. It was found that
1890 * there was a considerable boost in copyin/out performance if we did not
1891 * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
1892 * restriction that you had to bracket MapUserMemoryWindow. Further, there
1893 * is an additional boost if you don't need to map it each time. The theory
1894 * behind this is that many times copies are to or from the same segment and
1895 * done multiple times within the same system call. To take advantage of that,
1896 * we check umwSpace and umwRelo to see if we've already got it.
1897 *
1898 * We also need to half-invalidate the slice when we context switch or go
1899 * back to user state. A half-invalidate does not clear the actual mapping,
1900 * but it does force the MapUserMemoryWindow function to reload the segment
1901 * register/SLBE. If this is not done, we can end up with some pretty severe
1902 * performance penalties. If we map a slice, and the cached space/relocation is
1903 * the same, we won't reload the segment registers. However, since we ran someone else,
1904 * our SR is cleared and we will take a fault. This is reasonable if we block
1905 * while copying (e.g., we took a page fault), but it is not reasonable when we
1906 * just start. For this reason, we half-invalidate to make sure that the SR is
1907 * explicitly reloaded.
1908 *
1909 * Note that we do not go to the trouble of making a pmap segment cache
1910 * entry for these guys because they are very short term -- 99.99% of the time
1911 * they will be unmapped before the next context switch.
1912 *
1913 */
1914
1915 addr64_t MapUserMemoryWindow(
1916 vm_map_t map,
1917 addr64_t va) {
1918
1919 addr64_t baddrs, reladd;
1920 thread_t thread;
1921 mapping_t *mp;
1922
1923 baddrs = va & 0xFFFFFFFFF0000000ULL; /* Isolate the segment */
1924 thread = current_thread(); /* Remember our activation */
1925
1926 reladd = baddrs - lowGlo.lgUMWvaddr; /* Get the relocation from user to kernel */
1927
1928 if((thread->machine.umwSpace == map->pmap->space) && (thread->machine.umwRelo == reladd)) { /* Already mapped? */
1929 return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr); /* Pass back the kernel address we are to use */
1930 }
1931
1932 disable_preemption(); /* Don't move... */
1933
1934 mp = (mapping_t *)&(getPerProc()->ppUMWmp); /* Make up for C */
1935 thread->machine.umwRelo = reladd; /* Relocation from user to kernel */
1936 mp->mpNestReloc = reladd; /* Relocation from user to kernel */
1937
1938 thread->machine.umwSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1939 mp->mpSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1940
1941 /*
1942 * Here we make an assumption that we are going to be using the base pmap's address space.
1943 * If we are wrong, and that would be very, very, very rare, the fault handler will fix us up.
1944 */
1945
1946 hw_map_seg(map->pmap, lowGlo.lgUMWvaddr, baddrs); /* Make the entry for the first segment */
1947
1948 enable_preemption(); /* Let's move */
1949 return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr); /* Pass back the kernel address we are to use */
1950 }
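
/*
 * Editor's sketch (not part of the original source): how a copyin/copyout
 * style caller might use MapUserMemoryWindow() to reach a user buffer through
 * the per-thread kernel window.  user_map, user_va, and offset are
 * hypothetical; the returned alias lies in the 512MB window at lgUMWvaddr, and
 * offset must keep the access within that window (operands up to 256MB, per
 * the comment above).
 */
#if 0	/* illustrative only */
static addr64_t
example_user_window_alias(vm_map_t user_map, addr64_t user_va, uint32_t offset)
{
	addr64_t kern_va;

	kern_va = MapUserMemoryWindow(user_map, user_va);	/* Map (or reuse) the slice for this segment */
	return kern_va + offset;				/* Kernel-visible alias of user_va + offset */
}
#endif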
1951
1952
1953 /*
1954 * vm_offset_t pmap_boot_map(size)
1955 *
1956 * size = size of virtual address range to be mapped
1957 *
1958 * This function is used to assign a range of virtual addresses before VM is
1959 * initialized. It starts at VM_MAX_KERNEL_ADDRESS and works downward.
1960 * The variable vm_last_addr contains the current highest possible VM
1961 * assignable address. It is a panic to attempt to call this after VM has
1962 * started up. The only problem is that we may not have the serial or
1963 * framebuffer mapped, so we'll never know we died.........
1964 */
1965
1966 vm_offset_t pmap_boot_map(vm_size_t size) {
1967
1968 if(kernel_map != VM_MAP_NULL) { /* Has VM already started? */
1969 panic("pmap_boot_map: VM started\n");
1970 }
1971
1972 size = round_page(size); /* Make sure this is in pages */
1973 vm_last_addr = vm_last_addr - size; /* Allocate the memory */
1974 return (vm_last_addr + 1); /* Return the vaddr we just allocated */
1975
1976 }
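
/*
 * Editor's sketch (not part of the original source): pmap_boot_map() hands out
 * virtual ranges downward from vm_last_addr, so successive calls return
 * contiguous, descending ranges.  Only legal before kernel_map exists.
 */
#if 0	/* illustrative only */
static void
example_boot_map_usage(void)
{
	vm_offset_t a, b;

	a = pmap_boot_map(2 * 4096);	/* Two pages carved off the top of the unassigned range */
	b = pmap_boot_map(4096);	/* One page immediately below the range at 'a' */
	assert(b + 4096 == a);		/* Allocations are contiguous and descend */
}
#endif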
1977
1978
1979 /*
1980 * void pmap_init_sharedpage(void);
1981 *
1982 * Hack map for the 64-bit commpage
1983 */
1984
1985 void pmap_init_sharedpage(vm_offset_t cpg){
1986
1987 addr64_t cva, cpoff;
1988 ppnum_t cpphys;
1989
1990 sharedPmap = pmap_create(0, FALSE); /* Get a pmap to hold the common segment */
1991 if(!sharedPmap) { /* Check for errors */
1992 panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
1993 }
1994
1995 for(cpoff = 0; cpoff < _COMM_PAGE_AREA_USED; cpoff += 4096) { /* Step along now */
1996
1997 cpphys = pmap_find_phys(kernel_pmap, (addr64_t)cpg + cpoff);
1998 if(!cpphys) {
1999 panic("pmap_init_sharedpage: compage %08X not mapped in kernel\n", cpg + cpoff);
2000 }
2001
2002 cva = mapping_make(sharedPmap, (addr64_t)((uint32_t)_COMM_PAGE_BASE_ADDRESS) + cpoff,
2003 cpphys, mmFlgPerm, 1, VM_PROT_READ | VM_PROT_EXECUTE); /* Map the page read/execute only */
2004 if(cva) { /* Check for errors */
2005 panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva);
2006 }
2007
2008 }
2009
2010 return;
2011 }
2012
2013
2014 /*
2015 * void pmap_map_sharedpage(pmap_t pmap);
2016 *
2017 * Maps the last segment in a 64-bit address space
2018 *
2019 *
2020 */
2021
2022 void pmap_map_sharedpage(task_t task, pmap_t pmap){
2023
2024 kern_return_t ret;
2025
2026 if(task_has_64BitAddr(task) || _cpu_capabilities & k64Bit) { /* Should we map the 64-bit commpage at segment -1? */
2027 ret = pmap_nest(pmap, sharedPmap, 0xFFFFFFFFF0000000ULL, 0x00000000F0000000ULL,
2028 0x0000000010000000ULL); /* Nest the highest possible segment to map comm page */
2029 if(ret != KERN_SUCCESS) { /* Did it work? */
2030 panic("pmap_map_sharedpage: couldn't nest shared page - ret = %08X\n", ret);
2031 }
2032 }
2033
2034 return;
2035 }
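
/*
 * Editor's sketch (not part of the original source): the nest set up above
 * relocates addresses by (nstart - vstart), so a user reference into the top
 * segment resolves within sharedPmap.  The constants mirror the pmap_nest()
 * call in pmap_map_sharedpage(); the helper itself is hypothetical.
 */
#if 0	/* illustrative only */
static addr64_t
example_commpage_nested_va(addr64_t user_va)
{
	addr64_t vstart = 0xFFFFFFFFF0000000ULL;	/* Where the nest appears in the task's pmap */
	addr64_t nstart = 0x00000000F0000000ULL;	/* Where the pages actually live in sharedPmap */

	return user_va + (nstart - vstart);		/* The mpNestReloc value applied when the nest is resolved */
}
#endif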
2036
2037
2038 /*
2039 * void pmap_unmap_sharedpage(pmap_t pmap);
2040 *
2041 * Unmaps the last segment in a 64-bit address space
2042 *
2043 */
2044
2045 void pmap_unmap_sharedpage(pmap_t pmap){
2046
2047 kern_return_t ret;
2048 mapping_t *mp;
2049 boolean_t inter;
2050 int gotnest;
2051 addr64_t nextva;
2052
2053 if(BootProcInfo.pf.Available & pf64Bit) { /* Are we on a 64-bit machine? */
2054
2055 inter = ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
2056 mp = hw_find_map(pmap, 0xFFFFFFFFF0000000ULL, &nextva); /* Find the mapping for this address */
2057 if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
2058 panic("pmap_unmap_sharedpage: mapping lock failure - rc = %08X, pmap = %08X\n", mp, pmap); /* Die... */
2059 }
2060
2061 gotnest = 0; /* Assume nothing here */
2062 if(mp) {
2063 gotnest = ((mp->mpFlags & mpType) == mpNest);
2064 /* Remember if we have a nest here */
2065 mapping_drop_busy(mp); /* We have everything we need from the mapping */
2066 }
2067 ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */
2068
2069 if(!gotnest) return; /* Leave if there isn't any nesting here */
2070
2071 ret = pmap_unnest(pmap, 0xFFFFFFFFF0000000ULL); /* Unnest the max 64-bit page */
2072
2073 if(ret != KERN_SUCCESS) { /* Did it work? */
2074 panic("pmap_unmap_sharedpage: couldn't unnest shared page - ret = %08X\n", ret);
2075 }
2076 }
2077
2078 return;
2079 }
2080
2081
2082 /* temporary workaround */
2083 boolean_t
2084 coredumpok(
2085 __unused vm_map_t map,
2086 __unused vm_offset_t va)
2087 {
2088 return TRUE;
2089 }
2090
2091
2092 /*
2093 * disable no-execute capability on
2094 * the specified pmap
2095 */
2096 void pmap_disable_NX(pmap_t pmap) {
2097
2098 pmap->pmapFlags |= pmapNXdisabled;
2099 }