/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).
 * Copyright (c) 1991,1987 Carnegie Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation,
 * and that all advertising materials mentioning features or use of
 * this software display the following acknowledgement: ``This product
 * includes software developed by the Center for Software Science at
 * the University of Utah.''
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Carnegie Mellon requests users of this software to return to
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * Utah $Hdr: pmap.c 1.28 92/06/23$
 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
 */

/*
 * Manages physical address maps for powerpc.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps. These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidation or protection-reduction
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to when physical maps must be made correct.
 *
 */

#include <zone_debug.h>
#include <cpus.h>
#include <debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <kern/spl.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/savearea.h>
#include <ppc/exception.h>
#include <ppc/low_trace.h>
#include <ddb/db_output.h>

extern unsigned int avail_remaining;
extern unsigned int mappingdeb0;
extern struct Saveanchor saveanchor;    /* Aligned savearea anchor */
extern int real_ncpus;                  /* Number of actual CPUs */
unsigned int debugbackpocket;           /* (TEST/DEBUG) */

vm_offset_t first_free_virt;
int current_free_region;                /* Used in pmap_next_page */

pmapTransTab *pmapTrans;                /* Point to the hash to pmap translations */
struct phys_entry *phys_table;

/* forward */
void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);

#if MACH_VM_DEBUG
int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
#endif

/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */

extern struct pmap kernel_pmap_store;
pmap_t kernel_pmap;                     /* Pointer to kernel pmap and anchor for in-use pmaps */
addr64_t kernel_pmap_phys;              /* Pointer to kernel pmap and anchor for in-use pmaps, physical address */
pmap_t cursor_pmap;                     /* Pointer to last pmap allocated or previous if removed from in-use list */
pmap_t sharedPmap;                      /* Pointer to common pmap for 64-bit address spaces */
struct zone *pmap_zone;                 /* zone of pmap structures */
boolean_t pmap_initialized = FALSE;

int ppc_max_pmaps;                      /* Maximum number of concurrent address spaces allowed. This is machine dependent */
addr64_t vm_max_address;                /* Maximum effective address supported */
addr64_t vm_max_physical;               /* Maximum physical address supported */

/*
 * Physical-to-virtual translations are handled by inverted page table
 * structures, phys_tables. Multiple mappings of a single page are handled
 * by linking the affected mapping structures. We initialise one region
 * for phys_tables of the physical memory we know about, but more may be
 * added as it is discovered (eg. by drivers).
 */

/*
 * free pmap list. caches the first free_pmap_max pmaps that are freed up
 */
int free_pmap_max = 32;
int free_pmap_count;
pmap_t free_pmap_list;
decl_simple_lock_data(,free_pmap_lock)

/*
 * Function to get index into phys_table for a given physical address
 */

struct phys_entry *pmap_find_physentry(ppnum_t pa)
{
    int i;
    unsigned int entry;

    for (i = pmap_mem_regions_count - 1; i >= 0; i--) {
        if (pa < pmap_mem_regions[i].mrStart) continue;     /* See if we fit in this region */
        if (pa > pmap_mem_regions[i].mrEnd) continue;       /* Check the end too */

        entry = (unsigned int)pmap_mem_regions[i].mrPhysTab + ((pa - pmap_mem_regions[i].mrStart) * sizeof(phys_entry));
        return (struct phys_entry *)entry;
    }
    // kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa);
    return 0;
}
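
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use pmap_find_physentry(). The helper name exampleShowPhysentry
 * and the kprintf formatting are hypothetical; only pmap_find_physentry
 * and the phys_entry structure above are real. Guarded out so it is
 * never compiled.
 */
#if 0
static void exampleShowPhysentry(ppnum_t pn) {
    struct phys_entry *pp;

    pp = pmap_find_physentry(pn);                           /* Look up the inverted table entry */
    if (!pp) {
        kprintf("page %08X is not in managed RAM\n", pn);
        return;
    }
    kprintf("page %08X: ppLink = %016llX\n", pn, pp->ppLink);   /* Mapping chain plus flag bits */
}
#endif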

/*
 * kern_return_t
 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
 *                          boolean_t available, unsigned int attr)
 *
 * THIS IS NOT SUPPORTED
 */
kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
                                       boolean_t available, unsigned int attr)
{

    panic("Forget it! You can't map no more memory, you greedy puke!\n");
    return KERN_SUCCESS;
}

/*
 * pmap_map(va, spa, epa, prot)
 *  is called during boot to map memory in the kernel's address map.
 *  A virtual address range starting at "va" is mapped to the physical
 *  address range "spa" to "epa" with machine independent protection
 *  "prot".
 *
 *  "va", "spa", and "epa" are byte addresses and must be on machine
 *  independent page boundaries.
 *
 *  The pages cover a contiguous virtual address range and share the same
 *  protection and attributes; therefore, we map the range with a single block.
 *
 *  Note that this call will only map into 32-bit space
 *
 */

vm_offset_t
pmap_map(
    vm_offset_t va,
    vm_offset_t spa,
    vm_offset_t epa,
    vm_prot_t prot)
{

    addr64_t colladr;

    if (spa == epa) return(va);

    assert(epa > spa);

    colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL);

    if(colladr) {                                           /* Was something already mapped in the range? */
        panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
            va, spa, epa, colladr);
    }
    return(va);
}
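
/*
 * Illustrative sketch (not part of the original source): a boot-time
 * caller mapping a V=R range with pmap_map(). The addresses are made up
 * for the example; only pmap_map() and its semantics are real.
 */
#if 0
static void exampleBootMap(void) {
    vm_offset_t start = 0x00100000;                         /* Hypothetical physical/virtual start */
    vm_offset_t end   = 0x00140000;                         /* Hypothetical end, 256KB later */

    (void)pmap_map(start, start, end, VM_PROT_READ | VM_PROT_WRITE);    /* V=R map; panics on collision */
}
#endif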

/*
 *  Bootstrap the system enough to run with virtual memory.
 *  Map the kernel's code and data, and allocate the system page table.
 *  Called with mapping done by BATs. Page_size must already be set.
 *
 *  Parameters:
 *  msize: Total memory present
 *  first_avail: First virtual address available
 *  kmapsize: Size of kernel text and data
 */
void
pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
{
    register struct mapping *mp;
    vm_offset_t addr;
    vm_size_t size;
    int i, num, j, rsize, mapsize, vmpagesz, vmmapsz, bank, nbits;
    uint64_t tmemsize;
    uint_t htslop;
    vm_offset_t first_used_addr, PCAsize;
    struct phys_entry *phys_table;

    *first_avail = round_page_32(*first_avail);             /* Make sure we start out on a page boundary */
    vm_last_addr = VM_MAX_KERNEL_ADDRESS;                   /* Set the highest address known to VM */

    /*
     * Initialize kernel pmap
     */
    kernel_pmap = &kernel_pmap_store;
    kernel_pmap_phys = (addr64_t)&kernel_pmap_store;
    cursor_pmap = &kernel_pmap_store;

    simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);

    kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;     /* Set up anchor forward */
    kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;     /* Set up anchor reverse */
    kernel_pmap->ref_count = 1;
    kernel_pmap->pmapFlags = pmapKeyDef;                    /* Set the default keys */
    kernel_pmap->pmapCCtl = pmapCCtlVal;                    /* Initialize cache control */
    kernel_pmap->space = PPC_SID_KERNEL;
    kernel_pmap->pmapvr = 0;                                /* Virtual = Real */

    /*
     * The hash table wants to have one pteg for every 2 physical pages.
     * We will allocate this in physical RAM, outside of kernel virtual memory,
     * at the top of the highest bank that will contain it.
     * Note that "bank" doesn't refer to a physical memory slot here, it is a range of
     * physically contiguous memory.
     *
     * The PCA will go there as well, immediately before the hash table.
     */

    nbits = cntlzw(((msize << 1) - 1) >> 32);               /* Get first bit in upper half */
    if(nbits == 32) nbits = nbits + cntlzw((uint_t)((msize << 1) - 1));    /* If upper half was empty, find bit in bottom half */
    tmemsize = 0x8000000000000000ULL >> nbits;              /* Get memory size rounded up to power of 2 */

    if(tmemsize > 0x0000002000000000ULL) tmemsize = 0x0000002000000000ULL; /* Make sure we don't make an unsupported hash table size */

    hash_table_size = (uint_t)(tmemsize >> 13) * per_proc_info[0].pf.pfPTEG;  /* Get provisional hash_table_size */
    if(hash_table_size < (256 * 1024)) hash_table_size = (256 * 1024);        /* Make sure we are at least minimum size */
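
    /*
     * Worked example (illustrative, not in the original source), assuming
     * a 64-byte PTEG (pf.pfPTEG == 64): msize = 96MB rounds up to
     * tmemsize = 128MB = 2^27, so hash_table_size = (2^27 >> 13) * 64 =
     * 16384 * 64 = 1MB, i.e. one PTEG (8 PTEs) for every two 4KB pages.
     * A 24MB machine would give (32MB >> 13) * 64 = 256KB, exactly the
     * enforced minimum above.
     */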

    while(1) {                                              /* Try to fit hash table in PCA into contiguous memory */

        if(hash_table_size < (256 * 1024)) {                /* Have we dropped too short? This should never, ever happen */
            panic("pmap_bootstrap: Can't find space for hash table\n");    /* This will never print, system isn't up far enough... */
        }

        PCAsize = (hash_table_size / per_proc_info[0].pf.pfPTEG) * sizeof(PCA);    /* Get total size of PCA table */
        PCAsize = round_page_32(PCAsize);                   /* Make sure it is at least a page long */

        for(bank = pmap_mem_regions_count - 1; bank >= 0; bank--) {    /* Search backwards through banks */

            hash_table_base = ((addr64_t)pmap_mem_regions[bank].mrEnd << 12) - hash_table_size + PAGE_SIZE;    /* Get tentative address */

            htslop = hash_table_base & (hash_table_size - 1);    /* Get the extra that we will round down when we align */
            hash_table_base = hash_table_base & -(addr64_t)hash_table_size;    /* Round down to correct boundary */

            if((hash_table_base - round_page_32(PCAsize)) >= ((addr64_t)pmap_mem_regions[bank].mrStart << 12)) break;    /* Leave if we fit */
        }

        if(bank >= 0) break;                                /* We are done if we found a suitable bank */

        hash_table_size = hash_table_size >> 1;             /* Try the next size down */
    }

    if(htslop) {                                            /* If there was slop (i.e., wasted pages for alignment) add a new region */
        for(i = pmap_mem_regions_count - 1; i >= bank; i--) {    /* Copy from end to our bank, including our bank */
            pmap_mem_regions[i + 1].mrStart = pmap_mem_regions[i].mrStart;      /* Set the start of the bank */
            pmap_mem_regions[i + 1].mrAStart = pmap_mem_regions[i].mrAStart;    /* Set the start of allocatable area */
            pmap_mem_regions[i + 1].mrEnd = pmap_mem_regions[i].mrEnd;          /* Set the end address of bank */
            pmap_mem_regions[i + 1].mrAEnd = pmap_mem_regions[i].mrAEnd;        /* Set the end address of allocatable area */
        }

        pmap_mem_regions[i + 1].mrStart = (hash_table_base + hash_table_size) >> 12;     /* Set the start of the next bank to the start of the slop area */
        pmap_mem_regions[i + 1].mrAStart = (hash_table_base + hash_table_size) >> 12;    /* Set the start of allocatable area to the start of the slop area */
        pmap_mem_regions[i].mrEnd = (hash_table_base + hash_table_size - 4096) >> 12;    /* Set the end of our bank to the end of the hash table */

    }

    pmap_mem_regions[bank].mrAEnd = (hash_table_base - PCAsize - 4096) >> 12;    /* Set the maximum allocatable in this bank */

    hw_hash_init();                                         /* Initialize the hash table and PCA */
    hw_setup_trans();                                       /* Set up hardware registers needed for translation */

    /*
     * The hash table is now all initialized and so is the PCA. Go on to do the rest of it.
     * This allocation is from the bottom up.
     */

    num = atop_64(msize);                                   /* Get number of pages in all of memory */

    /* Figure out how much we need to allocate */

    size = (vm_size_t) (
        (InitialSaveBloks * PAGE_SIZE) +                    /* Allow space for the initial context saveareas */
        (BackPocketSaveBloks * PAGE_SIZE) +                 /* For backpocket saveareas */
        trcWork.traceSize +                                 /* Size of trace table */
        ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) +    /* Size of pmap translate table */
        (((num * sizeof(struct phys_entry)) + 4095) & -4096)              /* For the physical entries */
    );

    mapsize = size = round_page_32(size);                   /* Get size of area to map that we just calculated */
    mapsize = mapsize + kmapsize;                           /* Account for the kernel text size */

    vmpagesz = round_page_32(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
    vmmapsz = round_page_32((num / 8) * sizeof(struct vm_map_entry));    /* Allow for vm_maps */

    mapsize = mapsize + vmpagesz + vmmapsz;                 /* Add the VM system estimates into the grand total */

    mapsize = mapsize + (4 * 1024 * 1024);                  /* Allow for 4 meg of extra mappings */
    mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;    /* Get number of blocks of mappings we need */
    mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);      /* Account for the mappings themselves */

    size = size + (mapsize * PAGE_SIZE);                    /* Get the true size we need */

    /* hash table must be aligned to its size */

    addr = *first_avail;                                    /* Set the address to start allocations */
    first_used_addr = addr;                                 /* Remember where we started */

    bzero((char *)addr, size);                              /* Clear everything that we are allocating */

    savearea_init(addr);                                    /* Initialize the savearea chains and data */

    addr = (vm_offset_t)((unsigned int)addr + ((InitialSaveBloks + BackPocketSaveBloks) * PAGE_SIZE));    /* Point past saveareas */

    trcWork.traceCurr = (unsigned int)addr;                 /* Set first trace slot to use */
    trcWork.traceStart = (unsigned int)addr;                /* Set start of trace table */
    trcWork.traceEnd = (unsigned int)addr + trcWork.traceSize;    /* Set end of trace table */

    addr = (vm_offset_t)trcWork.traceEnd;                   /* Set next allocatable location */

    pmapTrans = (pmapTransTab *)addr;                       /* Point to the pmap to hash translation table */

    pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap);    /* Initialize the kernel pmap in the translate table */
    pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap);  /* Initialize the kernel pmap in the translate table */

    addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096);    /* Point past pmap translate table */

    /* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */

    phys_table = (struct phys_entry *) addr;                /* Get pointer to physical table */

    for (bank = 0; bank < pmap_mem_regions_count; bank++) { /* Set pointer and initialize all banks of ram */

        pmap_mem_regions[bank].mrPhysTab = phys_table;      /* Set pointer to the physical table for this bank */

        phys_table = phys_table + (pmap_mem_regions[bank].mrEnd - pmap_mem_regions[bank].mrStart + 1);    /* Point to the next */
    }

    addr += (((num * sizeof(struct phys_entry)) + 4095) & -4096);    /* Step on past the physical entries */

    /*
     * Remaining space is for mapping entries. Tell the initializer routine that
     * the mapping system can't release this block because it's permanently assigned
     */

    mapping_init();                                         /* Initialize the mapping tables */

    for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) {    /* Add initial mapping blocks */
        mapping_free_init(i, 1, 0);                         /* Pass block address and say that this one is not releasable */
    }
    mapCtl.mapcmin = MAPPERBLOK;                            /* Make sure we only adjust one at a time */

    /* Map V=R the page tables */
    pmap_map(first_used_addr, first_used_addr,
         round_page_32(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);

    *first_avail = round_page_32(first_used_addr + size);   /* Set next available page */
    first_free_virt = *first_avail;                         /* Ditto */

    /* All the rest of memory is free - add it to the free
     * regions so that it can be allocated by pmap_steal
     */

    pmap_mem_regions[0].mrAStart = (*first_avail >> 12);    /* Set up the free area to start allocations (always in the first bank) */

    current_free_region = 0;                                /* Set that we will start allocating in bank 0 */
    avail_remaining = 0;                                    /* Clear free page count */
    for(bank = 0; bank < pmap_mem_regions_count; bank++) {  /* Total up all of the pages in the system that are available */
        avail_remaining += (pmap_mem_regions[bank].mrAEnd - pmap_mem_regions[bank].mrAStart) + 1;    /* Add in allocatable pages in this bank */
    }

}

/*
 * pmap_init(spa, epa)
 *  finishes the initialization of the pmap module.
 *  This procedure is called from vm_mem_init() in vm/vm_init.c
 *  to initialize any remaining data structures that the pmap module
 *  needs to map virtual memory (VM is already ON).
 *
 *  Note that the pmap needs to be sized and aligned to
 *  a power of two.  This is because it is used both in virtual and
 *  real so it can't span a page boundary.
 */

void
pmap_init(void)
{

    addr64_t cva;

    pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
#if ZONE_DEBUG
    zone_debug_disable(pmap_zone);      /* Can't debug this one 'cause it messes with size and alignment */
#endif  /* ZONE_DEBUG */

    pmap_initialized = TRUE;

    /*
     *  Initialize list of freed up pmaps
     */
    free_pmap_list = 0;                 /* Set that there are no free pmaps */
    free_pmap_count = 0;
    simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);

}

unsigned int pmap_free_pages(void)
{
    return avail_remaining;
}

/*
 * This function allocates physical pages.
 */

/* Non-optimal, but only used for virtual memory startup.
 * Allocate memory from a table of free physical addresses
 * If there are no more free entries, too bad.
 */

boolean_t pmap_next_page(ppnum_t *addrp)
{
    int i;

    if(current_free_region >= pmap_mem_regions_count) return FALSE;    /* Return failure if we have used everything... */

    for(i = current_free_region; i < pmap_mem_regions_count; i++) {    /* Find the next bank with free pages */
        if(pmap_mem_regions[i].mrAStart <= pmap_mem_regions[i].mrAEnd) break;    /* Found one */
    }

    current_free_region = i;                                /* Set our current bank */
    if(i >= pmap_mem_regions_count) return FALSE;           /* Couldn't find a free page */

    *addrp = pmap_mem_regions[i].mrAStart;                  /* Allocate the page */
    pmap_mem_regions[i].mrAStart = pmap_mem_regions[i].mrAStart + 1;    /* Set the next one to go */
    avail_remaining--;                                      /* Drop free count */

    return TRUE;
}

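/*
 * Illustrative sketch (not part of the original source): VM startup
 * grabs pages one at a time with pmap_next_page() until it fails. The
 * helper name and the counter are hypothetical.
 */
#if 0
static unsigned int exampleCountStartupPages(void) {
    ppnum_t pn;
    unsigned int grabbed = 0;

    while(pmap_next_page(&pn)) grabbed++;                   /* Take pages until the banks are exhausted */
    return grabbed;                                         /* Everything left was handed out */
}
#endif
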
void pmap_virtual_space(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    *startp = round_page_32(first_free_virt);
    *endp   = vm_last_addr;
}

/*
 * pmap_create
 *
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * A pmap is either in the free list or in the in-use list. The only use
 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
 * Whenever a new pmap is allocated (i.e., not recovered from the free list),
 * the in-use list is searched until a hole in the VSID sequence is found. (Note
 * that the in-use pmaps are queued in VSID sequence order.) This is all done
 * while free_pmap_lock is held.
 *
 * If the size specified is non-zero, the map will be used in software
 * only, and is bounded by that size.
 */
pmap_t
pmap_create(vm_size_t size)
{
    pmap_t pmap, ckpmap, fore, aft;
    int s, i;
    unsigned int currSID, hspace;
    addr64_t physpmap;

    /*
     * A software use-only map doesn't even need a pmap structure.
     */
    if (size)
        return(PMAP_NULL);

    /*
     * If there is a pmap in the pmap free list, reuse it.
     * Note that we use free_pmap_list for all chaining of pmaps, both to
     * the free list and the in use chain (anchored from kernel_pmap).
     */
    s = splhigh();
    simple_lock(&free_pmap_lock);

    if(free_pmap_list) {                                    /* Any free? */
        pmap = free_pmap_list;                              /* Yes, allocate it */
        free_pmap_list = (pmap_t)pmap->freepmap;            /* Dequeue this one (we chain free ones through freepmap) */
        free_pmap_count--;
    }
    else {
        simple_unlock(&free_pmap_lock);                     /* Unlock just in case */
        splx(s);

        pmap = (pmap_t) zalloc(pmap_zone);                  /* Get one */
        if (pmap == PMAP_NULL) return(PMAP_NULL);           /* Handle out-of-memory condition */

        bzero((char *)pmap, pmapSize);                      /* Clean up the pmap */

        s = splhigh();
        simple_lock(&free_pmap_lock);                       /* Lock it back up */

        ckpmap = cursor_pmap;                               /* Get starting point for free ID search */
        currSID = ckpmap->spaceNum;                         /* Get the actual space ID number */

        while(1) {                                          /* Keep trying until something happens */

            currSID = (currSID + 1) & (maxAdrSp - 1);       /* Get the next in the sequence */
            if(((currSID * incrVSID) & (maxAdrSp - 1)) == invalSpace) continue;    /* Skip the space we have reserved */
            ckpmap = (pmap_t)ckpmap->pmap_link.next;        /* On to the next in-use pmap */

            if(ckpmap->spaceNum != currSID) break;          /* If we are out of sequence, this is free */

            if(ckpmap == cursor_pmap) {                     /* See if we have 2^20 already allocated */
                panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp);    /* Die pig dog */
            }
        }

        pmap->space = (currSID * incrVSID) & (maxAdrSp - 1);    /* Calculate the actual VSID */
        pmap->spaceNum = currSID;                           /* Set the space ID number */
        /*
         * Now we link into the chain just before the out of sequence guy.
         */

        fore = (pmap_t)ckpmap->pmap_link.prev;              /* Get the current's previous */
        pmap->pmap_link.next = (queue_t)ckpmap;             /* My next points to the current */
        fore->pmap_link.next = (queue_t)pmap;               /* Current's previous's next points to me */
        pmap->pmap_link.prev = (queue_t)fore;               /* My prev points to what the current pointed to */
        ckpmap->pmap_link.prev = (queue_t)pmap;             /* Current's prev points to me */

        simple_lock_init(&pmap->lock, ETAP_VM_PMAP);

        physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF);    /* Get the physical address of the pmap */

        pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap;    /* Make V to R translation mask */

        pmapTrans[pmap->space].pmapPAddr = physpmap;        /* Set translate table physical to point to us */
        pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap);    /* Set translate table virtual to point to us */
    }

    pmap->pmapFlags = pmapKeyDef;                           /* Set default key */
    pmap->pmapCCtl = pmapCCtlVal;                           /* Initialize cache control */
    pmap->ref_count = 1;
    pmap->stats.resident_count = 0;
    pmap->stats.wired_count = 0;
    pmap->pmapSCSubTag = 0x0000000000000000ULL;             /* Make sure this is clean and tidy */
    simple_unlock(&free_pmap_lock);

    splx(s);
    return(pmap);
}
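
/*
 * Illustrative sketch (not part of the original source): the VSID search
 * above hands out space IDs in the fixed sequence (n * incrVSID) mod
 * maxAdrSp, so consecutive address spaces get widely separated VSIDs and
 * a hole in the sequence marks a reusable ID. This helper just prints
 * the first few values of that sequence; the name is hypothetical.
 */
#if 0
static void exampleShowVSIDSequence(void) {
    unsigned int n, currSID;

    for(n = 1; n <= 4; n++) {
        currSID = n & (maxAdrSp - 1);                       /* Sequence number, wrapped to the space count */
        kprintf("spaceNum %d -> VSID %08X\n", currSID, (currSID * incrVSID) & (maxAdrSp - 1));
    }
}
#endif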

/*
 * pmap_destroy
 *
 * Gives up a reference to the specified pmap.  When the reference count
 * reaches zero the pmap structure is added to the pmap free list.
 *
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
    int ref_count;
    spl_t s;
    pmap_t fore, aft;

    if (pmap == PMAP_NULL)
        return;

    ref_count = hw_atomic_sub(&pmap->ref_count, 1);         /* Back off the count */
    if(ref_count > 0) return;                               /* Still more users, leave now... */

    if(ref_count < 0)                                       /* Did we go too far? */
        panic("pmap_destroy(): ref_count < 0");

#ifdef notdef
    if(pmap->stats.resident_count != 0)
        panic("PMAP_DESTROY: pmap not empty");
#else
    if(pmap->stats.resident_count != 0) {
        pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
    }
#endif

    /*
     * Add the pmap to the pmap free list.
     */

    s = splhigh();
    simple_lock(&free_pmap_lock);

    if (free_pmap_count <= free_pmap_max) {                 /* Do we have enough spares? */

        pmap->freepmap = free_pmap_list;                    /* Queue in front */
        free_pmap_list = pmap;
        free_pmap_count++;
        simple_unlock(&free_pmap_lock);

    } else {
        if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev;    /* If we are releasing the cursor, back up */
        fore = (pmap_t)pmap->pmap_link.prev;
        aft  = (pmap_t)pmap->pmap_link.next;
        fore->pmap_link.next = pmap->pmap_link.next;        /* My previous's next is my next */
        aft->pmap_link.prev = pmap->pmap_link.prev;         /* My next's previous is my previous */
        simple_unlock(&free_pmap_lock);
        pmapTrans[pmap->space].pmapPAddr = -1;              /* Invalidate the translate table physical */
        pmapTrans[pmap->space].pmapVAddr = -1;              /* Invalidate the translate table virtual */
        zfree(pmap_zone, (vm_offset_t) pmap);
    }
    splx(s);
}

/*
 * pmap_reference(pmap)
 *  gains a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
    spl_t s;

    if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1);    /* Bump the count */
}

/*
 * pmap_remove_some_phys
 *
 *  Removes mappings of the associated page from the specified pmap
 *
 */
void pmap_remove_some_phys(
     pmap_t pmap,
     vm_offset_t pa)
{
    register struct phys_entry *pp;
    register struct mapping *mp;
    unsigned int pindex;

    if (pmap == PMAP_NULL) {                                /* This should never be called with a null pmap */
        panic("pmap_remove_some_phys: null pmap\n");
    }

    pp = mapping_phys_lookup(pa, &pindex);                  /* Get physical entry */
    if (pp == 0) return;                                    /* Leave if not in physical RAM */

    while(1) {                                              /* Keep going until we toss all pages from this pmap */
        if (pmap->pmapFlags & pmapVMhost) {
            mp = hw_purge_phys(pp);                         /* Toss a map */
            if(!mp) return;
            if((unsigned int)mp & mapRetCode) {             /* Was there a failure? */
                panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %08X, pmap = %08X, code = %08X\n",
                    pp, pmap, mp);
            }
        } else {
            mp = hw_purge_space(pp, pmap);                  /* Toss a map */
            if(!mp) return;
            if((unsigned int)mp & mapRetCode) {             /* Was there a failure? */
                panic("pmap_remove_some_phys: hw_purge_space failed - pp = %08X, pmap = %08X, code = %08X\n",
                    pp, pmap, mp);
            }
        }
        mapping_free(mp);                                   /* Toss the mapping */
    }

    return;                                                 /* Leave... */
}

/*
 * pmap_remove(pmap, s, e)
 *  unmaps all virtual addresses v in the virtual address
 *  range determined by [s, e) and pmap.
 *  s and e must be on machine independent page boundaries and
 *  s must be less than or equal to e.
 *
 *  Note that pmap_remove does not remove any mappings in nested pmaps. We just
 *  skip those segments.
 */
void
pmap_remove(
        pmap_t pmap,
        addr64_t sva,
        addr64_t eva)
{
    addr64_t va, endva;

    if (pmap == PMAP_NULL) return;                          /* Leave if software pmap */


    /* It is just possible that eva might have wrapped around to zero,
     * and sometimes we get asked to liberate something of size zero
     * even though it's dumb (e.g. after zero length read_overwrites)
     */
    assert(eva >= sva);

    /* If these are not page aligned the loop might not terminate */
    assert((sva == trunc_page_64(sva)) && (eva == trunc_page_64(eva)));

    va = sva & -4096LL;                                     /* Round start down to a page */
    endva = eva & -4096LL;                                  /* Round end down to a page */

    while(1) {                                              /* Go until we finish the range */
        va = mapping_remove(pmap, va);                      /* Remove the mapping and see what's next */
        va = va & -4096LL;                                  /* Make sure the "not found" indication is clear */
        if((va == 0) || (va >= endva)) break;               /* End loop if we finish range or run off the end */
    }

}

/*
 *  Routine:
 *      pmap_page_protect
 *
 *  Function:
 *      Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(
    ppnum_t pa,
    vm_prot_t prot)
{
    register struct phys_entry *pp;
    boolean_t remove;
    unsigned int pindex;
    mapping *mp;


    switch (prot) {
        case VM_PROT_READ:
        case VM_PROT_READ|VM_PROT_EXECUTE:
            remove = FALSE;
            break;
        case VM_PROT_ALL:
            return;
        default:
            remove = TRUE;
            break;
    }


    pp = mapping_phys_lookup(pa, &pindex);                  /* Get physical entry */
    if (pp == 0) return;                                    /* Leave if not in physical RAM */

    if (remove) {                                           /* If the protection was set to none, we'll remove all mappings */

        while(1) {                                          /* Keep going until we toss all pages from this physical page */
            mp = hw_purge_phys(pp);                         /* Toss a map */
            if(!mp) return;
            if((unsigned int)mp & mapRetCode) {             /* Was there a failure? */
                panic("pmap_page_protect: hw_purge_phys failed - pp = %08X, code = %08X\n",
                    pp, mp);
            }
            mapping_free(mp);                               /* Toss the mapping */
        }

        return;                                             /* Leave... */
    }

    /* When we get here, it means that we are to change the protection for a
     * physical page.
     */

    mapping_protect_phys(pa, prot & VM_PROT_ALL);           /* Change protection of all mappings to page. */

}
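
/*
 * Illustrative sketch (not part of the original source): how the VM layer
 * typically drives pmap_page_protect(). A read-only protection downgrades
 * every mapping of the page in place; VM_PROT_NONE strips all of them.
 * The page number is hypothetical.
 */
#if 0
static void examplePageProtect(ppnum_t pn) {
    pmap_page_protect(pn, VM_PROT_READ);                    /* Downgrade all mappings of pn to read-only */
    pmap_page_protect(pn, VM_PROT_NONE);                    /* Remove every remaining mapping of pn */
}
#endif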

/*
 * pmap_protect(pmap, s, e, prot)
 *  changes the protection on all virtual addresses v in the
 *  virtual address range determined by [s, e] and pmap to prot.
 *  s and e must be on machine independent page boundaries and
 *  s must be less than or equal to e.
 *
 *  Note that any requests to change the protection of a nested pmap are
 *  ignored. Those changes MUST be done by calling this with the correct pmap.
 */
void pmap_protect(
     pmap_t pmap,
     vm_offset_t sva,
     vm_offset_t eva,
     vm_prot_t prot)
{

    addr64_t va, endva, nextva;

    if (pmap == PMAP_NULL) return;                          /* Do nothing if no pmap */

    if (prot == VM_PROT_NONE) {                             /* Should we kill the address range?? */
        pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva);    /* Yeah, dump 'em */
        return;                                             /* Leave... */
    }

    va = sva & -4096LL;                                     /* Round start down to a page */
    endva = eva & -4096LL;                                  /* Round end down to a page */

    while(1) {                                              /* Go until we finish the range */
        (void)mapping_protect(pmap, va, prot & VM_PROT_ALL, &va);    /* Change the protection and see what's next */
        if((va == 0) || (va >= endva)) break;               /* End loop if we finish range or run off the end */
    }

}


/*
 * pmap_enter
 *
 * Create a translation for the virtual address (virt) to the physical
 * address (phys) in the pmap with the protection requested. If the
 * translation is wired then we can not allow a full page fault, i.e.,
 * the mapping control block is not eligible to be stolen in a low memory
 * condition.
 *
 * NB: This is the only routine which MAY NOT lazy-evaluate
 *     or lose information.  That is, this routine must actually
 *     insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, ppnum_t pa, vm_prot_t prot,
        unsigned int flags, boolean_t wired)
{
    int memattr;
    pmap_t opmap;
    unsigned int mflags;
    addr64_t colva;

    if (pmap == PMAP_NULL) return;                          /* Leave if software pmap */

    disable_preemption();                                   /* Don't change threads */

    mflags = 0;                                             /* Make sure this is initialized to nothing special */
    if(!(flags & VM_WIMG_USE_DEFAULT)) {                    /* Are they supplying the attributes? */
        mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);    /* Convert to our mapping_make flags */
    }

    /*
     * It is possible to hang here if another processor is in the middle of
     * removing and remapping pages that we collide with.
     */

    while(1) {                                              /* Keep trying the enter until it goes in */

        colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL);    /* Enter the mapping into the pmap */

        if(!colva) break;                                   /* If there were no collisions, we are done... */

        mapping_remove(pmap, colva);                        /* Remove the mapping that collided */
    }

    enable_preemption();                                    /* Thread change ok */

}
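
/*
 * Illustrative sketch (not part of the original source): entering a page
 * with explicit WIMG attributes. Passing VM_WIMG_USE_DEFAULT would skip
 * the attribute conversion above; supplying VM_MEM_GUARDED and
 * VM_MEM_NOT_CACHEABLE instead yields a guarded, cache-inhibited mapping,
 * typical for device memory. The addresses are hypothetical.
 */
#if 0
static void exampleEnterDevicePage(pmap_t pmap) {
    pmap_enter(pmap, 0x20000000, (ppnum_t)0x80000,          /* Hypothetical va and physical page */
           VM_PROT_READ | VM_PROT_WRITE,
           VM_MEM_GUARDED | VM_MEM_NOT_CACHEABLE,           /* Explicit attributes, not the default */
           TRUE);                                           /* Wired; the mapping can't be stolen */
}
#endif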

/*
 *  Enters translations for odd-sized V=F blocks.
 *
 *  The higher level VM map should be locked to ensure that we don't have a
 *  double diddle here.
 *
 *  We panic if we get a block that overlaps with another. We do not merge adjacent
 *  blocks because removing any address within a block removes the entire block, and it
 *  would really mess things up if we trashed too much.
 *
 *  Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
 *  not be changed. The block must be unmapped and then remapped with the new stuff.
 *  We also do not keep track of reference or change flags.
 *
 *  Note that pmap_map_block_rc is the same but doesn't panic if collision.
 *
 */

void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {    /* Map an autogenned block */

    int memattr;
    unsigned int mflags;
    addr64_t colva;


    if (pmap == PMAP_NULL) {                                /* Did they give us a pmap? */
        panic("pmap_map_block: null pmap\n");               /* No, like that's dumb... */
    }

//  kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n",    /* (BRINGUP) */
//      current_act(), va, pa, size, prot, attr, flags);    /* (BRINGUP) */


    mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);    /* Convert to our mapping_make flags */
    if(flags) mflags |= mmFlgPerm;                          /* Mark permanent if requested */

    colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot);    /* Enter the mapping into the pmap */

    if(colva) {                                             /* If there was a collision, panic */
        panic("pmap_map_block: collision at %016llX, pmap = %08X\n", colva, pmap);
    }

    return;                                                 /* Return */
}

int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {    /* Map an autogenned block */

    int memattr;
    unsigned int mflags;
    addr64_t colva;


    if (pmap == PMAP_NULL) {                                /* Did they give us a pmap? */
        panic("pmap_map_block_rc: null pmap\n");            /* No, like that's dumb... */
    }

    mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1);    /* Convert to our mapping_make flags */
    if(flags) mflags |= mmFlgPerm;                          /* Mark permanent if requested */

    colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot);    /* Enter the mapping into the pmap */

    if(colva) return 0;                                     /* If there was a collision, fail */

    return 1;                                               /* Return true if we worked */
}
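
/*
 * Illustrative sketch (not part of the original source): mapping a
 * physically contiguous region, say a framebuffer aperture, as one block.
 * The addresses and size are hypothetical; only pmap_map_block() and its
 * attribute handling are real.
 */
#if 0
static void exampleMapFramebuffer(void) {
    pmap_map_block(kernel_pmap, (addr64_t)0xA0000000,       /* Hypothetical kernel va */
           (ppnum_t)0x98000,                                /* Hypothetical physical page of the aperture */
           8 * 1024 * 1024,                                 /* 8MB, covered by a single block mapping */
           VM_PROT_READ | VM_PROT_WRITE,
           VM_MEM_GUARDED | VM_MEM_NOT_CACHEABLE,           /* Device-style attributes */
           1);                                              /* Permanent; cannot be removed casually */
}
#endif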

/*
 * pmap_extract(pmap, va)
 *  returns the physical address corresponding to the
 *  virtual address specified by pmap and va if the
 *  virtual address is mapped and 0 if it is not.
 *  Note: we assume nothing is ever mapped to phys 0.
 *
 *  NOTE: This call always will fail for physical addresses greater than 0xFFFFF000.
 */
vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {

    spl_t spl;
    register struct mapping *mp;
    register vm_offset_t pa;
    addr64_t nextva;
    ppnum_t ppoffset;
    unsigned int gva;

#ifdef BOGUSCOMPAT
    panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n");    /* Don't use this */
#else

    gva = (unsigned int)va;                                 /* Make sure we don't have a sign */

    spl = splhigh();                                        /* We can't allow any loss of control here */

    mp = mapping_find(pmap, (addr64_t)gva, &nextva, 1);     /* Find the mapping for this address */

    if(!mp) {                                               /* Is the page mapped? */
        splx(spl);                                          /* Enable interrupts */
        return 0;                                           /* Pass back 0 if not found */
    }

    ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12);    /* Get offset from va to base va */


    pa = mp->mpPAddr + ppoffset;                            /* Remember ppage because mapping may vanish after drop call */

    mapping_drop_busy(mp);                                  /* We have everything we need from the mapping */
    splx(spl);                                              /* Restore 'rupts */

    if(pa > maxPPage32) return 0;                           /* Force large addresses to fail */

    pa = (pa << 12) | (va & 0xFFF);                         /* Convert physical page number to address */

#endif
    return pa;                                              /* Return physical address or 0 */
}

/*
 * ppnum_t pmap_find_phys(pmap, addr64_t va)
 *  returns the physical page corresponding to the
 *  virtual address specified by pmap and va if the
 *  virtual address is mapped and 0 if it is not.
 *  Note: we assume nothing is ever mapped to phys 0.
 *
 */
ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {

    spl_t spl;
    register struct mapping *mp;
    ppnum_t pa, ppoffset;
    addr64_t nextva, curva;

    spl = splhigh();                                        /* We can't allow any loss of control here */

    mp = mapping_find(pmap, va, &nextva, 1);                /* Find the mapping for this address */

    if(!mp) {                                               /* Is the page mapped? */
        splx(spl);                                          /* Enable interrupts */
        return 0;                                           /* Pass back 0 if not found */
    }


    ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12);    /* Get offset from va to base va */

    pa = mp->mpPAddr + ppoffset;                            /* Get the actual physical address */

    mapping_drop_busy(mp);                                  /* We have everything we need from the mapping */

    splx(spl);                                              /* Restore 'rupts */
    return pa;                                              /* Return physical address or 0 */
}

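/*
 * Illustrative sketch (not part of the original source): pmap_find_phys()
 * is the 64-bit-safe translation call. Unlike pmap_extract() above, it
 * returns a page number rather than a 32-bit address, so it works for
 * physical pages above 0xFFFFF000. The helper name is hypothetical.
 */
#if 0
static addr64_t exampleKvtophys64(addr64_t va) {
    ppnum_t pn;

    pn = pmap_find_phys(kernel_pmap, va);                   /* Translate; 0 means not mapped */
    if(!pn) return 0;
    return ((addr64_t)pn << 12) | (va & 0xFFF);             /* Rebuild the full 64-bit physical address */
}
#endif
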

/*
 *  pmap_attributes:
 *
 *  Set/Get special memory attributes; not implemented.
 *
 *  Note: 'VAL_GET_INFO' is used to return info about a page.
 *    If less than 1 page is specified, return the physical page
 *    mapping and a count of the number of mappings to that page.
 *    If more than one page is specified, return the number
 *    of resident pages and the number of shared (more than
 *    one mapping) pages in the range;
 *
 *
 */
kern_return_t
pmap_attribute(pmap, address, size, attribute, value)
    pmap_t                      pmap;
    vm_offset_t                 address;
    vm_size_t                   size;
    vm_machine_attribute_t      attribute;
    vm_machine_attribute_val_t* value;
{

    return KERN_INVALID_ARGUMENT;

}

/*
 * pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size, ...)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page
 */

kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size,
                vm_machine_attribute_t  attribute,
                vm_machine_attribute_val_t* value) {

    spl_t s;
    unsigned int i, npages;

    npages = round_page_32(size) >> 12;                     /* Get the number of pages to do */

    for(i = 0; i < npages; i++) {                           /* Do all requested pages */
        s = splhigh();                                      /* No interruptions here */
        sync_ppage(pp + i);                                 /* Go flush data cache and invalidate icache */
        splx(s);                                            /* Allow interruptions */
    }

    return KERN_SUCCESS;
}

/*
 * pmap_sync_caches_phys(ppnum_t pa)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page
 */

void pmap_sync_caches_phys(ppnum_t pa) {

    spl_t s;

    s = splhigh();                                          /* No interruptions here */
    sync_ppage(pa);                                         /* Sync up dem caches */
    splx(s);                                                /* Allow interruptions */
    return;
}

/*
 * pmap_collect
 *
 * Garbage collects the physical map system for pages that are no longer used.
 * It isn't implemented or needed or wanted.
 */
void
pmap_collect(pmap_t pmap)
{
    return;
}

/*
 *  Routine:    pmap_activate
 *  Function:
 *      Binds the given physical map to the given
 *      processor, and returns a hardware map description.
 *      It isn't implemented or needed or wanted.
 */
void
pmap_activate(
    pmap_t pmap,
    thread_t th,
    int which_cpu)
{
    return;
}
/*
 *  pmap_deactivate:
 *  It isn't implemented or needed or wanted.
 */
void
pmap_deactivate(
    pmap_t pmap,
    thread_t th,
    int which_cpu)
{
    return;
}


/*
 * pmap_pageable(pmap, s, e, pageable)
 *  Make the specified pages (by pmap, offset)
 *  pageable (or not) as requested.
 *
 *  A page which is not pageable may not take
 *  a fault; therefore, its page table entry
 *  must remain valid for the duration.
 *
 *  This routine is merely advisory; pmap_enter()
 *  will specify that these pages are to be wired
 *  down (or not) as appropriate.
 *
 *  (called from vm/vm_fault.c).
 */
void
pmap_pageable(
    pmap_t      pmap,
    vm_offset_t start,
    vm_offset_t end,
    boolean_t   pageable)
{

    return;                                                 /* This is not used... */

}
/*
 *  Routine:    pmap_change_wiring
 *  NOT USED ANYMORE.
 */
void
pmap_change_wiring(
    register pmap_t pmap,
    vm_offset_t     va,
    boolean_t       wired)
{
    return;                                                 /* This is not used... */
}

/*
 * pmap_modify_pages(pmap, s, e)
 *  sets the modified bit on all virtual addresses v in the
 *  virtual address range determined by [s, e] and pmap,
 *  s and e must be on machine independent page boundaries and
 *  s must be less than or equal to e.
 *
 *  Note that this function will not descend nested pmaps.
 */
void
pmap_modify_pages(
         pmap_t pmap,
         vm_offset_t sva,
         vm_offset_t eva)
{
    spl_t       spl;
    mapping     *mp;
    ppnum_t     pa;
    addr64_t    va, endva, nextva;
    unsigned int saveflags;

    if (pmap == PMAP_NULL) return;                          /* If no pmap, can't do it... */

    va = sva & -4096;                                       /* Round to page */
    endva = eva & -4096;                                    /* Round to page */

    while (va < endva) {                                    /* Walk through all pages */

        spl = splhigh();                                    /* We can't allow any loss of control here */

        mp = mapping_find(pmap, (addr64_t)va, &va, 0);      /* Find the mapping for this address */

        if(!mp) {                                           /* Is the page mapped? */
            splx(spl);                                      /* Page not mapped, restore interruptions */
            if((va == 0) || (va >= endva)) break;           /* We are done if there are no more or we hit the end... */
            continue;                                       /* We are not done and there is more to check... */
        }

        saveflags = mp->mpFlags;                            /* Remember the flags */
        pa = mp->mpPAddr;                                   /* Remember ppage because mapping may vanish after drop call */

        mapping_drop_busy(mp);                              /* We have everything we need from the mapping */

        splx(spl);                                          /* Restore 'rupts */

        if(saveflags & (mpNest | mpBlock)) continue;        /* Can't mess around with these guys... */

        mapping_set_mod(pa);                                /* Set the modified bit for this page */

        if(va == 0) break;                                  /* We hit the end of the pmap, might as well leave now... */
    }
    return;                                                 /* Leave... */
}

/*
 * pmap_clear_modify(phys)
 *  clears the hardware modified ("dirty") bit for one
 *  machine independent page starting at the given
 *  physical address.  phys must be aligned on a machine
 *  independent page boundary.
 */
void
pmap_clear_modify(vm_offset_t pa)
{

    mapping_clr_mod((ppnum_t)pa);                           /* Clear all change bits for physical page */

}

/*
 * pmap_is_modified(phys)
 *  returns TRUE if the given physical page has been modified
 *  since the last call to pmap_clear_modify().
 */
boolean_t
pmap_is_modified(register vm_offset_t pa)
{
    return mapping_tst_mod((ppnum_t)pa);                    /* Check for modified */

}

/*
 * pmap_clear_reference(phys)
 *  clears the hardware referenced bit in the given machine
 *  independent physical page.
 *
 */
void
pmap_clear_reference(vm_offset_t pa)
{
    mapping_clr_ref((ppnum_t)pa);                           /* Clear the reference bit */
}

/*
 * pmap_is_referenced(phys)
 *  returns TRUE if the given physical page has been referenced
 *  since the last call to pmap_clear_reference().
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
    return mapping_tst_ref((ppnum_t)pa);                    /* Check for referenced */
}

/*
 * pmap_canExecute(ppnum_t pa)
 *  returns 1 if instructions can execute
 *  returns 0 if we know they can not (i.e. guarded and/or non-executable set)
 *  returns -1 if we don't know (i.e., the page is not RAM)
 */
int
pmap_canExecute(ppnum_t pa)
{
    phys_entry *physent;
    unsigned int pindex;

    physent = mapping_phys_lookup(pa, &pindex);             /* Get physical entry */

    if(!physent) return -1;                                 /* If there is no physical entry, we don't know... */

    if((physent->ppLink & (ppN | ppG))) return 0;           /* If we are marked non-executable or guarded, say we can not execute */
    return 1;                                               /* Good to go... */
}

#if MACH_VM_DEBUG
int
pmap_list_resident_pages(
    register pmap_t         pmap,
    register vm_offset_t    *listp,
    register int            space)
{
    return 0;
}
#endif  /* MACH_VM_DEBUG */

/*
 * Locking:
 *  spl: VM
 */
void
pmap_copy_part_page(
    vm_offset_t src,
    vm_offset_t src_offset,
    vm_offset_t dst,
    vm_offset_t dst_offset,
    vm_size_t   len)
{
    register struct phys_entry *pp_src, *pp_dst;
    spl_t   s;
    addr64_t fsrc, fdst;

    /* Parenthesized so the page offset plus length is what gets bounded;
       "+" binds tighter than "&" and the unparenthesized form checked the
       wrong expression */
    assert((((dst << 12) & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
    assert((((src << 12) & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

    fsrc = ((addr64_t)src << 12) + src_offset;
    fdst = ((addr64_t)dst << 12) + dst_offset;

    phys_copy(fsrc, fdst, len);                             /* Copy the stuff physically */
}

void
pmap_zero_part_page(
    vm_offset_t p,
    vm_offset_t offset,
    vm_size_t   len)
{
    panic("pmap_zero_part_page");
}

boolean_t pmap_verify_free(ppnum_t pa) {

    struct phys_entry *pp;
    unsigned int pindex;

    pp = mapping_phys_lookup(pa, &pindex);                  /* Get physical entry */
    if (pp == 0) return FALSE;                              /* If there isn't one, show no mapping... */

    if(pp->ppLink & ~(ppLock | ppN | ppFlags)) return TRUE; /* We have at least one mapping */
    return FALSE;                                           /* No mappings */
}

/* Determine if we need to switch space and set up for it if so */

void pmap_switch(pmap_t map)
{
    unsigned int i;


    hw_blow_seg(copyIOaddr);                                /* Blow off the first segment */
    hw_blow_seg(copyIOaddr + 0x10000000ULL);                /* Blow off the second segment */

    /* when changing to kernel space, don't bother
     * doing anything, the kernel is mapped from here already.
     */
    if (map->space == PPC_SID_KERNEL) {                     /* Are we switching into kernel space? */
        return;                                             /* If so, we don't do anything... */
    }

    hw_set_user_space(map);                                 /* Indicate if we need to load the SRs or not */
    return;                                                 /* Bye, bye, butterfly... */
}

/*
 * kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
 *
 * grand  = the pmap that we will nest subord into
 * subord = the pmap that goes into the grand
 * vstart = start of range in pmap to be inserted
 * nstart = start of range in the nested pmap
 * size   = Size of nest area (up to 16TB)
 *
 * Inserts a pmap into another.  This is used to implement shared segments.
 * On the current PPC processors, this is limited to segment (256MB) aligned
 * segment sized ranges.
 *
 * We actually kinda allow recursive nests. The gating factor is that we do not allow
 * nesting on top of something that is already mapped, i.e., the range must be empty.
 *
 * Note that we depend upon higher level VM locks to ensure that things don't change while
 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
 * or do 2 nests at once.
 */

kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {

    addr64_t nextva, vend, colladdr;
    unsigned int msize;
    int i, nlists, asize;
    spl_t s;
    mapping *mp;


    if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;     /* We can only do this for multiples of 256MB */
    if((size >> 28) > 65536) return KERN_INVALID_VALUE;     /* Max size we can nest is 16TB */
    if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */
    if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */

    if(size == 0) {                                         /* Is the size valid? */
        panic("pmap_nest: size is invalid - %016llX\n", size);
    }

    msize = (size >> 28) - 1;                               /* Change size to blocks of 256MB */

    nlists = mapSetLists(grand);                            /* Set number of lists this will be on */

    mp = mapping_alloc(nlists);                             /* Get a spare mapping block */

    mp->mpFlags = 0x01000000 | mpNest | nlists;             /* Set the flags. Make sure busy count is 1 */
    mp->mpSpace = subord->space;                            /* Set the address space/pmap lookup ID */
    mp->mpBSize = msize;                                    /* Set the size */
    mp->mpPte = 0;                                          /* Set the PTE invalid */
    mp->mpPAddr = 0;                                        /* Set the physical page number */
    mp->mpVAddr = vstart;                                   /* Set the address */
    mp->mpNestReloc = nstart - vstart;                      /* Set grand to nested vaddr relocation value */

    colladdr = hw_add_map(grand, mp);                       /* Go add the mapping to the pmap */

    if(colladdr) {                                          /* Did it collide? */
        vend = vstart + size - 4096;                        /* Point to the last page we would cover in nest */
        panic("pmap_nest: attempt to nest into a non-empty range - pmap = %08X, start = %016llX, end = %016llX\n",
            grand, vstart, vend);
    }

    return KERN_SUCCESS;
}
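
/*
 * Illustrative sketch (not part of the original source): nesting a shared
 * submap into a task's pmap. Both the range and the size must be 256MB
 * aligned; here one 256MB segment of sharedPmap is nested at the same
 * address in the target. The chosen address is hypothetical.
 */
#if 0
static void exampleNestSharedSegment(pmap_t task_pmap) {
    kern_return_t ret;

    ret = pmap_nest(task_pmap, sharedPmap,                  /* Nest sharedPmap into the task */
          0x90000000ULL,                                    /* 256MB-aligned address in the grand pmap */
          0x90000000ULL,                                    /* Same 256MB-aligned address in the subordinate */
          0x10000000ULL);                                   /* Exactly one 256MB segment */
    if(ret != KERN_SUCCESS) panic("exampleNestSharedSegment: pmap_nest failed\n");
}
#endif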

/*
 * kern_return_t pmap_unnest(grand, vaddr)
 *
 * grand = the pmap that we will un-nest the subordinate from
 * vaddr = start of range in pmap to be unnested
 *
 * Removes a pmap from another.  This is used to implement shared segments.
 * On the current PPC processors, this is limited to segment (256MB) aligned
 * segment sized ranges.
 */

kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {

    unsigned int oflags, seg, grandr, tstamp;
    int i, tcpu, mycpu;
    addr64_t nextva;
    spl_t s;
    mapping *mp;

    s = splhigh();                                          /* Make sure interruptions are disabled */

    mp = mapping_find(grand, vaddr, &nextva, 0);            /* Find the nested map */

    if(((unsigned int)mp & mapRetCode) != mapRtOK) {        /* See if it was even nested */
        panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr);
    }

    if(!(mp->mpFlags & mpNest)) {                           /* Did we find something other than a nest? */
        panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr);
    }

    if(mp->mpVAddr != vaddr) {                              /* Make sure the address is the same */
        panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr);
    }

    (void)hw_atomic_or(&mp->mpFlags, mpRemovable);          /* Show that this mapping is now removable */

    mapping_drop_busy(mp);                                  /* Go ahead and release the mapping now */

    disable_preemption();                                   /* It's all for me! */
    splx(s);                                                /* Restore 'rupts */

    (void)mapping_remove(grand, vaddr);                     /* Toss the nested pmap mapping */

    invalidateSegs(grand);                                  /* Invalidate the pmap segment cache */

    /*
     *  Note that the following will force the segment registers to be reloaded
     *  on all processors (if they are using the pmap we just changed) before returning.
     *
     *  This is needed.  The reason is that until the segment register is
     *  reloaded, another thread in the same task on a different processor will
     *  be able to access memory that it isn't allowed to anymore.  That can happen
     *  because access to the subordinate pmap is being removed, but the pmap is still
     *  valid.
     *
     *  Note that we only kick the other processor if we see that it was using the pmap while we
     *  were changing it.
     */


    mycpu = cpu_number();                                   /* Who am I? Am I just a dream? */
    for(i = 0; i < real_ncpus; i++) {                       /* Cycle through processors */
        if((unsigned int)grand == per_proc_info[i].ppUserPmapVirt) {    /* Is this guy using the changed pmap? */

            per_proc_info[i].ppInvSeg = 1;                  /* Show that we need to invalidate the segments */

            if(i == mycpu) continue;                        /* Don't diddle ourselves */

            tstamp = per_proc_info[i].ruptStamp[1];         /* Save the processor's last interrupt time stamp */
            if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) != KERN_SUCCESS) {    /* Make sure we see the pmap change */
                continue;
            }

            if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) {    /* Wait for the other processor to take the interruption */
                panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
            }
        }
    }

    enable_preemption();                                    /* Others can run now */
    return KERN_SUCCESS;                                    /* Bye, bye, butterfly... */
}
1591
1592
1593 /*
1594 * void MapUserAddressSpaceInit(void)
1595 *
1596 * Initializes anything we need in order to map user address space slices into
1597 * the kernel. Primarily used for copy in/out.
1598 *
1599 * Currently we only support one 512MB slot for this purpose. There are two special
1600 * mappings defined for the purpose: the special pmap nest and the linkage mapping.
1601 *
1602 * The special pmap nest (which is allocated in this function) is used as a placeholder
1603 * in the kernel's pmap search list. It is 512MB long and covers the address range
1604 * starting at copyIOaddr. It points to no actual memory, and when the fault handler
1605 * hits in it, it knows to look in the per_proc and start using the linkage
1606 * mapping contained therein.
1607 *
1608 * The linkage mapping is used to glue the user address space slice into the
1609 * kernel. It contains the relocation information used to transform the faulting
1610 * kernel address into the user address space. It also provides the link to the
1611 * user's pmap. This is pointed to by the per_proc and is switched in and out
1612 * whenever there is a context switch.
1613 *
1614 */
1615
1616 void MapUserAddressSpaceInit(void) {
1617
1618 addr64_t colladdr;
1619 int nlists, asize;
1620 mapping *mp;
1621
1622 nlists = mapSetLists(kernel_pmap); /* Set number of lists this will be on */
1623
1624 mp = mapping_alloc(nlists); /* Get a spare mapping block */
1625
1626 mp->mpFlags = 0x01000000 | mpNest | mpSpecial | nlists; /* Set the flags. Make sure busy count is 1 */
1627 mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
1628 mp->mpBSize = 1; /* Set the size to 2 segments */
1629 mp->mpPte = 0; /* Means nothing */
1630 mp->mpPAddr = 0; /* Means nothing */
1631 mp->mpVAddr = copyIOaddr; /* Set the address range we cover */
1632 mp->mpNestReloc = 0; /* Means nothing */
1633
1634 colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */
1635
1636 if(colladdr) { /* Did it collide? */
1637 panic("MapUserAddressSpaceInit: MapUserAddressSpace range already mapped\n");
1638 }
1639
1640 return;
1641 }
1642
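/*
 * Illustrative sketch (not original source) of the range test the
 * placeholder nest makes possible: the block built above covers two
 * segments (512MB) starting at copyIOaddr, so deciding whether a faulting
 * address lies in the copy in/out window is a simple bounds check. The
 * helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static int example_in_copyio_window(addr64_t addr) {
	return (addr >= copyIOaddr) && /* At or past the window base... */
	    (addr < (copyIOaddr + 2ULL * 0x10000000ULL)); /* ...and short of base + 2 segments */
}
#endif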
1643 /*
1644 * addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size)
1645 *
1646 * map = the vm_map that we are mapping into the kernel
1647 * va = start of the address range we are mapping
1648 * size = size of the range. No greater than 256MB and not 0.
1649 * Note that we do not test validity; we choose to trust our fellows...
1650 *
1651 * Maps a slice of a user address space into a predefined kernel range
1652 * on a per-thread basis. In the future, the restriction of a predefined
1653 * range will be loosened.
1654 *
1655 * Builds the proper linkage map to map the user range.
1656 * We will round this down to the previous segment boundary and calculate
1657 * the relocation to the kernel slot.
1658 *
1659 * We always make a segment table entry here if we need to. This is mainly because of
1660 * copyin/out and if we don't, there will be multiple segment faults for
1661 * each system call. I have seen upwards of 30000 per second.
1662 *
1663 * We do check, however, to see if the slice is already mapped and if so,
1664 * we just exit. This is done for performance reasons. It was found that
1665 * there was a considerable boost in copyin/out performance if we did not
1666 * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
1667 * restriction that you had to bracket MapUserAddressSpace. Beyond that, there
1668 * is a further boost if you don't need to map it each time. The theory
1669 * behind this is that many times copies are to or from the same segment and
1670 * done multiple times within the same system call. To take advantage of that,
1671 * we check cioSpace and cioRelo to see if we've already got it.
1672 *
1673 * We also need to half-invalidate the slice when we context switch or go
1674 * back to user state. A half-invalidate does not clear the actual mapping,
1675 * but it does force the MapUserAddressSpace function to reload the segment
1676 * register/SLBE. If this is not done, we can end up with some pretty severe
1677 * performance penalties. If we map a slice, and the cached space/relocation is
1678 * the same, we won't reload the segment registers. However, since we ran someone else,
1679 * our SR is cleared and we will take a fault. This is reasonable if we block
1680 * while copying (e.g., we took a page fault), but it is not reasonable when we
1681 * just start. For this reason, we half-invalidate to make sure that the SR is
1682 * explicitly reloaded.
1683 *
1684 * Note that we do not go to the trouble of making a pmap segment cache
1685 * entry for these guys because they are very short term -- 99.99% of the time
1686 * they will be unmapped before the next context switch.
1687 *
1688 */
1689
1690 addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size) {
1691
1692 addr64_t baddrs, reladd;
1693 thread_act_t act;
1694 mapping *mp;
1695 struct per_proc_info *perproc;
1696
1697 baddrs = va & 0xFFFFFFFFF0000000ULL; /* Isolate the segment */
1698 act = current_act(); /* Remember our activation */
1699
1700 reladd = baddrs - copyIOaddr; /* Get the relocation from user to kernel */
1701
1702 if((act->mact.cioSpace == map->pmap->space) && (act->mact.cioRelo == reladd)) { /* Already mapped? */
1703 return ((va & 0x0FFFFFFFULL) | copyIOaddr); /* Pass back the kernel address we are to use */
1704 }
1705
1706 disable_preemption(); /* Don't move... */
1707 perproc = getPerProc(); /* Get our per_proc_block */
1708
1709 mp = (mapping *)&perproc->ppCIOmp; /* Make up for C */
1710 act->mact.cioRelo = reladd; /* Relocation from user to kernel */
1711 mp->mpNestReloc = reladd; /* Relocation from user to kernel */
1712
1713 act->mact.cioSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1714 mp->mpSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1715
1716 /*
1717 * Here we make an assumption that we are going to be using the base pmap's address space.
1718 * If we are wrong, and that would be very, very, very rare, the fault handler will fix us up.
1719 */
1720
1721 hw_map_seg(map->pmap, copyIOaddr, baddrs); /* Make the entry for the first segment */
1722
1723 enable_preemption(); /* Let's move */
1724 return ((va & 0x0FFFFFFFULL) | copyIOaddr); /* Pass back the kernel address we are to use */
1725 }
1726
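/*
 * Worked example (with a made-up copyIOaddr of 0xE0000000) of the
 * relocation computed above for a user va of 0x30001000:
 *
 * baddrs = va & 0xFFFFFFFFF0000000ULL = 0x30000000 (user segment base)
 * reladd = baddrs - copyIOaddr = 0xFFFFFFFF50000000 (two's complement)
 * kernel = (va & 0x0FFFFFFFULL) | copyIOaddr = 0xE0001000
 *
 * Adding reladd back to the kernel address wraps around to 0x30001000, the
 * original user va, which is how mpNestReloc lets the fault handler turn a
 * fault in the window back into a user address. A sketch of the forward
 * translation (the helper name is hypothetical):
 */
#if 0 /* example only, not compiled */
static addr64_t example_copyio_kva(addr64_t uva) {
	return (uva & 0x0FFFFFFFULL) | copyIOaddr; /* Keep the in-segment offset, rebase into the window */
}
#endif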
1727 /*
1728 * void ReleaseUserAddressSpace(addr64_t kva)
1729 *
1730 * kva = kernel address of the user copy in/out slice
1731 *
1732 */
1733
1734 void ReleaseUserAddressSpace(addr64_t kva) {
1735
1736 int i;
1737 addr64_t nextva, vend, kaddr, baddrs;
1738 unsigned int msize;
1739 thread_act_t act;
1740 mapping *mp;
1741
1742 if(kva == 0) return; /* Handle a 0 */
1743
1744 disable_preemption(); /* Don't move... */
1745
1746 act = current_act(); /* Remember our activation */
1747
1748 if(act->mact.cioSpace == invalSpace) { /* We only support one at a time */
1749 panic("ReleaseUserAddressMapping: attempt release undefined copy in/out user address space slice\n");
1750 }
1751
1752 act->mact.cioSpace = invalSpace; /* Invalidate space */
1753 mp = (mapping *)&per_proc_info[cpu_number()].ppCIOmp; /* Make up for C */
1754 mp->mpSpace = invalSpace; /* Trash it in the per_proc as well */
1755
1756 hw_blow_seg(copyIOaddr); /* Blow off the first segment */
1757 hw_blow_seg(copyIOaddr + 0x10000000ULL); /* Blow off the second segment */
1758
1759 enable_preemption(); /* Let's move */
1760
1761 return; /* Let's leave */
1762 }
1763
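/*
 * Hypothetical usage sketch of the pair above, roughly as a copy in/out
 * path might use it. The buffer, length, and example_copy() are
 * illustrative stand-ins; also note (see the comments above
 * MapUserAddressSpace) that releasing is no longer strictly required.
 */
#if 0 /* example only, not compiled */
	addr64_t kaddr;

	kaddr = MapUserAddressSpace(map, uva, csize); /* Get the kernel alias for the user slice */
	example_copy(kbuf, kaddr, csize); /* Copy through the alias (hypothetical routine) */
	ReleaseUserAddressSpace(kaddr); /* Drop the per-thread slice */
#endif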
1764
1765
1766 /*
1767 * vm_offset_t pmap_boot_map(size)
1768 *
1769 * size = size of virtual address range to be mapped
1770 *
1771 * This function is used to assign a range of virtual addresses before VM is
1772 * initialized. It starts at VM_MAX_KERNEL_ADDRESS and works downward.
1773 * The variable vm_last_addr contains the current highest possible VM
1774 * assignable address. It is a panic to attempt to call this after VM has
1775 * started up. The only problem is that we may not have the serial or
1776 * framebuffer mapped, so we'll never know we died.........
1777 */
1778
1779 vm_offset_t pmap_boot_map(vm_size_t size) {
1780
1781 if(kernel_map != VM_MAP_NULL) { /* Has VM already started? */
1782 panic("pmap_boot_map: VM started\n");
1783 }
1784
1785 size = round_page_32(size); /* Make sure this is in pages */
1786 vm_last_addr = vm_last_addr - size; /* Allocate the memory */
1787 return (vm_last_addr + 1); /* Return the vaddr we just allocated */
1788
1789 }
1790
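/*
 * Worked example (made-up numbers) of the downward allocator above: if
 * vm_last_addr is 0xDFFFFFFF and a caller asks for 0x2800 bytes,
 * round_page_32 raises the size to 0x3000 (three 4KB pages), vm_last_addr
 * drops to 0xDFFFCFFF, and the caller receives 0xDFFFD000, so the assigned
 * range is [0xDFFFD000, 0xDFFFFFFF], exactly 0x3000 bytes.
 */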
1791
1792
1793 /* temporary workaround: unconditionally report that it is OK to dump core at this address */
1794 boolean_t
1795 coredumpok(vm_map_t map, vm_offset_t va)
1796 {
1797 return TRUE;
1798 }