1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1990,1991,1992 The University of Utah and
31 * the Center for Software Science (CSS).
32 * Copyright (c) 1991,1987 Carnegie Mellon University.
33 * All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation,
40 * and that all advertising materials mentioning features or use of
41 * this software display the following acknowledgement: ``This product
42 * includes software developed by the Center for Software Science at
43 * the University of Utah.''
44 *
45 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
46 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
47 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
48 * THIS SOFTWARE.
49 *
50 * CSS requests users of this software to return to css-dist@cs.utah.edu any
51 * improvements that they make and grant CSS redistribution rights.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 * any improvements or extensions that they make and grant Carnegie Mellon
59 * the rights to redistribute these changes.
60 *
61 * Utah $Hdr: pmap.c 1.28 92/06/23$
62 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
63 */
64
65 /*
66 * Manages physical address maps for powerpc.
67 *
68 * In addition to hardware address maps, this
69 * module is called upon to provide software-use-only
70 * maps which may or may not be stored in the same
71 * form as hardware maps. These pseudo-maps are
72 * used to store intermediate results from copy
73 * operations to and from address spaces.
74 *
75 * Since the information managed by this module is
76 * also stored by the logical address mapping module,
77 * this module may throw away valid virtual-to-physical
78 * mappings at almost any time. However, invalidations
79 * of virtual-to-physical mappings must be done as
80 * requested.
81 *
82 * In order to cope with hardware architectures which
83 * make virtual-to-physical map invalidates expensive,
 84  * this module may delay invalidation or reduced-protection
 85  * operations until such time as they are actually
 86  * necessary. This module is given full information as to
 87  * when physical maps must be made correct.
88 *
89 */
90
91 #include <zone_debug.h>
92 #include <cpus.h>
93 #include <debug.h>
94 #include <mach_kgdb.h>
95 #include <mach_vm_debug.h>
96 #include <db_machine_commands.h>
97
98 #include <kern/thread.h>
99 #include <kern/simple_lock.h>
100 #include <mach/vm_attributes.h>
101 #include <mach/vm_param.h>
102 #include <kern/spl.h>
103
104 #include <kern/misc_protos.h>
105 #include <ppc/misc_protos.h>
106 #include <ppc/proc_reg.h>
107
108 #include <vm/pmap.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_page.h>
111
112 #include <ppc/pmap.h>
113 #include <ppc/pmap_internals.h>
114 #include <ppc/mem.h>
115 #include <ppc/mappings.h>
116
117 #include <ppc/new_screen.h>
118 #include <ppc/Firmware.h>
119 #include <ppc/savearea.h>
120 #include <ppc/exception.h>
121 #include <ddb/db_output.h>
122
123 #if DB_MACHINE_COMMANDS
124 /* optionally enable traces of pmap operations in post-mortem trace table */
125 /* #define PMAP_LOWTRACE 1 */
126 #define PMAP_LOWTRACE 0
127 #else /* DB_MACHINE_COMMANDS */
128 /* Can not trace even if we wanted to */
129 #define PMAP_LOWTRACE 0
130 #endif /* DB_MACHINE_COMMANDS */
131
132 #define PERFTIMES 0
133
134 #if PERFTIMES && DEBUG
135 #define debugLog2(a, b, c) dbgLog2(a, b, c)
136 #else
137 #define debugLog2(a, b, c)
138 #endif
139
140 extern unsigned int avail_remaining;
141 extern unsigned int mappingdeb0;
142 extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
143 extern int real_ncpus; /* Number of actual CPUs */
144 unsigned int debugbackpocket; /* (TEST/DEBUG) */
145
146 vm_offset_t avail_next;
147 vm_offset_t first_free_virt;
148 int current_free_region; /* Used in pmap_next_page */
149
150 /* forward */
151 void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
152 void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
153 void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);
154
155 #if MACH_VM_DEBUG
156 int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
157 #endif
158
159 #if DEBUG
160 #define PDB_USER 0x01 /* exported functions */
161 #define PDB_MAPPING 0x02 /* low-level mapping routines */
162 #define PDB_ENTER 0x04 /* pmap_enter specifics */
163 #define PDB_COPY 0x08 /* copy page debugging */
164 #define PDB_ZERO 0x10 /* zero page debugging */
165 #define PDB_WIRED 0x20 /* things concerning wired entries */
166 #define PDB_PTEG 0x40 /* PTEG overflows */
167 #define PDB_LOCK 0x100 /* locks */
168 #define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */
169
170 int pmdebug=0;
171 #endif
172
173 /* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
174
175 extern struct pmap kernel_pmap_store;
176 pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
177 pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
178 struct zone *pmap_zone; /* zone of pmap structures */
179 boolean_t pmap_initialized = FALSE;
180
181 /*
182 * Physical-to-virtual translations are handled by inverted page table
183 * structures, phys_tables. Multiple mappings of a single page are handled
184 * by linking the affected mapping structures. We initialise one region
185 * for phys_tables of the physical memory we know about, but more may be
186 * added as it is discovered (eg. by drivers).
187 */
188 struct phys_entry *phys_table; /* For debugging */
189
190 lock_t pmap_system_lock;
191
192 decl_simple_lock_data(,tlb_system_lock)
193
194 /*
195 * free pmap list. caches the first free_pmap_max pmaps that are freed up
196 */
197 int free_pmap_max = 32;
198 int free_pmap_count;
199 pmap_t free_pmap_list;
200 decl_simple_lock_data(,free_pmap_lock)
201
202 /*
203  * Function to find the phys_entry for a given physical address
204 */
205
206 struct phys_entry *pmap_find_physentry(vm_offset_t pa)
207 {
208 int i;
209 struct phys_entry *entry;
210
211 for (i = pmap_mem_regions_count-1; i >= 0; i--) {
212 if (pa < pmap_mem_regions[i].start)
213 continue;
214 if (pa >= pmap_mem_regions[i].end)
215 return PHYS_NULL;
216
217 entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
218 __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */
219 return entry;
220 }
221 kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa);
222 return PHYS_NULL;
223 }
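
/*
 * Illustrative sketch (not part of the original source): pmap_find_physentry()
 * is the physical-to-virtual lookup primitive used throughout this file. A
 * caller can use it to distinguish managed RAM (which has a phys_entry and a
 * mapping chain) from I/O space. "pmap_is_managed" below is a hypothetical
 * helper name, shown only for illustration.
 */
#if 0	/* sketch only -- not compiled */
static boolean_t
pmap_is_managed(vm_offset_t pa)
{
	struct phys_entry *pp;

	pp = pmap_find_physentry(pa);		/* Scan pmap_mem_regions for the page's phys_entry */
	return (pp != PHYS_NULL);		/* Managed RAM has one; I/O space returns PHYS_NULL */
}
#endif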
224
225 /*
226 * kern_return_t
227 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
228 * boolean_t available, unsigned int attr)
229 * Allocate some extra physentries for the physical addresses given,
230  * specifying a default attribute that on the powerpc determines
231  * the default cacheability for any mappings using these addresses.
232 * If the memory is marked as available, it is added to the general
233 * VM pool, otherwise it is not (it is reserved for card IO etc).
234 */
235 kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
236 boolean_t available, unsigned int attr)
237 {
238 int i,j;
239 spl_t s;
240
241 /* Only map whole pages */
242
243 panic("Forget it! You can't map no more memory, you greedy puke!\n");
244
245 spa = trunc_page(spa);
246 epa = round_page(epa);
247
248 /* First check that the region doesn't already exist */
249
250 assert (epa >= spa);
251 for (i = 0; i < pmap_mem_regions_count; i++) {
252 /* If we're below the next region, then no conflict */
253 if (epa < pmap_mem_regions[i].start)
254 break;
255 if (spa < pmap_mem_regions[i].end) {
256 #if DEBUG
257 kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr);
258 #endif /* DEBUG */
259 return KERN_NO_SPACE;
260 }
261 }
262
263 #if DEBUG
264 kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */
265 #endif
266
267 /* Check that we've got enough space for another region */
268 if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
269 return KERN_RESOURCE_SHORTAGE;
270
271 /* Once here, i points to the mem_region above ours in physical mem */
272
273 /* allocate a new phys_table for this new region */
274 #if DEBUG
275 kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */
276 #endif
277
278 phys_table = (struct phys_entry *)
279 kalloc(sizeof(struct phys_entry) * atop(epa-spa));
280 #if DEBUG
281 kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */
282 #endif
283
284 /* Initialise the new phys_table entries */
285 for (j = 0; j < atop(epa-spa); j++) {
286
287 phys_table[j].phys_link = MAPPING_NULL;
288
289 mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */
290
291 }
292 s = splhigh();
293
294 /* Move all the phys_table entries up some to make room in
295 * the ordered list.
296 */
297 for (j = pmap_mem_regions_count; j > i ; j--)
298 pmap_mem_regions[j] = pmap_mem_regions[j-1];
299
300 /* Insert a new entry with some memory to back it */
301
302 pmap_mem_regions[i].start = spa;
303 pmap_mem_regions[i].end = epa;
304 pmap_mem_regions[i].phys_table = phys_table;
305
306 pmap_mem_regions_count++;
307 splx(s);
308
309 #if DEBUG
310 for(i=0; i<pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
311 kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
312 pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
313 }
314 #endif
315
316 if (available) {
317 kprintf("warning : pmap_add_physical_mem() "
318 "available not yet supported\n");
319 }
320
321 return KERN_SUCCESS;
322 }
323
324 /*
325 * pmap_map(va, spa, epa, prot)
326 * is called during boot to map memory in the kernel's address map.
327 * A virtual address range starting at "va" is mapped to the physical
328 * address range "spa" to "epa" with machine independent protection
329 * "prot".
330 *
331 * "va", "spa", and "epa" are byte addresses and must be on machine
332 * independent page boundaries.
333 *
334  * The pages form a contiguous virtual address range with the same protection
335  * and attributes; therefore, we map the whole range with a single block mapping.
336 *
337 */
338 vm_offset_t
339 pmap_map(
340 vm_offset_t va,
341 vm_offset_t spa,
342 vm_offset_t epa,
343 vm_prot_t prot)
344 {
345
346
347 if (spa == epa)
348 return(va);
349
350 assert(epa > spa);
351 debugLog2(40, va, spa); /* Log pmap_map call */
352
353 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */
354
355 debugLog2(41, epa, prot); /* Log pmap_map call */
356
357 return(va);
358 }
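
/*
 * Illustrative sketch (not part of the original source): a typical boot-time
 * use of pmap_map(), mirroring how pmap_bootstrap() below maps its own tables
 * V=R. The addresses are hypothetical; both range ends must be page aligned.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t va;

	va = pmap_map(0x00300000, 0x00300000, 0x00310000,	/* 64KB mapped V=R */
		      VM_PROT_READ | VM_PROT_WRITE);		/* Cacheable default WIMG, permanent block */
#endif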
359
360 /*
361 * pmap_map_bd(va, spa, epa, prot)
362 * Back-door routine for mapping kernel VM at initialisation.
363 * Used for mapping memory outside the known physical memory
364 * space, with caching disabled. Designed for use by device probes.
365 *
366 * A virtual address range starting at "va" is mapped to the physical
367 * address range "spa" to "epa" with machine independent protection
368 * "prot".
369 *
370 * "va", "spa", and "epa" are byte addresses and must be on machine
371 * independent page boundaries.
372 *
373 * WARNING: The current version of memcpy() can use the dcbz instruction
374 * on the destination addresses. This will cause an alignment exception
375 * and consequent overhead if the destination is caching-disabled. So
376 * avoid memcpy()ing into the memory mapped by this function.
377 *
378 * also, many other pmap_ routines will misbehave if you try and change
379 * protections or remove these mappings, they are designed to be permanent.
380 *
381 * These areas will be added to the autogen list, if possible. Existing translations
382  * are overridden and their mapping structures are released. This takes place in
383 * the autogen_map function.
384 *
385 * Locking:
386 * this routine is called only during system initialization when only
387 * one processor is active, so no need to take locks...
388 */
389 vm_offset_t
390 pmap_map_bd(
391 vm_offset_t va,
392 vm_offset_t spa,
393 vm_offset_t epa,
394 vm_prot_t prot)
395 {
396 register struct mapping *mp;
397 register struct phys_entry *pp;
398
399
400 if (spa == epa)
401 return(va);
402
403 assert(epa > spa);
404
405 debugLog2(42, va, epa); /* Log pmap_map_bd call */
406
407 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */
408
409 debugLog2(43, epa, prot); /* Log pmap_map_bd exit */
410
411 return(va);
412 }
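
/*
 * Illustrative sketch (not part of the original source): a device probe maps a
 * page of I/O space with pmap_map_bd(); the routine applies PTE_WIMG_IO
 * (cache-inhibited, guarded) itself. "io_virt_base" and the physical range are
 * hypothetical. Per the warning above, avoid memcpy() into such a range.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t regs;

	regs = pmap_map_bd(io_virt_base,			/* caller-chosen kernel virtual address */
			   0x80080000, 0x80081000,		/* one page of device registers */
			   VM_PROT_READ | VM_PROT_WRITE);
#endif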
413
414 /*
415 * Bootstrap the system enough to run with virtual memory.
416 * Map the kernel's code and data, and allocate the system page table.
417 * Called with mapping done by BATs. Page_size must already be set.
418 *
419 * Parameters:
420 * mem_size: Total memory present
421 * first_avail: First virtual address available
422 * first_phys_avail: First physical address available
423 */
424 void
425 pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
426 {
427 register struct mapping *mp;
428 vm_offset_t addr;
429 vm_size_t size;
430 int i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
431 unsigned int mask;
432 vm_offset_t first_used_addr;
433 PCA *pcaptr;
434
435 *first_avail = round_page(*first_avail);
436
437 #if DEBUG
438 kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
439 *first_avail, *first_phys_avail, avail_remaining);
440 #endif
441
442 assert(PAGE_SIZE == PPC_PGBYTES);
443
444 /*
445 * Initialize kernel pmap
446 */
447 kernel_pmap = &kernel_pmap_store;
448 cursor_pmap = &kernel_pmap_store;
449
450 lock_init(&pmap_system_lock,
451 FALSE, /* NOT a sleep lock */
452 ETAP_VM_PMAP_SYS,
453 ETAP_VM_PMAP_SYS_I);
454
455 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
456
457 kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
458 kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
459 kernel_pmap->ref_count = 1;
460 kernel_pmap->space = PPC_SID_KERNEL;
461 kernel_pmap->pmapvr = 0; /* Virtual = Real */
462 kernel_pmap->bmaps = 0; /* No block pages just yet */
463 for(i=0; i < 128; i++) { /* Clear usage slots */
464 kernel_pmap->pmapUsage[i] = 0;
465 }
466 for(i=0; i < 16; i++) { /* Initialize for laughs */
467 kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
468 }
469
470 /*
471 * Allocate: (from first_avail up)
472 * Aligned to its own size:
473 * hash table (for mem size 2**x, allocate 2**(x-10) entries)
474  * mapping table (same size and immediately following hash table)
475 */
476 /* hash_table_size must be a power of 2; recommended sizes are
477 * taken from PPC601 User Manual, table 6-19. We take the next
478 * highest size if mem_size is not a power of two.
479 * TODO NMGS make this configurable at boot time.
480 */
481
482 num = sizeof(pte_t) * (mem_size >> 10);
483
484 for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */
485 hash_table_size < num;
486 hash_table_size *= 2)
487 continue;
488
489 if (num > (sizeof(pte_t) * 524288))
490 hash_table_size = hash_table_size/2; /* reduce by half above 512MB */
491
492 /* Scale to within any physical memory layout constraints */
493 do {
494 num = atop(mem_size); /* num now holds mem_size in pages */
495
496 /* size of all structures that we're going to allocate */
497
498 size = (vm_size_t) (
499 (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */
500 ((InitialSaveBloks / 2) * PAGE_SIZE) + /* For backpocket saveareas */
501 hash_table_size + /* For hash table */
502 hash_table_size + /* For PTEG allocation table */
503 (num * sizeof(struct phys_entry)) /* For the physical entries */
504 );
505
506 mapsize = size = round_page(size); /* Get size of area to map that we just calculated */
507 mapsize = mapsize + kmapsize; /* Account for the kernel text size */
508
509 vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
510 vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */
511
512 mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */
513
514 mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
515 mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */
516 mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */
517
518 #if DEBUG
519 kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
520 kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
521 kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
522 kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
523 kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
524 #endif
525
526 size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */
527
528 /* hash table must be aligned to its size */
529
530 addr = (*first_avail +
531 (hash_table_size-1)) & ~(hash_table_size-1);
532
533 if (addr + size > pmap_mem_regions[0].end) {
534 hash_table_size /= 2;
535 } else {
536 break;
537 }
538 /* If we have had to shrink hash table to too small, panic */
539 if (hash_table_size == 32 * 1024)
540 panic("cannot lay out pmap memory map correctly");
541 } while (1);
542
543 #if DEBUG
544 kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
545 hash_table_size, size, addr);
546 #endif
547 if (round_page(*first_phys_avail) < trunc_page(addr)) {
548 /* We are stepping over at least one page here, so
549 * add this region to the free regions so that it can
550 * be allocated by pmap_steal
551 */
552 free_regions[free_regions_count].start = round_page(*first_phys_avail);
553 free_regions[free_regions_count].end = trunc_page(addr);
554
555 avail_remaining += (free_regions[free_regions_count].end -
556 free_regions[free_regions_count].start) /
557 PPC_PGBYTES;
558 #if DEBUG
559 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
560 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
561 avail_remaining);
562 #endif /* DEBUG */
563 free_regions_count++;
564 }
565
566 /* Zero everything - this also invalidates the hash table entries */
567 bzero((char *)addr, size);
568
569 /* Set up some pointers to our new structures */
570
571 /* from here, addr points to the next free address */
572
573 first_used_addr = addr; /* remember where we started */
574
575 /* Set up hash table address and dma buffer address, keeping
576 * alignment. These mappings are all 1-1, so dma_r == dma_v
577 *
578 * If hash_table_size == dma_buffer_alignment, then put hash_table
579 * first, since dma_buffer_size may be smaller than alignment, but
580 * hash table alignment==hash_table_size.
581 */
582 hash_table_base = addr;
583
584 addr += hash_table_size;
585 addr += hash_table_size; /* Add another for the PTEG Control Area */
586 assert((hash_table_base & (hash_table_size-1)) == 0);
587
588 pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */
589 mapCtl.mapcflush.pcaptr = pcaptr;
590
591 for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */
592 pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */
593 pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */
594 }
595
596 savearea_init(&addr); /* Initialize the savearea chains and data */
597
598 /* phys_table is static to help debugging,
599 * this variable is no longer actually used
600 * outside of this scope
601 */
602
603 phys_table = (struct phys_entry *) addr;
604
605 #if DEBUG
606 kprintf("hash_table_base =%08X\n", hash_table_base);
607 kprintf("phys_table =%08X\n", phys_table);
608 kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count);
609 #endif
610
611 for (i = 0; i < pmap_mem_regions_count; i++) {
612
613 pmap_mem_regions[i].phys_table = phys_table;
614 rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE;
615
616 #if DEBUG
617 kprintf("Initializing physical table for region %d\n", i);
618 kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n",
619 phys_table, rsize, pmap_mem_regions[i].start,
620 (unsigned int)pmap_mem_regions[i].end);
621 #endif
622
623 for (j = 0; j < rsize; j++) {
624 phys_table[j].phys_link = MAPPING_NULL;
625 mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE),
626 PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */
627 }
628 phys_table = phys_table +
629 atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
630 }
631
632 /* restore phys_table for debug */
633 phys_table = (struct phys_entry *) addr;
634
635 addr += sizeof(struct phys_entry) * num;
636
637 simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
638
639 /* Initialise the registers necessary for supporting the hashtable */
640 #if DEBUG
641 kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);
642 #endif
643
644 hash_table_init(hash_table_base, hash_table_size);
645
646 /*
647 * Remaining space is for mapping entries. Tell the initializer routine that
648 * the mapping system can't release this block because it's permanently assigned
649 */
650
651 mapping_init(); /* Initialize the mapping tables */
652
653 for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */
654 mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */
655 }
656 mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */
657
658 #if DEBUG
659
660 kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
661 first_used_addr, round_page(first_used_addr+size),
662 first_used_addr);
663 #endif /* DEBUG */
664
665 /* Map V=R the page tables */
666 pmap_map(first_used_addr, first_used_addr,
667 round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE);
668
669 #if DEBUG
670
671 for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */
672 if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */
673 kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
674 }
675 }
676 #endif
677
678 *first_avail = round_page(first_used_addr + size);
679 first_free_virt = round_page(first_used_addr + size);
680
681 /* All the rest of memory is free - add it to the free
682 * regions so that it can be allocated by pmap_steal
683 */
684 free_regions[free_regions_count].start = *first_avail;
685 free_regions[free_regions_count].end = pmap_mem_regions[0].end;
686
687 avail_remaining += (free_regions[free_regions_count].end -
688 free_regions[free_regions_count].start) /
689 PPC_PGBYTES;
690
691 #if DEBUG
692 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
693 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
694 avail_remaining);
695 #endif /* DEBUG */
696
697 free_regions_count++;
698
699 current_free_region = 0;
700
701 avail_next = free_regions[current_free_region].start;
702
703 #if DEBUG
704 kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */
705 kprintf("Current free region=%d\n",current_free_region); /* (TEST/DEBUG) */
706 for(i=0;i<free_regions_count; i++) { /* (TEST/DEBUG) */
707 kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
708 free_regions[i].end); /* (TEST/DEBUG) */
709 }
710 for (i = 0; i < pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
711 kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i, /* (TEST/DEBUG) */
712 pmap_mem_regions[i].start, /* (TEST/DEBUG) */
713 pmap_mem_regions[i].end, /* (TEST/DEBUG) */
714 pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
715 }
716 #endif
717
718 }
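
/*
 * Worked example of the hash table sizing done above (a sketch, assuming the
 * architected 8-byte PowerPC PTE, i.e. sizeof(pte_t) == 8):
 *
 *	mem_size =  64MB:  num = 8 * (64MB >> 10)  = 512KB, so hash_table_size
 *			   doubles from the 64KB minimum up to 512KB.
 *	mem_size = 128MB:  num = 8 * (128MB >> 10) = 1MB,  so hash_table_size = 1MB.
 *
 * Above 512MB of memory the initial result is halved once, and the final value
 * may be halved further if the tables will not fit below the end of
 * pmap_mem_regions[0]; shrinking all the way down to 32KB causes a panic.
 */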
719
720 /*
721 * pmap_init(spa, epa)
722 * finishes the initialization of the pmap module.
723 * This procedure is called from vm_mem_init() in vm/vm_init.c
724 * to initialize any remaining data structures that the pmap module
725 * needs to map virtual memory (VM is already ON).
726 *
727 * Note that the pmap needs to be sized and aligned to
728  * a power of two. This is because it is accessed both by virtual and
729  * real (physical) addresses, so it can't span a page boundary.
730 */
731
732 void
733 pmap_init(void)
734 {
735
736
737 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
738 #if ZONE_DEBUG
739 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
740 #endif /* ZONE_DEBUG */
741
742 pmap_initialized = TRUE;
743
744 /*
745 * Initialize list of freed up pmaps
746 */
747 free_pmap_list = 0; /* Set that there are no free pmaps */
748 free_pmap_count = 0;
749 simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
750 }
751
752 unsigned int pmap_free_pages(void)
753 {
754 return avail_remaining;
755 }
756
757 boolean_t pmap_next_page(vm_offset_t *addrp)
758 {
759 /* Non-optimal, but only used for virtual memory startup.
760  * Allocate memory from a table of free physical addresses.
761 * If there are no more free entries, too bad. We have two
762 * tables to look through, free_regions[] which holds free
763 * regions from inside pmap_mem_regions[0], and the others...
764 * pmap_mem_regions[1..]
765 */
766
767 /* current_free_region indicates the next free entry,
768 * if it's less than free_regions_count, then we're still
769 * in free_regions, otherwise we're in pmap_mem_regions
770 */
771
772 if (current_free_region >= free_regions_count) {
773 /* We're into the pmap_mem_regions, handle this
774  * separately from free_regions
775 */
776
777 int current_pmap_mem_region = current_free_region -
778 free_regions_count + 1;
779 if (current_pmap_mem_region > pmap_mem_regions_count)
780 return FALSE;
781 *addrp = avail_next;
782 avail_next += PAGE_SIZE;
783 avail_remaining--;
784 if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
785 current_free_region++;
786 current_pmap_mem_region++;
787 avail_next = pmap_mem_regions[current_pmap_mem_region].start;
788 #if DEBUG
789 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
790 #endif /* DEBUG */
791 }
792 return TRUE;
793 }
794
795 /* We're in the free_regions, allocate next page and increment
796 * counters
797 */
798 *addrp = avail_next;
799
800 avail_next += PAGE_SIZE;
801 avail_remaining--;
802
803 if (avail_next >= free_regions[current_free_region].end) {
804 current_free_region++;
805 if (current_free_region < free_regions_count)
806 avail_next = free_regions[current_free_region].start;
807 else
808 avail_next = pmap_mem_regions[current_free_region -
809 free_regions_count + 1].start;
810 #if DEBUG
811 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
812 #endif
813 }
814 return TRUE;
815 }
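
/*
 * Illustrative sketch (not part of the original source): the VM startup code
 * drains pmap_next_page() one physical page at a time, first from the
 * free_regions[] holes inside pmap_mem_regions[0] and then from the remaining
 * pmap_mem_regions. The consuming loop below is paraphrased, not the actual
 * resident-page initialization code.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t pa;

	while (pmap_free_pages() > 0 && pmap_next_page(&pa)) {
		/* hand physical page "pa" to the VM resident-page tables */
	}
#endif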
816
817 void pmap_virtual_space(
818 vm_offset_t *startp,
819 vm_offset_t *endp)
820 {
821 *startp = round_page(first_free_virt);
822 *endp = VM_MAX_KERNEL_ADDRESS;
823 }
824
825 /*
826 * pmap_create
827 *
828 * Create and return a physical map.
829 *
830 * If the size specified for the map is zero, the map is an actual physical
831 * map, and may be referenced by the hardware.
832 *
833 * A pmap is either in the free list or in the in-use list. The only use
834 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
835  * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
836  * in-use list is searched until a hole in the VSID sequence is found. (Note
837 * that the in-use pmaps are queued in VSID sequence order.) This is all done
838 * while free_pmap_lock is held.
839 *
840 * If the size specified is non-zero, the map will be used in software
841 * only, and is bounded by that size.
842 */
843 pmap_t
844 pmap_create(vm_size_t size)
845 {
846 pmap_t pmap, ckpmap, fore, aft;
847 int s, i;
848 space_t sid;
849 unsigned int currSID;
850
851 #if PMAP_LOWTRACE
852 dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */
853 #endif
854
855 #if DEBUG
856 if (pmdebug & PDB_USER)
857 kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');
858 #endif
859
860 /*
861 * A software use-only map doesn't even need a pmap structure.
862 */
863 if (size)
864 return(PMAP_NULL);
865
866 /*
867 * If there is a pmap in the pmap free list, reuse it.
868 * Note that we use free_pmap_list for all chaining of pmaps, both to
869 * the free list and the in use chain (anchored from kernel_pmap).
870 */
871 s = splhigh();
872 simple_lock(&free_pmap_lock);
873
874 if(free_pmap_list) { /* Any free? */
875 pmap = free_pmap_list; /* Yes, allocate it */
876 free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */
877 free_pmap_count--;
878 }
879 else {
880 simple_unlock(&free_pmap_lock); /* Unlock just in case */
881 splx(s);
882
883 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
884 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
885
886 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
887
888 s = splhigh();
889 simple_lock(&free_pmap_lock); /* Lock it back up */
890
891 ckpmap = cursor_pmap; /* Get starting point for free ID search */
892 currSID = ckpmap->spaceNum; /* Get the actual space ID number */
893
894 while(1) { /* Keep trying until something happens */
895
896 currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */
897 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
898
899 if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */
900
901 if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */
902 panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* Die pig dog */
903 }
904 }
905
906 pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */
907 pmap->spaceNum = currSID; /* Set the space ID number */
908
909 /*
910 * Now we link into the chain just before the out of sequence guy.
911 */
912
913 fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */
914 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
915 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
916 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
917 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
918
919 simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
920 pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap); /* Get physical pointer to the pmap and make mask */
921 }
922 pmap->ref_count = 1;
923 pmap->stats.resident_count = 0;
924 pmap->stats.wired_count = 0;
925 pmap->bmaps = 0; /* Clear block map pointer to 0 */
926 pmap->vflags = 0; /* Mark all alternates invalid for now */
927 for(i=0; i < 128; i++) { /* Clean out usage slots */
928 pmap->pmapUsage[i] = 0;
929 }
930 for(i=0; i < 16; i++) { /* Initialize for laughs */
931 pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
932 }
933
934 #if PMAP_LOWTRACE
935 dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space); /* (TEST/DEBUG) */
936 #endif
937
938 #if DEBUG
939 if (pmdebug & PDB_USER)
940 kprintf("-> %x, space id = %d\n", pmap, pmap->space);
941 #endif
942
943 simple_unlock(&free_pmap_lock);
944 splx(s);
945 return(pmap);
946 }
947
948 /*
949 * pmap_destroy
950 *
951 * Gives up a reference to the specified pmap. When the reference count
952 * reaches zero the pmap structure is added to the pmap free list.
953 *
954 * Should only be called if the map contains no valid mappings.
955 */
956 void
957 pmap_destroy(pmap_t pmap)
958 {
959 int ref_count;
960 spl_t s;
961 pmap_t fore, aft;
962
963 #if PMAP_LOWTRACE
964 dbgTrace(0xF1D00003, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
965 #endif
966
967 #if DEBUG
968 if (pmdebug & PDB_USER)
969 kprintf("pmap_destroy(pmap=%x)\n", pmap);
970 #endif
971
972 if (pmap == PMAP_NULL)
973 return;
974
975 ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */
976 if(ref_count>0) return; /* Still more users, leave now... */
977
978 if(ref_count < 0) /* Did we go too far? */
979 panic("pmap_destroy(): ref_count < 0");
980
981 #ifdef notdef
982 if(pmap->stats.resident_count != 0)
983 panic("PMAP_DESTROY: pmap not empty");
984 #else
985 if(pmap->stats.resident_count != 0) {
986 pmap_remove(pmap, 0, 0xFFFFF000);
987 }
988 #endif
989
990 /*
991 * Add the pmap to the pmap free list.
992 */
993
994 s = splhigh();
995 /*
996 * Add the pmap to the pmap free list.
997 */
998 simple_lock(&free_pmap_lock);
999
1000 if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */
1001
1002 pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */
1003 free_pmap_list = pmap;
1004 free_pmap_count++;
1005 simple_unlock(&free_pmap_lock);
1006
1007 } else {
1008 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
1009 fore = (pmap_t)pmap->pmap_link.prev;
1010 aft = (pmap_t)pmap->pmap_link.next;
1011 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
1012 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
1013 simple_unlock(&free_pmap_lock);
1014 zfree(pmap_zone, (vm_offset_t) pmap);
1015 }
1016 splx(s);
1017 }
1018
1019 /*
1020 * pmap_reference(pmap)
1021 * gains a reference to the specified pmap.
1022 */
1023 void
1024 pmap_reference(pmap_t pmap)
1025 {
1026 spl_t s;
1027
1028 #if PMAP_LOWTRACE
1029 dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
1030 #endif
1031
1032 #if DEBUG
1033 if (pmdebug & PDB_USER)
1034 kprintf("pmap_reference(pmap=%x)\n", pmap);
1035 #endif
1036
1037 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
1038 }
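
/*
 * Illustrative sketch (not part of the original source): the reference-counted
 * lifecycle implemented by pmap_create(), pmap_reference() and pmap_destroy().
 * This is not actual task-creation code.
 */
#if 0	/* sketch only -- not compiled */
	pmap_t map;

	map = pmap_create(0);		/* size 0 => a real, hardware-usable pmap; ref_count = 1 */
	if (map == PMAP_NULL)
		panic("no memory for pmap");

	pmap_reference(map);		/* a second user: ref_count = 2 */

	pmap_destroy(map);		/* ref_count drops to 1, pmap stays alive */
	pmap_destroy(map);		/* ref_count drops to 0, pmap is cached on free_pmap_list or zfree'd */
#endif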
1039
1040 /*
1041 * pmap_remove_some_phys
1042 *
1043 * Removes mappings of the associated page from the specified pmap
1044 *
1045 */
1046 void pmap_remove_some_phys(
1047 pmap_t pmap,
1048 vm_offset_t pa)
1049 {
1050 register struct phys_entry *pp;
1051 register struct mapping *mp, *mpv;
1052
1053
1054 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1055
1056 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1057 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1058
1059 if (pmap->vflags & pmapVMhost)
1060 mapping_purge(pp);
1061 else
1062 mapping_purge_pmap(pp, pmap);
1063
1064 return; /* Leave... */
1065 }
1066
1067 /*
1068 * pmap_remove(pmap, s, e)
1069 * unmaps all virtual addresses v in the virtual address
1070 * range determined by [s, e) and pmap.
1071 * s and e must be on machine independent page boundaries and
1072 * s must be less than or equal to e.
1073 *
1074 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
1075 * skip those segments.
1076 */
1077 void
1078 pmap_remove(
1079 pmap_t pmap,
1080 vm_offset_t sva,
1081 vm_offset_t eva)
1082 {
1083 spl_t spl;
1084 struct mapping *mp, *blm;
1085 vm_offset_t lpage;
1086
1087 #if PMAP_LOWTRACE
1088 dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */
1089 #endif
1090
1091 #if DEBUG
1092 if (pmdebug & PDB_USER)
1093 kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
1094 pmap, sva, eva);
1095 #endif
1096
1097 if (pmap == PMAP_NULL)
1098 return;
1099
1100 /* It is just possible that eva might have wrapped around to zero,
1101 * and sometimes we get asked to liberate something of size zero
1102 * even though it's dumb (eg. after zero length read_overwrites)
1103 */
1104 assert(eva >= sva);
1105
1106 /* If these are not page aligned the loop might not terminate */
1107 assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));
1108
1109 /* We liberate addresses from high to low, since the stack grows
1110 * down. This means that we won't need to test addresses below
1111 * the limit of stack growth
1112 */
1113
1114 	debugLog2(44, sva, eva); 			/* Log pmap_remove call */
1115
1116 sva = trunc_page(sva); /* Make it clean */
1117 lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */
1118
1119 /*
1120 * Here we will remove all of the block mappings that overlap this range.
1121 * hw_rem_blk removes one mapping in the range and returns. If it returns
1122 * 0, there are no blocks in the range.
1123 */
1124
1125 while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */
1126 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
1127 blm = (struct mapping *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
1128 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
1129 pmap, sva, blm);
1130 }
1131 if (!((unsigned int)mp & 2))
1132 mapping_free(hw_cpv(mp)); /* Release it */
1133 }
1134 while (pmap->stats.resident_count && (eva > sva)) {
1135
1136 eva -= PAGE_SIZE; /* Back up a page */
1137
1138 #if 1
1139 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1140 panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1141 #endif
1142 if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1143 eva = eva & (-pmapUsageSize); /* Back up into the previous slot */
1144 continue; /* Check the next... */
1145 }
1146 mapping_remove(pmap, eva); /* Remove the mapping for this address */
1147 }
1148
1149 	debugLog2(45, 0, 0); 				/* Log pmap_remove exit */
1150 }
1151
1152 /*
1153 * Routine:
1154 * pmap_page_protect
1155 *
1156 * Function:
1157 * Lower the permission for all mappings to a given page.
1158 */
1159 void
1160 pmap_page_protect(
1161 vm_offset_t pa,
1162 vm_prot_t prot)
1163 {
1164 register struct phys_entry *pp;
1165 boolean_t remove;
1166
1167
1168 #if PMAP_LOWTRACE
1169 dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1170 #endif
1171
1172 #if DEBUG
1173 if (pmdebug & PDB_USER)
1174 kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);
1175 #endif
1176
1177 debugLog2(46, pa, prot); /* Log pmap_page_protect call */
1178
1179 switch (prot) {
1180 case VM_PROT_READ:
1181 case VM_PROT_READ|VM_PROT_EXECUTE:
1182 remove = FALSE;
1183 break;
1184 case VM_PROT_ALL:
1185 return;
1186 default:
1187 remove = TRUE;
1188 break;
1189 }
1190
1191 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1192 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1193
1194 if (remove) { /* If the protection was set to none, we'll remove all mappings */
1195 mapping_purge(pp); /* Get rid of them all */
1196
1197 		debugLog2(47, 0, 0); 			/* Log pmap_page_protect exit */
1198 return; /* Leave... */
1199 }
1200
1201 /* When we get here, it means that we are to change the protection for a
1202 * physical page.
1203 */
1204
1205 mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */
1206
1207 	debugLog2(47, 1, 0); 				/* Log pmap_page_protect exit */
1208 }
1209
1210 /*
1211 * pmap_protect(pmap, s, e, prot)
1212 * changes the protection on all virtual addresses v in the
1213 * virtual address range determined by [s, e] and pmap to prot.
1214 * s and e must be on machine independent page boundaries and
1215 * s must be less than or equal to e.
1216 *
1217 * Note that any requests to change the protection of a nested pmap are
1218 * ignored. Those changes MUST be done by calling this with the correct pmap.
1219 */
1220 void pmap_protect(
1221 pmap_t pmap,
1222 vm_offset_t sva,
1223 vm_offset_t eva,
1224 vm_prot_t prot)
1225 {
1226 spl_t spl;
1227 register struct phys_entry *pp;
1228 register struct mapping *mp, *mpv;
1229
1230 #if PMAP_LOWTRACE
1231 dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1232 #endif
1233
1234 #if DEBUG
1235 if (pmdebug & PDB_USER)
1236 kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);
1237
1238 assert(sva < eva);
1239 #endif
1240
1241 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1242
1243 	debugLog2(48, sva, eva); 			/* Log pmap_protect call */
1244
1245 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1246 pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */
1247
1248 		debugLog2(49, prot, 0); 		/* Log pmap_protect exit */
1249
1250 return; /* Leave... */
1251 }
1252
1253 sva = trunc_page(sva); /* Start up a page boundary */
1254
1255 while(sva < eva) { /* Step through */
1256
1257 if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1258 sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */
1259 if(!sva) break; /* We tried to wrap, kill loop... */
1260 continue; /* Check the next... */
1261 }
1262
1263 #if 1
1264 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1265 panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1266 #endif
1267
1268 mapping_protect(pmap, sva, prot); /* Change the protection on the page */
1269 sva += PAGE_SIZE; /* On to the next page */
1270 }
1271
1272 	debugLog2(49, prot, 1); 			/* Log pmap_protect exit */
1273 return; /* Leave... */
1274 }
1275
1276
1277
1278 /*
1279 * pmap_enter
1280 *
1281 * Create a translation for the virtual address (virt) to the physical
1282 * address (phys) in the pmap with the protection requested. If the
1283 * translation is wired then we can not allow a full page fault, i.e.,
1284 * the mapping control block is not eligible to be stolen in a low memory
1285 * condition.
1286 *
1287 * NB: This is the only routine which MAY NOT lazy-evaluate
1288 * or lose information. That is, this routine must actually
1289 * insert this page into the given map NOW.
1290 */
1291 void
1292 pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
1293 unsigned int flags, boolean_t wired)
1294 {
1295 spl_t spl;
1296 struct mapping *mp;
1297 struct phys_entry *pp;
1298 int memattr;
1299
1300 #if PMAP_LOWTRACE
1301 dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1302 dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1303 #endif
1304
1305 if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */
1306
1307 	debugLog2(50, va, pa); 				/* Log pmap_enter call */
1308
1309 pp = pmap_find_physentry(pa); /* Get the physent for this physical page */
1310
1311 if((0x00008000 >> (va >> 28)) & pmap->vflags)
1312 panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va); /* (TEST/DEBUG) panic */
1313
1314 spl=splhigh(); /* Have to disallow interrupts between the
1315 time we possibly clear a mapping and the time
1316 we get it remapped again. An I/O SLIH could
1317 try to drive an IOR using the page before
1318 we get it mapped (Dude! This was a tough
1319 bug!!!!) */
1320
1321 mapping_remove(pmap, va); /* Remove any other mapping at this address */
1322
1323 if(flags & VM_WIMG_USE_DEFAULT) {
1324 if(pp) {
1325 /* Set attr to the phys default */
1326 memattr = ((pp->pte1&0x00000078) >> 3);
1327 } else {
1328 memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
1329 }
1330 } else {
1331 memattr = flags & VM_WIMG_MASK;
1332 }
1333
1334
1335 /* Make the address mapping */
1336 mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0);
1337
1338 splx(spl); /* I'm not busy no more - come what may */
1339
1340 	debugLog2(51, prot, 0); 			/* Log pmap_enter exit */
1341
1342 #if DEBUG
1343 if (pmdebug & (PDB_USER|PDB_ENTER))
1344 kprintf("leaving pmap_enter\n");
1345 #endif
1346
1347 }
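
/*
 * Illustrative sketch (not part of the original source): how the flags argument
 * to pmap_enter() selects the WIMG attributes, per the code above. The
 * addresses are hypothetical.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t va = 0x20000000, pa = 0x04000000;		/* hypothetical page-aligned addresses */

	/* Ordinary mapping: take the WIMG bits from the page's physent, or the
	 * uncached/guarded default if the page has no physent (e.g. I/O space).
	 */
	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
		   VM_WIMG_USE_DEFAULT, TRUE);			/* TRUE = wired (see block comment above) */
#endif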
1348
1349 /*
1350 * pmap_extract(pmap, va)
1351  * returns the physical address corresponding to the
1352 * virtual address specified by pmap and va if the
1353 * virtual address is mapped and 0 if it is not.
1354 */
1355 vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {
1356
1357 spl_t spl;
1358 register struct mapping *mp, *mpv;
1359 register vm_offset_t pa;
1360 unsigned int seg;
1361 pmap_t actpmap;
1362
1363
1364 #if PMAP_LOWTRACE
1365 dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1366 #endif
1367 #if DEBUG
1368 if (pmdebug & PDB_USER)
1369 kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
1370 #endif
1371
1372 seg = va >> 28; /* Isolate segment */
1373 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1374 else actpmap = pmap; /* Otherwise use the one passed in */
1375
1376 pa = (vm_offset_t) 0; /* Clear this to 0 */
1377
1378 	debugLog2(52, actpmap->space, va); 		/* Log pmap_extract call */
1379
1380 spl = splhigh(); /* We can't allow any loss of control here */
1381
1382 if(mp=hw_lock_phys_vir(actpmap->space, va)) { /* Find the mapping for this vaddr and lock physent */
1383 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1384 panic("pmap_extract: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1385 splx(spl); /* Interruptions are cool now */
1386 return 0;
1387 }
1388
1389 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1390 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */
1391 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1392 splx(spl); /* Interruptions are cool now */
1393
1394 		debugLog2(53, pa, 0); 			/* Log pmap_extract exit */
1395
1396 return pa; /* Return the physical address... */
1397 }
1398
1399 pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1400 /* Note no nested pmaps here */
1401 splx(spl); /* Restore 'rupts */
1402 	debugLog2(53, pa, 0); 				/* Log pmap_extract exit */
1403 return pa; /* Return physical address or 0 */
1404 }
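
/*
 * Illustrative sketch (not part of the original source): translating a wired
 * kernel virtual address to its physical address, the same way pmap_create()
 * above derives pmapvr for a newly allocated pmap.
 */
#if 0	/* sketch only -- not compiled */
	vm_offset_t kva;			/* some wired kernel virtual address */
	vm_offset_t phys;

	phys = pmap_extract(kernel_pmap, kva);	/* 0 means "no translation exists" */
#endif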
1405
1406 /*
1407 * pmap_attribute_cache_sync
1408  *	Handle the machine attribute calls which involve syncing the processor
1409  *	cache.
1410 */
1411 kern_return_t
1412 pmap_attribute_cache_sync(address, size, attribute, value)
1413 vm_offset_t address;
1414 vm_size_t size;
1415 vm_machine_attribute_t attribute;
1416 vm_machine_attribute_val_t* value;
1417 {
1418 while(size) {
1419 switch (*value) { /* What type was that again? */
1420 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1421 sync_cache(address, PAGE_SIZE); /* Sync up dem caches */
1422 break; /* Done with this one here... */
1423
1424 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1425 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1426 invalidate_icache(address,
1427 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1428 break; /* Done with this one here... */
1429
1430 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1431 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1432 break; /* Done with this one here... */
1433
1434 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1435 invalidate_icache(address,
1436 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1437 break; /* Done with this one here... */
1438 }
1439 		size -= PAGE_SIZE; address += PAGE_SIZE;	/* Step to the next page in the range */
1440 	}
1441 	return KERN_SUCCESS;
1442 }
1443
1444
1445 /*
1446  *	pmap_attribute:
1447 *
1448 * Set/Get special memory attributes; Set is not implemented.
1449 *
1450 * Note: 'VAL_GET_INFO' is used to return info about a page.
1451 * If less than 1 page is specified, return the physical page
1452 * mapping and a count of the number of mappings to that page.
1453 * If more than one page is specified, return the number
1454 * of resident pages and the number of shared (more than
1455 * one mapping) pages in the range;
1456 *
1457 */
1458 kern_return_t
1459 pmap_attribute(pmap, address, size, attribute, value)
1460 pmap_t pmap;
1461 vm_offset_t address;
1462 vm_size_t size;
1463 vm_machine_attribute_t attribute;
1464 vm_machine_attribute_val_t* value;
1465 {
1466 spl_t s;
1467 vm_offset_t sva, eva;
1468 vm_offset_t pa;
1469 kern_return_t ret;
1470 register struct mapping *mp, *mpv;
1471 register struct phys_entry *pp;
1472 int total, seg;
1473 pmap_t actpmap;
1474
1475 if (attribute != MATTR_CACHE)
1476 return KERN_INVALID_ARGUMENT;
1477
1478 /* We can't get the caching attribute for more than one page
1479 * at a time
1480 */
1481 if ((*value == MATTR_VAL_GET) &&
1482 (trunc_page(address) != trunc_page(address+size-1)))
1483 return KERN_INVALID_ARGUMENT;
1484
1485 if (pmap == PMAP_NULL)
1486 return KERN_SUCCESS;
1487
1488 sva = trunc_page(address);
1489 eva = round_page(address + size);
1490 ret = KERN_SUCCESS;
1491
1492 	debugLog2(54, address, attribute); 		/* Log pmap_attribute call */
1493
1494 switch (*value) {
1495 case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */
1496 case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */
1497 case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */
1498 case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */
1499 sva = trunc_page(sva);
1500 s = splhigh();
1501
1502 while (sva < eva) {
1503 seg = sva >> 28; /* Isolate segment */
1504 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1505 else actpmap = pmap; /* Otherwise use the one passed in */
1506
1507 /*
1508 * Note: the following should work ok with nested pmaps because there are not overlayed mappings
1509 */
1510 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1511 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1512 if(!sva) break; /* We tried to wrap, kill loop... */
1513 continue; /* Check the next... */
1514 }
1515
1516 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1517 sva += PAGE_SIZE; /* Point to the next page */
1518 continue; /* Skip if the page is not mapped... */
1519 }
1520
1521 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1522 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1523 continue;
1524 }
1525
1526 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1527 if((unsigned int)mpv->physent) { /* Is there a physical entry? */
1528 pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */
1529 }
1530 else {
1531 				pa = (vm_offset_t)(mpv->PTEr & -PAGE_SIZE);	/* Otherwise take the physical page from the mapping itself */
1532 }
1533
1534 switch (*value) { /* What type was that again? */
1535 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1536 sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */
1537 break; /* Done with this one here... */
1538
1539 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1540 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1541 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1542 break; /* Done with this one here... */
1543
1544 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1545 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1546 break; /* Done with this one here... */
1547
1548 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1549 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1550 break; /* Done with this one here... */
1551 }
1552 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/
1553
1554 sva += PAGE_SIZE; /* Point to the next page */
1555 }
1556 splx(s);
1557 break;
1558
1559 case MATTR_VAL_GET_INFO: /* Get info */
1560 total = 0;
1561 s = splhigh(); /* Lock 'em out */
1562
1563 if (size <= PAGE_SIZE) { /* Do they want just one page */
1564 seg = sva >> 28; /* Isolate segment */
1565 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1566 else actpmap = pmap; /* Otherwise use the one passed in */
1567 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1568 *value = 0; /* Return nothing if no mapping */
1569 }
1570 else {
1571 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1572 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1573 }
1574 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1575 if(pp = mpv->physent) { /* Check for a physical entry */
1576 total = 0; /* Clear the count */
1577 					for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mpv->next)) total++;	/* Count the mappings by walking the chain */
1578 *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */
1579 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1580 }
1581 else { /* This is the case for an I/O mapped area */
1582 *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */
1583 }
1584 }
1585 }
1586 else {
1587 total = 0;
1588 while (sva < eva) {
1589 seg = sva >> 28; /* Isolate segment */
1590 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1591 else actpmap = pmap; /* Otherwise use the one passed in */
1592
1593 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1594 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1595 if(!sva) break; /* We tried to wrap, kill loop... */
1596 continue; /* Check the next... */
1597 }
1598 if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */
1599 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1600 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1601 continue;
1602 }
1603 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1604 total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */
1605 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1606 }
1607 sva += PAGE_SIZE;
1608 }
1609 *value = total;
1610 }
1611 splx(s);
1612 break;
1613
1614 case MATTR_VAL_GET: /* return current value */
1615 case MATTR_VAL_OFF: /* turn attribute off */
1616 case MATTR_VAL_ON: /* turn attribute on */
1617 default:
1618 ret = KERN_INVALID_ARGUMENT;
1619 break;
1620 }
1621
1622 	debugLog2(55, 0, 0); 				/* Log pmap_attribute exit */
1623
1624 return ret;
1625 }
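
/*
 * Illustrative sketch (not part of the original source): decoding the value
 * packed by the MATTR_VAL_GET_INFO case above.
 */
#if 0	/* sketch only -- not compiled */
	vm_machine_attribute_val_t info;	/* as returned through *value */

	/* Single page (size <= PAGE_SIZE): physical page in the upper bits,
	 * number of mappings to the page in the low (page-offset) bits.
	 */
	vm_offset_t  phys_page = info & -PAGE_SIZE;
	unsigned int nmappings = info & (PAGE_SIZE - 1);

	/* Larger range: 65536 is added per resident page, 1 per shared page. */
	unsigned int resident = info / 65536;
	unsigned int shared   = info % 65536;
#endif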
1626
1627 /*
1628 * pmap_sync_caches_phys(vm_offset_t pa)
1629 *
1630 * Invalidates all of the instruction cache on a physical page and
1631 * pushes any dirty data from the data cache for the same physical page
1632 */
1633
1634 void pmap_sync_caches_phys(vm_offset_t pa) {
1635
1636 spl_t s;
1637
1638 s = splhigh(); /* No interruptions here */
1639 sync_cache(trunc_page(pa), PAGE_SIZE); /* Sync up dem caches */
1640 splx(s); /* Allow interruptions */
1641 return;
1642 }
1643
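/*
 * Illustrative sketch only (an assumption, not part of the build): the
 * trunc_page() used above just rounds the physical address down to its page
 * boundary, so the entire page containing pa is pushed and invalidated.  The
 * page size below is assumed for the example.
 */
#if 0	/* example only -- not compiled */
#include <assert.h>

#define EXAMPLE_PAGE_SIZE 4096u

static unsigned int example_trunc_page(unsigned int pa)
{
	return pa & ~(EXAMPLE_PAGE_SIZE - 1);	/* round down to page boundary */
}

int main(void)
{
	assert(example_trunc_page(0x00123456u) == 0x00123000u);
	return 0;
}
#endif
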
1644 /*
1645 * pmap_collect
1646 *
1647 * Garbage collects the physical map system for pages that are no longer used.
1648 * It isn't implemented or needed or wanted.
1649 */
1650 void
1651 pmap_collect(pmap_t pmap)
1652 {
1653 return;
1654 }
1655
1656 /*
1657 * Routine: pmap_activate
1658 * Function:
1659 * Binds the given physical map to the given
1660 * processor, and returns a hardware map description.
1661 * It isn't implemented or needed or wanted.
1662 */
1663 void
1664 pmap_activate(
1665 pmap_t pmap,
1666 thread_t th,
1667 int which_cpu)
1668 {
1669 return;
1670 }
1671 /*
1672 * pmap_deactivate:
1673 * It isn't implemented or needed or wanted.
1674 */
1675 void
1676 pmap_deactivate(
1677 pmap_t pmap,
1678 thread_t th,
1679 int which_cpu)
1680 {
1681 return;
1682 }
1683
1684 #if DEBUG
1685
1686 /*
1687 * pmap_zero_page
1688 * pmap_copy_page
1689 *
1690 * are implemented in movc.s; these
1691 * are just wrappers to help debugging
1692 */
1693
1694 extern void pmap_zero_page_assembler(vm_offset_t p);
1695 extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
1696
1697 /*
1698 * pmap_zero_page(pa)
1699 *
1700 * pmap_zero_page zeros the specified (machine independent) page pa.
1701 */
1702 void
1703 pmap_zero_page(
1704 vm_offset_t p)
1705 {
1706 register struct mapping *mp;
1707 register struct phys_entry *pp;
1708
1709 if (pmdebug & (PDB_USER|PDB_ZERO))
1710 kprintf("pmap_zero_page(pa=%x)\n", p);
1711
1712 /*
1713 * XXX can these happen?
1714 */
1715 if (pmap_find_physentry(p) == PHYS_NULL)
1716 panic("zero_page: physaddr out of range");
1717
1718 pmap_zero_page_assembler(p);
1719 }
1720
1721 /*
1722 * pmap_copy_page(src, dst)
1723 *
1724 * pmap_copy_page copies the specified (machine independent)
1725 * page from physical address src to physical address dst.
1726 *
1727 * We need to invalidate the cache for address dst before
1728 * we do the copy. Apparently there won't be any mappings
1729 * to the dst address normally.
1730 */
1731 void
1732 pmap_copy_page(
1733 vm_offset_t src,
1734 vm_offset_t dst)
1735 {
1736 register struct phys_entry *pp;
1737
1738 if (pmdebug & (PDB_USER|PDB_COPY))
1739 kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
1740 if (pmdebug & PDB_COPY)
1741 kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
1742 src, dst, PAGE_SIZE);
1743
1744 pmap_copy_page_assembler(src, dst);
1745 }
1746 #endif /* DEBUG */
1747
1748 /*
1749 * pmap_pageable(pmap, s, e, pageable)
1750 * Make the specified pages (by pmap, offset)
1751 * pageable (or not) as requested.
1752 *
1753 * A page which is not pageable may not take
1754 * a fault; therefore, its page table entry
1755 * must remain valid for the duration.
1756 *
1757 * This routine is merely advisory; pmap_enter()
1758 * will specify that these pages are to be wired
1759 * down (or not) as appropriate.
1760 *
1761 * (called from vm/vm_fault.c).
1762 */
1763 void
1764 pmap_pageable(
1765 pmap_t pmap,
1766 vm_offset_t start,
1767 vm_offset_t end,
1768 boolean_t pageable)
1769 {
1770
1771 return; /* This is not used... */
1772
1773 }
1774 /*
1775 * Routine: pmap_change_wiring
1776 * NOT USED ANYMORE.
1777 */
1778 void
1779 pmap_change_wiring(
1780 register pmap_t pmap,
1781 vm_offset_t va,
1782 boolean_t wired)
1783 {
1784 return; /* This is not used... */
1785 }
1786
1787 /*
1788 * pmap_modify_pages(pmap, s, e)
1789 * sets the modified bit on all virtual addresses v in the
1790 * virtual address range determined by [s, e] and pmap,
1791 * s and e must be on machine independent page boundaries and
1792 * s must be less than or equal to e.
1793 */
1794 void
1795 pmap_modify_pages(
1796 pmap_t pmap,
1797 vm_offset_t sva,
1798 vm_offset_t eva)
1799 {
1800 spl_t spl;
1801 mapping *mp;
1802
1803 #if PMAP_LOWTRACE
1804 dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1805 #endif
1806
1807 #if DEBUG
1808 if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);
1809 #endif
1810
1811 if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */
1812
1813 debugLog2(56, sva, eva); /* Log pmap_modify_pages call */
1814
1815 spl=splhigh(); /* Don't bother me */
1816
1817 for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */
1818 mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */
1819 if(mp) { /* Did we find one? */
1820 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1821 panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1822 continue;
1823 }
1824 mp = hw_cpv(mp); /* Convert to virtual addressing */
1825 if(!mp->physent) continue; /* No physical entry means an I/O page; we can't set attributes */
1826 mapping_set_mod(mp->physent); /* Set the modified bit for this page */
1827 hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1828 }
1829 }
1830 splx(spl); /* Restore the interrupt level */
1831
1832 debugLog2(57, 0, 0); /* Log pmap_modify_pages return */
1833 return; /* Leave... */
1834 }
1835
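/*
 * Illustrative sketch only (an assumption, not part of the build): the
 * "(unsigned int)mp & 1" checks above rely on hw_lock_phys_vir() returning a
 * mapping pointer whose low bit is set when the lock on the physical entry
 * timed out; real mappings are aligned, so bit 0 is otherwise clear.  A
 * minimal standalone model of that tagged-pointer convention, with made-up
 * names:
 */
#if 0	/* example only -- not compiled */
#include <assert.h>
#include <stdint.h>

struct example_mapping { int dummy; };

#define EXAMPLE_TIMED_OUT(p)	(((uintptr_t)(p)) & 1)
#define EXAMPLE_STRIP_TAG(p)	((struct example_mapping *)(((uintptr_t)(p)) & ~(uintptr_t)1))

int main(void)
{
	static struct example_mapping m;
	struct example_mapping *ok        = &m;					/* normal return */
	struct example_mapping *timed_out = (struct example_mapping *)((uintptr_t)&m | 1);

	assert(!EXAMPLE_TIMED_OUT(ok));
	assert(EXAMPLE_TIMED_OUT(timed_out) && EXAMPLE_STRIP_TAG(timed_out) == &m);
	return 0;
}
#endif
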
1836 /*
1837 * pmap_clear_modify(phys)
1838 * clears the hardware modified ("dirty") bit for one
1839 * machine independent page starting at the given
1840 * physical address. phys must be aligned on a machine
1841 * independent page boundary.
1842 */
1843 void
1844 pmap_clear_modify(vm_offset_t pa)
1845 {
1846 register struct phys_entry *pp;
1847 spl_t spl;
1848
1849 #if PMAP_LOWTRACE
1850 dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1851 #endif
1852 #if DEBUG
1853 if (pmdebug & PDB_USER)
1854 kprintf("pmap_clear_modify(pa=%x)\n", pa);
1855 #endif
1856
1857 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1858 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1859
1860 debugLog2(58, pa, 0); /* Log pmap_clear_modify call */
1861
1862 spl=splhigh(); /* Don't bother me */
1863
1864 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1865 panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1866 splx(spl); /* Restore 'rupts */
1867 return; /* Should die before here */
1868 }
1869
1870 mapping_clr_mod(pp); /* Clear all change bits for physical page */
1871
1872 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1873 splx(spl); /* Restore the interrupt level */
1874
1875 debugLog2(59, 0, 0); /* Log pmap_clear_modify return */
1876 }
1877
1878 /*
1879 * pmap_is_modified(phys)
1880 * returns TRUE if the given physical page has been modified
1881 * since the last call to pmap_clear_modify().
1882 */
1883 boolean_t
1884 pmap_is_modified(register vm_offset_t pa)
1885 {
1886 register struct phys_entry *pp;
1887 spl_t spl;
1888 boolean_t ret;
1889
1890
1891 #if PMAP_LOWTRACE
1892 dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1893 #endif
1894 #if DEBUG
1895 if (pmdebug & PDB_USER)
1896 kprintf("pmap_is_modified(pa=%x)\n", pa);
1897 #endif
1898
1899 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1900 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1901
1902 debugLog2(60, pa, 0); /* Log pmap_is_modified call */
1903
1904 spl=splhigh(); /* Don't bother me */
1905
1906 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1907 panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1908 splx(spl); /* Restore 'rupts */
1909 return 0; /* Should die before here */
1910 }
1911
1912 ret = mapping_tst_mod(pp); /* Check for modified */
1913
1914 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1915 splx(spl); /* Restore the interrupt level */
1916
1917 debugLog2(61, ret, 0); /* Log pmap_is_modified return */
1918
1919 return ret;
1920 }
1921
1922 /*
1923 * pmap_clear_reference(phys)
1924 * clears the hardware referenced bit in the given machine
1925 * independent physical page.
1926 *
1927 */
1928 void
1929 pmap_clear_reference(vm_offset_t pa)
1930 {
1931 register struct phys_entry *pp;
1932 spl_t spl;
1933
1934
1935 #if PMAP_LOWTRACE
1936 dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1937 #endif
1938 #if DEBUG
1939 if (pmdebug & PDB_USER)
1940 kprintf("pmap_clear_reference(pa=%x)\n", pa);
1941 #endif
1942
1943 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1944 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1945
1946 debugLog2(62, pa, 0); /* Log pmap_clear_reference call */
1947
1948 spl=splhigh(); /* Don't bother me */
1949 mapping_clr_ref(pp); /* Clear all reference bits for physical page */
1950 splx(spl); /* Restore the interrupt level */
1951
1952 debugLog2(63, 0, 0); /* Log pmap_clear_reference return */
1953
1954 }
1955
1956 /*
1957 * pmap_is_referenced(phys)
1958 * returns TRUE if the given physical page has been referenced
1959 * since the last call to pmap_clear_reference().
1960 */
1961 boolean_t
1962 pmap_is_referenced(vm_offset_t pa)
1963 {
1964 register struct phys_entry *pp;
1965 spl_t spl;
1966 boolean_t ret;
1967
1968
1969 #if PMAP_LOWTRACE
1970 dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1971 #endif
1972 #if DEBUG
1973 if (pmdebug & PDB_USER)
1974 kprintf("pmap_is_referenced(pa=%x)\n", pa);
1975 #endif
1976
1977 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1978 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1979
1980 debugLog2(64, pa, 0); /* Log pmap_is_referenced call */
1981
1982 spl=splhigh(); /* Don't bother me */
1983
1984 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1985 panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1986 splx(spl); /* Restore 'rupts */
1987 return 0; /* Should die before here */
1988 }
1989
1990 ret = mapping_tst_ref(pp); /* Check for referenced */
1991
1992 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1993 splx(spl); /* Restore the interrupt level */
1994
1995 debugLog2(65, ret, 0); /* Log pmap_is_referenced return */
1996
1997 return ret;
1998 }
1999
2000 #if MACH_VM_DEBUG
2001 int
2002 pmap_list_resident_pages(
2003 register pmap_t pmap,
2004 register vm_offset_t *listp,
2005 register int space)
2006 {
2007 return 0;
2008 }
2009 #endif /* MACH_VM_DEBUG */
2010
2011 /*
2012 * Locking:
2013 * spl: VM
2014 */
2015 void
2016 pmap_copy_part_page(
2017 vm_offset_t src,
2018 vm_offset_t src_offset,
2019 vm_offset_t dst,
2020 vm_offset_t dst_offset,
2021 vm_size_t len)
2022 {
2023 register struct phys_entry *pp_src, *pp_dst;
2024 spl_t s;
2025
2026
2027 #if PMAP_LOWTRACE
2028 dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */
2029 dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */
2030 #endif
2031 s = splhigh();
2032
2033 assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE);
2034 assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE);
2035
2036 /*
2037 * Since the source and destination are physical addresses,
2038 * the copy is done physically, with data translation off, via phys_copy().
2039 */
2040 phys_copy((vm_offset_t) src+src_offset,
2041 (vm_offset_t) dst+dst_offset, len);
2042
2043 splx(s);
2044 }
2045
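/*
 * Illustrative sketch only (an assumption, not part of the build): the
 * asserts in pmap_copy_part_page() above only admit copies that stay within
 * a single page on both the source and destination sides.  The page size is
 * assumed below.
 */
#if 0	/* example only -- not compiled */
#include <assert.h>

#define EXAMPLE_PAGE_SIZE 4096u
#define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE_SIZE - 1)

int main(void)
{
	unsigned int dst = 0x00123080u, dst_offset = 0x40u, len = 0x20u;

	/* 0x80 + 0x40 + 0x20 = 0xE0, well inside one 4KB page */
	assert(((dst & EXAMPLE_PAGE_MASK) + dst_offset + len) <= EXAMPLE_PAGE_SIZE);
	return 0;
}
#endif
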
2046 void
2047 pmap_zero_part_page(
2048 vm_offset_t p,
2049 vm_offset_t offset,
2050 vm_size_t len)
2051 {
2052 panic("pmap_zero_part_page");
2053 }
2054
2055 boolean_t pmap_verify_free(vm_offset_t pa) {
2056
2057 struct phys_entry *pp;
2058
2059 #if PMAP_LOWTRACE
2060 dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */
2061 #endif
2062
2063 #if DEBUG
2064 if (pmdebug & PDB_USER)
2065 kprintf("pmap_verify_free(pa=%x)\n", pa);
2066 #endif
2067
2068 if (!pmap_initialized) return(TRUE);
2069
2070 pp = pmap_find_physentry(pa); /* Look up the physical entry */
2071 if (pp == PHYS_NULL) return FALSE; /* If there isn't one, show no mapping... */
2072 return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL); /* Otherwise, return TRUE only if no mappings exist... */
2073 }
2074
2075
2076 /* Determine if we need to switch space and set up for it if so */
2077
2078 void pmap_switch(pmap_t map)
2079 {
2080 unsigned int i;
2081
2082 #if DEBUG
2083 if (watchacts & WA_PCB) {
2084 kprintf("Switching to map at 0x%08x, space=%d\n",
2085 map,map->space);
2086 }
2087 #endif /* DEBUG */
2088
2089
2090 /* when changing to kernel space, don't bother
2091 * doing anything; the kernel is already mapped from here.
2092 */
2093 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
2094 return; /* If so, we don't do anything... */
2095 }
2096
2097 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
2098 return; /* Bye, bye, butterfly... */
2099 }
2100
2101 /*
2102 * kern_return_t pmap_nest(grand, subord, vaddr, size)
2103 *
2104 * grand = the pmap that we will nest subord into
2105 * subord = the pmap that goes into the grand
2106 * vaddr = start of range in pmap to be inserted
2107 * size = size of range in pmap to be inserted
2108 *
2109 * Inserts a pmap into another. This is used to implement shared segments.
2110 * On the current PPC processors, this is limited to segment (256MB) aligned
2111 * segment sized ranges.
2112 */
2113
2114 kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {
2115
2116 unsigned int oflags, seg, grandr;
2117 int i;
2118
2119 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2120 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2121
2122 while(1) { /* Test and set the subordinate flag */
2123 oflags = subord->vflags & ~pmapAltSeg; /* Get the old value with the flag cleared */
2124 if(subord->vflags & pmapAltSeg) { /* Are we trying to nest one that is already nested? */
2125 panic("pmap_nest: Attempt to nest an already nested pmap\n");
2126 }
2127 if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break; /* Done if we got it set */
2128 }
2129
2130 simple_lock(&grand->lock); /* Lock the superior pmap */
2131
2132 if(grand->vflags & pmapSubord) { /* Is the grand pmap itself nested? Only one level is allowed */
2133 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2134 panic("pmap_nest: Attempt to nest into subordinate pmap\n");
2135 return KERN_FAILURE; /* Shame on you */
2136 }
2137
2138 seg = vaddr >> 28; /* Isolate the segment number */
2139 if((0x00008000 >> seg) & grand->vflags) { /* See if it is already in use */
2140 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2141 panic("pmap_nest: Attempt to nest into already nested segment\n");
2142 return KERN_FAILURE; /* Shame on you */
2143 }
2144
2145 grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */
2146 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */
2147 grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */
2148
2149 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2150
2151 simple_unlock(&grand->lock); /* Unlock the grand pmap */
2152
2153
2154 /*
2155 * Note that the following will force the segment registers to be reloaded following
2156 * the next interrupt on all processors if they are using the pmap we just changed.
2157 *
2158 */
2159
2160
2161 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2162 (void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap); /* Clear if ours */
2163 }
2164
2165 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2166 }
2167
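/*
 * Illustrative sketch only (an assumption, not part of the build): on 32-bit
 * PowerPC the address space is divided into sixteen 256MB segments, so the
 * segment index used by pmap_nest() is the top four bits of the virtual
 * address and the per-segment in-use bit kept in vflags is
 * (0x00008000 >> seg), exactly as computed above.
 */
#if 0	/* example only -- not compiled */
#include <assert.h>

int main(void)
{
	unsigned int vaddr = 0x30000000u;		/* a 256MB-aligned shared segment */
	unsigned int seg   = vaddr >> 28;		/* segment index, 0..15 */
	unsigned int bit   = 0x00008000u >> seg;	/* in-use bit for that segment */

	assert((vaddr & 0x0FFFFFFFu) == 0);		/* alignment pmap_nest() requires */
	assert(seg == 3 && bit == 0x00001000u);
	return 0;
}
#endif
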
2168
2169 /*
2170 * kern_return_t pmap_unnest(grand, vaddr, size)
2171 *
2172 * grand = the pmap that the subordinate will be removed from
2173 * vaddr = start of range in pmap to be removed
2174 * size = size of range in pmap to be removed
2175 *
2176 * Removes a pmap from another. This is used to implement shared segments.
2177 * On the current PPC processors, this is limited to segment (256MB) aligned
2178 * segment sized ranges.
2179 */
2180
2181 kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {
2182
2183 unsigned int oflags, seg, grandr, tstamp;
2184 int i, tcpu, mycpu;
2185
2186 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2187 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2188
2189 simple_lock(&grand->lock); /* Lock the superior pmap */
2190 disable_preemption(); /* It's all for me! */
2191
2192 seg = vaddr >> 28; /* Isolate the segment number */
2193 if(!((0x00008000 >> seg) & grand->vflags)) { /* Make sure the segment is actually nested */
2194 enable_preemption(); /* Ok, your turn */
2195 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2196 panic("pmap_unnest: Attempt to unnest an unnested segment\n");
2197 return KERN_FAILURE; /* Shame on you */
2198 }
2199
2200 grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */
2202 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space; /* Set the vsid back to the grand's own vsid */
2203 grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */
2204
2205 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2206
2207 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2208
2209 /*
2210 * Note that the following will force the segment registers to be reloaded
2211 * on all processors (if they are using the pmap we just changed) before returning.
2212 *
2213 * This is needed. The reason is that until the segment register is
2214 * reloaded, another thread in the same task on a different processor will
2215 * be able to access memory that it isn't allowed to anymore. That can happen
2216 * because access to the subordinate pmap is being removed, but the pmap is still
2217 * valid.
2218 *
2219 * Note that we only kick the other processor if we see that it was using the pmap while we
2220 * were changing it.
2221 */
2222
2223
2224 mycpu = cpu_number(); /* Who am I? Am I just a dream? */
2225 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2226 if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) { /* Clear if ours and kick the other guy if he was using it */
2227 if(i == mycpu) continue; /* Don't diddle ourselves */
2228 tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */
2229 if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) { /* Kick the other processor so it sees the pmap change */
2230 continue;
2231 }
2232 if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) { /* Wait for the other processor to take the interruption */
2233 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
2234 }
2235 }
2236 }
2237
2238 enable_preemption(); /* Others can run now */
2239 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2240 }
2241
2242
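/*
 * pmap_ver(pmap, sva, eva)
 *
 * Debugging aid: sanity-checks the per-slot pmapUsage counts, verifies that
 * their sum matches the pmap's resident page count (retrying a few times to
 * let in-flight updates settle), and checks that the range sva..eva has no
 * remaining translations.
 */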
2243 void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {
2244
2245 int cnt, i, j, k;
2246 vm_offset_t xx;
2247
2248 if(!pmap) return;
2249
2250 sva = trunc_page(sva);
2251 eva = trunc_page(eva);
2252
2253 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2254 if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */
2255 panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
2256 i * pmapUsageSize, pmap->pmapUsage[i], pmap);
2257 }
2258 }
2259 j = 0;
2260 while(1) { /* Try multiple times */
2261 cnt = 0;
2262 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2263 cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */
2264 }
2265 if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... */
2266
2267 j++; /* Count this attempt */
2268 for(i = 0; i < 100000; i++) { /* Kill a little time before recounting */
2269 k = j + i;
2270 }
2271 if(j >= 10) {
2272 panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
2273 cnt, pmap->stats.resident_count, pmap);
2274 }
2275 }
2276
2277 for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */
2278 if(pmap_extract(pmap, xx)) {
2279 panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
2280 sva, eva, xx, pmap);
2281 }
2282 }
2283 }
2284
2285
2286 /* temporary workaround */
2287 boolean_t
2288 coredumpok(vm_map_t map, vm_offset_t va)
2289 {
2290 return TRUE;
2291 }
2292