[apple/xnu.git] / osfmk / ppc / pmap.c (xnu-124.13)
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1990,1991,1992 The University of Utah and
28 * the Center for Software Science (CSS).
29 * Copyright (c) 1991,1987 Carnegie Mellon University.
30 * All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software and its
33 * documentation is hereby granted, provided that both the copyright
34 * notice and this permission notice appear in all copies of the
35 * software, derivative works or modified versions, and any portions
36 * thereof, and that both notices appear in supporting documentation,
37 * and that all advertising materials mentioning features or use of
38 * this software display the following acknowledgement: ``This product
39 * includes software developed by the Center for Software Science at
40 * the University of Utah.''
41 *
42 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
43 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
44 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
45 * THIS SOFTWARE.
46 *
47 * CSS requests users of this software to return to css-dist@cs.utah.edu any
48 * improvements that they make and grant CSS redistribution rights.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 *
58 * Utah $Hdr: pmap.c 1.28 92/06/23$
59 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
60 */
61
62/*
63 * Manages physical address maps for powerpc.
64 *
65 * In addition to hardware address maps, this
66 * module is called upon to provide software-use-only
67 * maps which may or may not be stored in the same
68 * form as hardware maps. These pseudo-maps are
69 * used to store intermediate results from copy
70 * operations to and from address spaces.
71 *
72 * Since the information managed by this module is
73 * also stored by the logical address mapping module,
74 * this module may throw away valid virtual-to-physical
75 * mappings at almost any time. However, invalidations
76 * of virtual-to-physical mappings must be done as
77 * requested.
78 *
79 * In order to cope with hardware architectures which
80 * make virtual-to-physical map invalidates expensive,
81 * this module may delay invalidate or reduce-protection
 82 * operations until such time as they are actually
 83 * necessary. This module is given full information as to
 84 * when physical maps must be made correct.
85 *
86 */
87
88#include <zone_debug.h>
89#include <cpus.h>
90#include <debug.h>
91#include <mach_kgdb.h>
92#include <mach_vm_debug.h>
93#include <db_machine_commands.h>
94
95#include <kern/thread.h>
96#include <mach/vm_attributes.h>
97#include <mach/vm_param.h>
98#include <kern/spl.h>
99
100#include <kern/misc_protos.h>
101#include <ppc/misc_protos.h>
102#include <ppc/proc_reg.h>
103
104#include <vm/pmap.h>
105#include <vm/vm_map.h>
106#include <vm/vm_page.h>
107
108#include <ppc/pmap.h>
109#include <ppc/pmap_internals.h>
110#include <ppc/mem.h>
111#include <ppc/mappings.h>
112
113#include <ppc/new_screen.h>
114#include <ppc/Firmware.h>
115#include <ppc/savearea.h>
116#include <ddb/db_output.h>
117
118#if DB_MACHINE_COMMANDS
119/* optionally enable traces of pmap operations in post-mortem trace table */
120/* #define PMAP_LOWTRACE 1 */
121#define PMAP_LOWTRACE 0
122#else /* DB_MACHINE_COMMANDS */
123/* Can not trace even if we wanted to */
124#define PMAP_LOWTRACE 0
125#endif /* DB_MACHINE_COMMANDS */
126
127#define PERFTIMES 0
128
129#if PERFTIMES && DEBUG
130#define debugLog2(a, b, c) dbgLog2(a, b, c)
131#else
132#define debugLog2(a, b, c)
133#endif
134
135extern unsigned int avail_remaining;
136extern unsigned int mappingdeb0;
137extern struct Saveanchor saveanchor; /* Aligned savearea anchor */
138extern int real_ncpus; /* Number of actual CPUs */
139unsigned int debugbackpocket; /* (TEST/DEBUG) */
140
141vm_offset_t avail_next;
142vm_offset_t first_free_virt;
143int current_free_region; /* Used in pmap_next_page */
144
145/* forward */
146void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
147void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
148void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);
149
150#if MACH_VM_DEBUG
151int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
152#endif
153
154#if DEBUG
155#define PDB_USER 0x01 /* exported functions */
156#define PDB_MAPPING 0x02 /* low-level mapping routines */
157#define PDB_ENTER 0x04 /* pmap_enter specifics */
158#define PDB_COPY 0x08 /* copy page debugging */
159#define PDB_ZERO 0x10 /* zero page debugging */
160#define PDB_WIRED 0x20 /* things concerning wired entries */
161#define PDB_PTEG 0x40 /* PTEG overflows */
162#define PDB_LOCK 0x100 /* locks */
163#define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */
164
165int pmdebug=0;
166#endif
167
168/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
169
170extern struct pmap kernel_pmap_store;
171pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
172pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
173struct zone *pmap_zone; /* zone of pmap structures */
174boolean_t pmap_initialized = FALSE;
175
176/*
177 * Physical-to-virtual translations are handled by inverted page table
178 * structures, phys_tables. Multiple mappings of a single page are handled
179 * by linking the affected mapping structures. We initialise one region
180 * for phys_tables of the physical memory we know about, but more may be
181 * added as it is discovered (eg. by drivers).
182 */
183struct phys_entry *phys_table; /* For debugging */
184
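184
/*
 * Illustrative sketch (not part of the original source): counting the
 * mappings chained off a single phys_entry, modeled on the chain walk in
 * pmap_attribute() later in this file. A real caller must hold the
 * physent lock (PHYS_LOCK); that locking is omitted here, and an empty
 * chain is assumed to show up as a null phys_link once the PHYS_FLAGS
 * bits are stripped, so treat this as a rough outline only.
 */
#if 0	/* example only */
static int count_mappings_example(vm_offset_t pa)
{
	struct phys_entry *pp;
	struct mapping *mp, *mpv;
	int total = 0;

	pp = pmap_find_physentry(pa);		/* Get the physent for this page */
	if (pp == PHYS_NULL) return 0;		/* Not managed physical RAM */

	mp = (struct mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);	/* First mapping, flag bits stripped */
	while (mp != NULL) {			/* Walk the linked mappings for this page */
		mpv = hw_cpv(mp);		/* Convert to a virtual pointer before dereferencing */
		total++;
		mp = mpv->next;			/* On to the next mapping in the chain */
	}
	return total;
}
#endif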
185lock_t pmap_system_lock;
186
187decl_simple_lock_data(,tlb_system_lock)
188
189/*
190 * free pmap list. caches the first free_pmap_max pmaps that are freed up
191 */
192int free_pmap_max = 32;
193int free_pmap_count;
194pmap_t free_pmap_list;
195decl_simple_lock_data(,free_pmap_lock)
196
197/*
198 * Function to get index into phys_table for a given physical address
199 */
200
201struct phys_entry *pmap_find_physentry(vm_offset_t pa)
202{
203 int i;
204 struct phys_entry *entry;
205
206 for (i = pmap_mem_regions_count-1; i >= 0; i--) {
207 if (pa < pmap_mem_regions[i].start)
208 continue;
209 if (pa >= pmap_mem_regions[i].end)
210 return PHYS_NULL;
211
212 entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
213 __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */
214 return entry;
215 }
216 kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa);
217 return PHYS_NULL;
218}
219
220/*
221 * kern_return_t
222 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
223 * boolean_t available, unsigned int attr)
224 * Allocate some extra physentries for the physical addresses given,
225 * specifying a default attribute that on the PowerPC determines
 226 * the default cacheability for any mappings using these addresses.
227 * If the memory is marked as available, it is added to the general
228 * VM pool, otherwise it is not (it is reserved for card IO etc).
229 */
230kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
231 boolean_t available, unsigned int attr)
232{
233 int i,j;
234 spl_t s;
235
236 /* Only map whole pages */
237
238 panic("Forget it! You can't map no more memory, you greedy puke!\n");
239
240 spa = trunc_page(spa);
241 epa = round_page(epa);
242
243 /* First check that the region doesn't already exist */
244
245 assert (epa >= spa);
246 for (i = 0; i < pmap_mem_regions_count; i++) {
247 /* If we're below the next region, then no conflict */
248 if (epa < pmap_mem_regions[i].start)
249 break;
250 if (spa < pmap_mem_regions[i].end) {
251#if DEBUG
252 kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr);
253#endif /* DEBUG */
254 return KERN_NO_SPACE;
255 }
256 }
257
258#if DEBUG
259 kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */
260#endif
261
262 /* Check that we've got enough space for another region */
263 if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
264 return KERN_RESOURCE_SHORTAGE;
265
266 /* Once here, i points to the mem_region above ours in physical mem */
267
268 /* allocate a new phys_table for this new region */
269#if DEBUG
270 kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */
271#endif
272
273 phys_table = (struct phys_entry *)
274 kalloc(sizeof(struct phys_entry) * atop(epa-spa));
275#if DEBUG
276 kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */
277#endif
278
279 /* Initialise the new phys_table entries */
280 for (j = 0; j < atop(epa-spa); j++) {
281
282 phys_table[j].phys_link = MAPPING_NULL;
283
284 mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */
285
286 }
287 s = splhigh();
288
289 /* Move all the phys_table entries up some to make room in
290 * the ordered list.
291 */
292 for (j = pmap_mem_regions_count; j > i ; j--)
293 pmap_mem_regions[j] = pmap_mem_regions[j-1];
294
295 /* Insert a new entry with some memory to back it */
296
297 pmap_mem_regions[i].start = spa;
298 pmap_mem_regions[i].end = epa;
299 pmap_mem_regions[i].phys_table = phys_table;
300
301 pmap_mem_regions_count++;
302 splx(s);
303
304#if DEBUG
305 for(i=0; i<pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
306 kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
307 pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
308 }
309#endif
310
311 if (available) {
312 kprintf("warning : pmap_add_physical_mem() "
313 "available not yet supported\n");
314 }
315
316 return KERN_SUCCESS;
317}
318
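318
/*
 * Illustrative sketch (not part of the original source): the intended call
 * shape for pmap_add_physical_memory() above, per its header comment -
 * registering card/I/O memory discovered after boot, kept out of the
 * general VM pool and defaulted to I/O (cache-inhibited) attributes.
 * The routine currently panics unconditionally, and the address range
 * below is a made-up placeholder, so this documents intent only.
 */
#if 0	/* example only */
static void add_card_memory_example(void)
{
	vm_offset_t card_base = 0xF0000000;	/* Hypothetical card aperture start */
	vm_offset_t card_end  = 0xF0100000;	/* Hypothetical end (1MB later) */

	(void)pmap_add_physical_memory(card_base, card_end,
				       FALSE,		/* Not "available": don't add to the VM free pool */
				       PTE_WIMG_IO);	/* Default these pages to I/O caching attributes */
}
#endif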
319/*
320 * pmap_map(va, spa, epa, prot)
321 * is called during boot to map memory in the kernel's address map.
322 * A virtual address range starting at "va" is mapped to the physical
323 * address range "spa" to "epa" with machine independent protection
324 * "prot".
325 *
326 * "va", "spa", and "epa" are byte addresses and must be on machine
327 * independent page boundaries.
328 *
329 * The range covers pages with contiguous virtual addresses and the same protection
 330 * and attributes; therefore, we map it with a single block.
331 *
332 */
333vm_offset_t
334pmap_map(
335 vm_offset_t va,
336 vm_offset_t spa,
337 vm_offset_t epa,
338 vm_prot_t prot)
339{
340
341
342 if (spa == epa)
343 return(va);
344
345 assert(epa > spa);
346 debugLog2(40, va, spa); /* Log pmap_map call */
347
348 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */
349
350 debugLog2(41, epa, prot); /* Log pmap_map exit */
351
352 return(va);
353}
354
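354
/*
 * Illustrative sketch (not part of the original source): pmap_map() is used
 * during boot with va == spa so the resulting permanent block mapping is
 * V=R; pmap_bootstrap() below does exactly this for its own tables.
 */
#if 0	/* example only */
static void map_vr_example(vm_offset_t area_start, vm_offset_t area_end)
{
	(void)pmap_map(area_start, area_start,	/* va == spa, so virtual equals real */
		       area_end, VM_PROT_READ | VM_PROT_WRITE);
}
#endif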
355/*
356 * pmap_map_bd(va, spa, epa, prot)
357 * Back-door routine for mapping kernel VM at initialisation.
358 * Used for mapping memory outside the known physical memory
359 * space, with caching disabled. Designed for use by device probes.
360 *
361 * A virtual address range starting at "va" is mapped to the physical
362 * address range "spa" to "epa" with machine independent protection
363 * "prot".
364 *
365 * "va", "spa", and "epa" are byte addresses and must be on machine
366 * independent page boundaries.
367 *
368 * WARNING: The current version of memcpy() can use the dcbz instruction
369 * on the destination addresses. This will cause an alignment exception
370 * and consequent overhead if the destination is caching-disabled. So
371 * avoid memcpy()ing into the memory mapped by this function.
372 *
373 * Also, many other pmap_ routines will misbehave if you try to change
 374 * protections or remove these mappings; they are designed to be permanent.
 375 *
 376 * These areas will be added to the autogen list, if possible. Existing translations
 377 * are overridden and their mapping structures are released. This takes place in
378 * the autogen_map function.
379 *
380 * Locking:
381 * this routine is called only during system initialization when only
382 * one processor is active, so no need to take locks...
383 */
384vm_offset_t
385pmap_map_bd(
386 vm_offset_t va,
387 vm_offset_t spa,
388 vm_offset_t epa,
389 vm_prot_t prot)
390{
391 register struct mapping *mp;
392 register struct phys_entry *pp;
393
394
395 if (spa == epa)
396 return(va);
397
398 assert(epa > spa);
399
400 debugLog2(42, va, epa); /* Log pmap_map_bd call */
401
402 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */
403
404 debugLog2(43, epa, prot); /* Log pmap_map_bd exit */
405
406 return(va);
407}
408
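408
/*
 * Illustrative sketch (not part of the original source): probing a device
 * register through a pmap_map_bd() window. Per the warning above, the
 * cache-inhibited area is touched with simple volatile accesses rather
 * than memcpy(). The addresses are placeholders.
 */
#if 0	/* example only */
static unsigned int probe_device_example(vm_offset_t probe_va, vm_offset_t dev_pa)
{
	volatile unsigned int *reg;

	(void)pmap_map_bd(probe_va, dev_pa, dev_pa + PAGE_SIZE,	/* One cache-inhibited page */
			  VM_PROT_READ | VM_PROT_WRITE);

	reg = (volatile unsigned int *)probe_va;
	return *reg;						/* Single load; no memcpy() into this area */
}
#endif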
409/*
410 * Bootstrap the system enough to run with virtual memory.
411 * Map the kernel's code and data, and allocate the system page table.
412 * Called with mapping done by BATs. Page_size must already be set.
413 *
414 * Parameters:
415 * mem_size: Total memory present
416 * first_avail: First virtual address available
417 * first_phys_avail: First physical address available
418 */
419void
420pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
421{
422 register struct mapping *mp;
423 vm_offset_t addr;
424 vm_size_t size;
425 int i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
426 unsigned int mask;
427 vm_offset_t first_used_addr;
428 PCA *pcaptr;
429 savectl *savec, *savec2;
430 vm_offset_t save, save2;
431
432 *first_avail = round_page(*first_avail);
433
434#if DEBUG
435 kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
436 *first_avail, *first_phys_avail, avail_remaining);
437#endif
438
439 assert(PAGE_SIZE == PPC_PGBYTES);
440
441 /*
442 * Initialize kernel pmap
443 */
444 kernel_pmap = &kernel_pmap_store;
445 cursor_pmap = &kernel_pmap_store;
446
447 lock_init(&pmap_system_lock,
448 FALSE, /* NOT a sleep lock */
449 ETAP_VM_PMAP_SYS,
450 ETAP_VM_PMAP_SYS_I);
451
452 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
453
454 kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
455 kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
456 kernel_pmap->ref_count = 1;
457 kernel_pmap->space = PPC_SID_KERNEL;
458 kernel_pmap->pmapvr = 0; /* Virtual = Real */
459 kernel_pmap->bmaps = 0; /* No block pages just yet */
460 for(i=0; i < 128; i++) { /* Clear usage slots */
461 kernel_pmap->pmapUsage[i] = 0;
462 }
463 for(i=0; i < 16; i++) { /* Initialize for laughs */
464 kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
465 }
466
467 /*
468 * Allocate: (from first_avail up)
469 * Aligned to its own size:
470 * hash table (for mem size 2**x, allocate 2**(x-10) entries)
471 * mapping table (same size and immediately following hash table)
472 */
473 /* hash_table_size must be a power of 2, recommended sizes are
474 * taken from PPC601 User Manual, table 6-19. We take the next
475 * highest size if mem_size is not a power of two.
476 * TODO NMGS make this configurable at boot time.
477 */
478
479 num = sizeof(pte_t) * (mem_size >> 10);
480
481 for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */
482 hash_table_size < num;
483 hash_table_size *= 2)
484 continue;
485
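485
	/*
	 * Worked example (illustrative, not part of the original source),
	 * assuming sizeof(pte_t) is 8 bytes: with mem_size = 64MB,
	 * num = 8 * (64MB >> 10) = 512KB, so the loop above doubles
	 * 64K -> 128K -> 256K -> 512K and settles on a 512KB table, i.e.
	 * 2**16 PTE slots for a 2**26 byte memory - the "2**(x-10) entries"
	 * rule of thumb mentioned above.
	 */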
486 /* Scale to within any physical memory layout constraints */
487 do {
488 num = atop(mem_size); /* num now holds mem_size in pages */
489
490 /* size of all structures that we're going to allocate */
491
492 size = (vm_size_t) (
493 (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */
494 (8 * PAGE_SIZE) + /* For backpocket saveareas */
495 hash_table_size + /* For hash table */
496 hash_table_size + /* For PTEG allocation table */
497 (num * sizeof(struct phys_entry)) /* For the physical entries */
498 );
499
500 mapsize = size = round_page(size); /* Get size of area to map that we just calculated */
501 mapsize = mapsize + kmapsize; /* Account for the kernel text size */
502
503 vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
504 vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */
505
506 mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */
507
508 mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
509 mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */
510 mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */
511
512#if DEBUG
513 kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
514 kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
515 kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
516 kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
517 kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
518#endif
519
520 size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */
521
522 /* hash table must be aligned to its size */
523
524 addr = (*first_avail +
525 (hash_table_size-1)) & ~(hash_table_size-1);
526
527 if (addr + size > pmap_mem_regions[0].end) {
528 hash_table_size /= 2;
529 } else {
530 break;
531 }
532 /* If we have had to shrink the hash table too far, panic */
533 if (hash_table_size == 32 * 1024)
534 panic("cannot lay out pmap memory map correctly");
535 } while (1);
536
537#if DEBUG
538 kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
539 hash_table_size, size, addr);
540#endif
541 if (round_page(*first_phys_avail) < trunc_page(addr)) {
542 /* We are stepping over at least one page here, so
543 * add this region to the free regions so that it can
544 * be allocated by pmap_steal
545 */
546 free_regions[free_regions_count].start = round_page(*first_phys_avail);
547 free_regions[free_regions_count].end = trunc_page(addr);
548
549 avail_remaining += (free_regions[free_regions_count].end -
550 free_regions[free_regions_count].start) /
551 PPC_PGBYTES;
552#if DEBUG
553 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
554 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
555 avail_remaining);
556#endif /* DEBUG */
557 free_regions_count++;
558 }
559
560 /* Zero everything - this also invalidates the hash table entries */
561 bzero((char *)addr, size);
562
563 /* Set up some pointers to our new structures */
564
565 /* from here, addr points to the next free address */
566
567 first_used_addr = addr; /* remember where we started */
568
569 /* Set up hash table address and dma buffer address, keeping
570 * alignment. These mappings are all 1-1, so dma_r == dma_v
571 *
572 * If hash_table_size == dma_buffer_alignment, then put hash_table
573 * first, since dma_buffer_size may be smaller than alignment, but
574 * hash table alignment==hash_table_size.
575 */
576 hash_table_base = addr;
577
578 addr += hash_table_size;
579 addr += hash_table_size; /* Add another for the PTEG Control Area */
580 assert((hash_table_base & (hash_table_size-1)) == 0);
581
582 pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */
583
584 for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */
585 pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */
586 pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */
587 }
588
589/*
590 * Allocate our initial context save areas. As soon as we do this,
591 * we can take an interrupt. We do the saveareas here, 'cause they're guaranteed
592 * to be at least page aligned.
593 */
594 save2 = addr; /* Remember first page */
595 save = addr; /* Point to the whole block of blocks */
596 savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl)); /* Point to the first's control area */
597
598 for(i=0; i < InitialSaveBloks; i++) { /* Initialize the saveareas */
599
600 savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl)); /* Get the control area for this one */
601
602 savec->sac_alloc = sac_empty; /* Mark both free */
603 savec->sac_vrswap = 0; /* V=R, so the translation factor is 0 */
604 savec->sac_flags = sac_perm; /* Mark it permanent */
605
606 savec->sac_flags |= 0x0000EE00; /* (TEST/DEBUG) */
607
608 save += PAGE_SIZE; /* Jump up to the next one now */
609
610 savec->sac_next = (unsigned int *)save; /* Link these two */
611
612 }
613
614 savec->sac_next = (unsigned int *)0; /* Clear the forward pointer for the last */
615 savec2->sac_alloc &= 0x7FFFFFFF; /* Mark the first one in use */
616
617 saveanchor.savefree = (unsigned int)save2; /* Point to the first one */
618 saveanchor.savecount = InitialSaveBloks * sac_cnt; /* The total number of save areas allocated */
619 saveanchor.saveinuse = 1; /* Number of areas in use */
620 saveanchor.savemin = InitialSaveMin; /* We abend if lower than this */
621 saveanchor.saveneghyst = InitialNegHysteresis; /* The minimum number to keep free (must be a multiple of sac_cnt) */
622 saveanchor.savetarget = InitialSaveTarget; /* The target point for free save areas (must be a multiple of sac_cnt) */
623 saveanchor.saveposhyst = InitialPosHysteresis; /* The high water mark for free save areas (must be a multiple of sac_cnt) */
624 __asm__ volatile ("mtsprg 1, %0" : : "r" (save2)); /* Tell the exception handler about it */
625
626 addr += InitialSaveBloks * PAGE_SIZE; /* Move up the next free address */
627
628 save2 = addr;
629 save = addr;
630 savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl));
631
632 for(i=0; i < 8; i++) { /* Allocate backpocket saveareas */
633
634 savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl));
635
636 savec->sac_alloc = sac_empty;
637 savec->sac_vrswap = 0;
638 savec->sac_flags = sac_perm;
639 savec->sac_flags |= 0x0000EE00;
640
641 save += PAGE_SIZE;
642
643 savec->sac_next = (unsigned int *)save;
644
645 }
646
647 savec->sac_next = (unsigned int *)0;
648 savec2->sac_alloc &= 0x7FFFFFFF;
649 debugbackpocket = save2;
650 addr += 8 * PAGE_SIZE;
651
652 /* phys_table is static to help debugging;
653 * this variable is no longer actually used
654 * outside of this scope
655 */
656
657 phys_table = (struct phys_entry *) addr;
658
659#if DEBUG
660 kprintf("hash_table_base =%08X\n", hash_table_base);
661 kprintf("phys_table =%08X\n", phys_table);
662 kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count);
663#endif
664
665 for (i = 0; i < pmap_mem_regions_count; i++) {
666
667 pmap_mem_regions[i].phys_table = phys_table;
668 rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE;
669
670#if DEBUG
671 kprintf("Initializing physical table for region %d\n", i);
672 kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n",
673 phys_table, rsize, pmap_mem_regions[i].start,
674 (unsigned int)pmap_mem_regions[i].end);
675#endif
676
677 for (j = 0; j < rsize; j++) {
678 phys_table[j].phys_link = MAPPING_NULL;
679 mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE),
680 PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */
681 }
682 phys_table = phys_table +
683 atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
684 }
685
686 /* restore phys_table for debug */
687 phys_table = (struct phys_entry *) addr;
688
689 addr += sizeof(struct phys_entry) * num;
690
691 simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
692
693 /* Initialise the registers necessary for supporting the hashtable */
694#if DEBUG
695 kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);
696#endif
697
698 hash_table_init(hash_table_base, hash_table_size);
699
700/*
701 * Remaining space is for mapping entries. Tell the initializer routine that
702 * the mapping system can't release this block because it's permanently assigned
703 */
704
705 mapping_init(); /* Initialize the mapping tables */
706
707 for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */
708 mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */
709 }
710 mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */
711
712#if DEBUG
713
714 kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
715 first_used_addr, round_page(first_used_addr+size),
716 first_used_addr);
717#endif /* DEBUG */
718
719 /* Map V=R the page tables */
720 pmap_map(first_used_addr, first_used_addr,
721 round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE);
722
723#if DEBUG
724
725 for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */
726 if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */
727 kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
728 }
729 }
730#endif
731
732 *first_avail = round_page(first_used_addr + size);
733 first_free_virt = round_page(first_used_addr + size);
734
735 /* All the rest of memory is free - add it to the free
736 * regions so that it can be allocated by pmap_steal
737 */
738 free_regions[free_regions_count].start = *first_avail;
739 free_regions[free_regions_count].end = pmap_mem_regions[0].end;
740
741 avail_remaining += (free_regions[free_regions_count].end -
742 free_regions[free_regions_count].start) /
743 PPC_PGBYTES;
744
745#if DEBUG
746 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
747 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
748 avail_remaining);
749#endif /* DEBUG */
750
751 free_regions_count++;
752
753 current_free_region = 0;
754
755 avail_next = free_regions[current_free_region].start;
756
757#if DEBUG
758 kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */
759 kprintf("Current free region=%d\n",current_free_region); /* (TEST/DEBUG) */
760 for(i=0;i<free_regions_count; i++) { /* (TEST/DEBUG) */
761 kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
762 free_regions[i].end); /* (TEST/DEBUG) */
763 }
764 for (i = 0; i < pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
765 kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i, /* (TEST/DEBUG) */
766 pmap_mem_regions[i].start, /* (TEST/DEBUG) */
767 pmap_mem_regions[i].end, /* (TEST/DEBUG) */
768 pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
769 }
770#endif
771
772}
773
774/*
775 * pmap_init()
776 * finishes the initialization of the pmap module.
777 * This procedure is called from vm_mem_init() in vm/vm_init.c
778 * to initialize any remaining data structures that the pmap module
779 * needs to map virtual memory (VM is already ON).
780 *
781 * Note that the pmap needs to be sized and aligned to
782 * a power of two. This is because it is used both in virtual and
783 * real mode, so it can't span a page boundary.
784 */
785
786void
787pmap_init(void)
788{
789
790
791 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
792#if ZONE_DEBUG
793 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
794#endif /* ZONE_DEBUG */
795
796 pmap_initialized = TRUE;
797
798 /*
799 * Initialize list of freed up pmaps
800 */
801 free_pmap_list = 0; /* Set that there are no free pmaps */
802 free_pmap_count = 0;
803 simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
804}
805
806unsigned int pmap_free_pages(void)
807{
808 return avail_remaining;
809}
810
811boolean_t pmap_next_page(vm_offset_t *addrp)
812{
813 /* Non-optimal, but only used for virtual memory startup.
814 * Allocate memory from a table of free physical addresses
815 * If there are no more free entries, too bad. We have two
816 * tables to look through, free_regions[] which holds free
817 * regions from inside pmap_mem_regions[0], and the others...
818 * pmap_mem_regions[1..]
819 */
820
821 /* current_free_region indicates the next free entry,
822 * if it's less than free_regions_count, then we're still
823 * in free_regions, otherwise we're in pmap_mem_regions
824 */
825
826 if (current_free_region >= free_regions_count) {
827 /* We're into the pmap_mem_regions, handle this
828 * separately from free_regions
829 */
830
831 int current_pmap_mem_region = current_free_region -
832 free_regions_count + 1;
833 if (current_pmap_mem_region > pmap_mem_regions_count)
834 return FALSE;
835 *addrp = avail_next;
836 avail_next += PAGE_SIZE;
837 avail_remaining--;
838 if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
839 current_free_region++;
840 current_pmap_mem_region++;
841 avail_next = pmap_mem_regions[current_pmap_mem_region].start;
842#if DEBUG
843 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
844#endif /* DEBUG */
845 }
846 return TRUE;
847 }
848
849 /* We're in the free_regions, allocate next page and increment
850 * counters
851 */
852 *addrp = avail_next;
853
854 avail_next += PAGE_SIZE;
855 avail_remaining--;
856
857 if (avail_next >= free_regions[current_free_region].end) {
858 current_free_region++;
859 if (current_free_region < free_regions_count)
860 avail_next = free_regions[current_free_region].start;
861 else
862 avail_next = pmap_mem_regions[current_free_region -
863 free_regions_count + 1].start;
864#if DEBUG
865 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
866#endif
867 }
868 return TRUE;
869}
870
871void pmap_virtual_space(
872 vm_offset_t *startp,
873 vm_offset_t *endp)
874{
875 *startp = round_page(first_free_virt);
876 *endp = VM_MAX_KERNEL_ADDRESS;
877}
878
879/*
880 * pmap_create
881 *
882 * Create and return a physical map.
883 *
884 * If the size specified for the map is zero, the map is an actual physical
885 * map, and may be referenced by the hardware.
886 *
887 * A pmap is either in the free list or in the in-use list. The only use
888 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
889 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
 890 * in-use list is searched until a hole in the VSID sequence is found. (Note
891 * that the in-use pmaps are queued in VSID sequence order.) This is all done
892 * while free_pmap_lock is held.
893 *
894 * If the size specified is non-zero, the map will be used in software
895 * only, and is bounded by that size.
896 */
897pmap_t
898pmap_create(vm_size_t size)
899{
900 pmap_t pmap, ckpmap, fore, aft;
901 int s, i;
902 space_t sid;
903 unsigned int currSID;
904
905#if PMAP_LOWTRACE
906 dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */
907#endif
908
909#if DEBUG
910 if (pmdebug & PDB_USER)
911 kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');
912#endif
913
914 /*
915 * A software use-only map doesn't even need a pmap structure.
916 */
917 if (size)
918 return(PMAP_NULL);
919
920 /*
921 * If there is a pmap in the pmap free list, reuse it.
922 * Note that we use free_pmap_list for all chaining of pmaps, both to
923 * the free list and the in use chain (anchored from kernel_pmap).
924 */
925 s = splhigh();
926 simple_lock(&free_pmap_lock);
927
928 if(free_pmap_list) { /* Any free? */
929 pmap = free_pmap_list; /* Yes, allocate it */
930 free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */
931 free_pmap_count--;
932 }
933 else {
934 simple_unlock(&free_pmap_lock); /* Unlock just in case */
935 splx(s);
936
937 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
938 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
939
940 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
941
942 s = splhigh();
943 simple_lock(&free_pmap_lock); /* Lock it back up */
944
945 ckpmap = cursor_pmap; /* Get starting point for free ID search */
946 currSID = ckpmap->spaceNum; /* Get the actual space ID number */
947
948 while(1) { /* Keep trying until something happens */
949
950 currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */
951 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
952
953 if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */
954
955 if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */
956 panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* Die pig dog */
957 }
958 }
959
960 pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */
961 pmap->spaceNum = currSID; /* Set the space ID number */
962
963/*
964 * Now we link into the chain just before the out of sequence guy.
965 */
966
967 fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */
968 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
969 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
970 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
971 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
972
973 simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
974 pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap); /* Get physical pointer to the pmap and make mask */
975 }
976 pmap->ref_count = 1;
977 pmap->stats.resident_count = 0;
978 pmap->stats.wired_count = 0;
979 pmap->bmaps = 0; /* Clear block map pointer to 0 */
980 pmap->vflags = 0; /* Mark all alternates invalid for now */
981 for(i=0; i < 128; i++) { /* Clean out usage slots */
982 pmap->pmapUsage[i] = 0;
983 }
984 for(i=0; i < 16; i++) { /* Initialize for laughs */
985 pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
986 }
987
988#if PMAP_LOWTRACE
989 dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space); /* (TEST/DEBUG) */
990#endif
991
992#if DEBUG
993 if (pmdebug & PDB_USER)
994 kprintf("-> %x, space id = %d\n", pmap, pmap->space);
995#endif
996
997 simple_unlock(&free_pmap_lock);
998 splx(s);
999 return(pmap);
1000}
1001
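1001
/*
 * Illustrative sketch (not part of the original source): the basic lifecycle
 * of a pmap through the entry points in this file. The addresses are
 * placeholders; the real callers are the machine-independent VM layer.
 */
#if 0	/* example only */
static void pmap_lifecycle_example(vm_offset_t some_va, vm_offset_t some_pa)
{
	pmap_t map;
	vm_offset_t pa;

	map = pmap_create(0);				/* Size 0 => a real, hardware-usable pmap */
	if (map == PMAP_NULL) return;			/* Out of memory */

	pmap_enter(map, some_va, some_pa,		/* Establish one translation... */
		   VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* ...not wired */

	pa = pmap_extract(map, some_va);		/* Gives back some_pa (page plus byte offset) */

	pmap_remove(map, some_va, some_va + PAGE_SIZE);	/* Tear the translation down */
	pmap_destroy(map);				/* Drop the last reference; pmap goes to the free list */
	(void)pa;
}
#endif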
1002/*
1003 * pmap_destroy
1004 *
1005 * Gives up a reference to the specified pmap. When the reference count
1006 * reaches zero the pmap structure is added to the pmap free list.
1007 *
1008 * Should only be called if the map contains no valid mappings.
1009 */
1010void
1011pmap_destroy(pmap_t pmap)
1012{
1013 int ref_count;
1014 spl_t s;
1015 pmap_t fore, aft;
1016
1017#if PMAP_LOWTRACE
1018 dbgTrace(0xF1D00003, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
1019#endif
1020
1021#if DEBUG
1022 if (pmdebug & PDB_USER)
1023 kprintf("pmap_destroy(pmap=%x)\n", pmap);
1024#endif
1025
1026 if (pmap == PMAP_NULL)
1027 return;
1028
1029 ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */
1030 if(ref_count>0) return; /* Still more users, leave now... */
1031
1032 if(ref_count < 0) /* Did we go too far? */
1033 panic("pmap_destroy(): ref_count < 0");
1034
1035#ifdef notdef
1036 if(pmap->stats.resident_count != 0)
1037 panic("PMAP_DESTROY: pmap not empty");
1038#else
1039 if(pmap->stats.resident_count != 0) {
1040 pmap_remove(pmap, 0, 0xFFFFF000);
1041 }
1042#endif
1043
1044 /*
1045 * Add the pmap to the pmap free list.
1046 */
1047
1048 s = splhigh();
1052 simple_lock(&free_pmap_lock);
1053
1054 if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */
1055
1056 pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */
1057 free_pmap_list = pmap;
1058 free_pmap_count++;
1059 simple_unlock(&free_pmap_lock);
1060
1061 } else {
1062 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
1063 fore = (pmap_t)pmap->pmap_link.prev;
1064 aft = (pmap_t)pmap->pmap_link.next;
1065 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
1066 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
1067 simple_unlock(&free_pmap_lock);
1068 zfree(pmap_zone, (vm_offset_t) pmap);
1069 }
1070 splx(s);
1071}
1072
1073/*
1074 * pmap_reference(pmap)
1075 * gains a reference to the specified pmap.
1076 */
1077void
1078pmap_reference(pmap_t pmap)
1079{
1080 spl_t s;
1081
1082#if PMAP_LOWTRACE
1083 dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
1084#endif
1085
1086#if DEBUG
1087 if (pmdebug & PDB_USER)
1088 kprintf("pmap_reference(pmap=%x)\n", pmap);
1089#endif
1090
1091 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
1092}
1093
1094/*
1095 * pmap_remove(pmap, s, e)
1096 * unmaps all virtual addresses v in the virtual address
1097 * range determined by [s, e) and pmap.
1098 * s and e must be on machine independent page boundaries and
1099 * s must be less than or equal to e.
1100 *
1101 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
1102 * skip those segments.
1103 */
1104void
1105pmap_remove(
1106 pmap_t pmap,
1107 vm_offset_t sva,
1108 vm_offset_t eva)
1109{
1110 spl_t spl;
1111 struct mapping *mp, *blm;
1112 vm_offset_t lpage;
1113
1114#if PMAP_LOWTRACE
1115 dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */
1116#endif
1117
1118#if DEBUG
1119 if (pmdebug & PDB_USER)
1120 kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
1121 pmap, sva, eva);
1122#endif
1123
1124 if (pmap == PMAP_NULL)
1125 return;
1126
1127 /* It is just possible that eva might have wrapped around to zero,
1128 * and sometimes we get asked to liberate something of size zero
1129 * even though it's dumb (eg. after zero length read_overwrites)
1130 */
1131 assert(eva >= sva);
1132
1133 /* If these are not page aligned the loop might not terminate */
1134 assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));
1135
1136 /* We liberate addresses from high to low, since the stack grows
1137 * down. This means that we won't need to test addresses below
1138 * the limit of stack growth
1139 */
1140
1141 debugLog2(44, sva, eva); /* Log pmap_remove call */
1142
1143 sva = trunc_page(sva); /* Make it clean */
1144 lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */
1145
1146/*
1147 * Here we will remove all of the block mappings that overlap this range.
1148 * hw_rem_blk removes one mapping in the range and returns. If it returns
1149 * 0, there are no blocks in the range.
1150 */
1151
1152 while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */
1153 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
1154 blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE)); /* Get virtual address */
1155 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
1156 pmap, sva, blm);
1157 }
1158 mapping_free(hw_cpv(mp)); /* Release it */
1159 }
1160
1161 while (pmap->stats.resident_count && (eva > sva)) {
1162
1163 eva -= PAGE_SIZE; /* Back up a page */
1164
1165#if 1
1166 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1167 panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1168#endif
1169 if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1170 eva = eva & (-pmapUsageSize); /* Back up into the previous slot */
1171 continue; /* Check the next... */
1172 }
1173 mapping_remove(pmap, eva); /* Remove the mapping for this address */
1174 }
1175
1176 debugLog2(45, 0, 0); /* Log pmap_remove exit */
1177}
1178
1179/*
1180 * Routine:
1181 * pmap_page_protect
1182 *
1183 * Function:
1184 * Lower the permission for all mappings to a given page.
1185 */
1186void
1187pmap_page_protect(
1188 vm_offset_t pa,
1189 vm_prot_t prot)
1190{
1191 register struct phys_entry *pp;
1192 boolean_t remove;
1193
1194
1195#if PMAP_LOWTRACE
1196 dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1197#endif
1198
1199#if DEBUG
1200 if (pmdebug & PDB_USER)
1201 kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);
1202#endif
1203
1204 debugLog2(46, pa, prot); /* Log pmap_page_protect call */
1205
1206 switch (prot) {
1207 case VM_PROT_READ:
1208 case VM_PROT_READ|VM_PROT_EXECUTE:
1209 remove = FALSE;
1210 break;
1211 case VM_PROT_ALL:
1212 return;
1213 default:
1214 remove = TRUE;
1215 break;
1216 }
1217
1218 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1219 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1220
1221 if (remove) { /* If the protection was set to none, we'll remove all mappings */
1222 mapping_purge(pp); /* Get rid of them all */
1223
1224 debugLog2(47, 0, 0); /* Log pmap_page_protect exit */
1225 return; /* Leave... */
1226 }
1227
1228 /* When we get here, it means that we are to change the protection for a
1229 * physical page.
1230 */
1231
1232 mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */
1233
1234 debugLog2(47, 1, 0); /* Log pmap_page_protect exit */
1235}
1236
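1236
/*
 * Illustrative sketch (not part of the original source): the two common uses
 * of pmap_page_protect() above - downgrading every mapping of a physical
 * page to read-only, or removing all of its mappings outright.
 */
#if 0	/* example only */
static void page_protect_example(vm_offset_t pa)
{
	pmap_page_protect(pa, VM_PROT_READ);	/* All mappings of pa become read-only */
	pmap_page_protect(pa, VM_PROT_NONE);	/* All mappings of pa are removed */
}
#endif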
1237/*
1238 * pmap_protect(pmap, s, e, prot)
1239 * changes the protection on all virtual addresses v in the
1240 * virtual address range determined by [s, e] and pmap to prot.
1241 * s and e must be on machine independent page boundaries and
1242 * s must be less than or equal to e.
1243 *
1244 * Note that any requests to change the protection of a nested pmap are
1245 * ignored. Those changes MUST be done by calling this with the correct pmap.
1246 */
1247void pmap_protect(
1248 pmap_t pmap,
1249 vm_offset_t sva,
1250 vm_offset_t eva,
1251 vm_prot_t prot)
1252{
1253 spl_t spl;
1254 register struct phys_entry *pp;
1255 register struct mapping *mp, *mpv;
1256
1257#if PMAP_LOWTRACE
1258 dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1259#endif
1260
1261#if DEBUG
1262 if (pmdebug & PDB_USER)
1263 kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);
1264
1265 assert(sva < eva);
1266#endif
1267
1268 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1269
1270 debugLog2(48, sva, eva); /* Log pmap_protect call */
1271
1272 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1273 pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */
1274
1275 debugLog2(49, prot, 0); /* Log pmap_protect exit */
1276
1277 return; /* Leave... */
1278 }
1279
1280 sva = trunc_page(sva); /* Start up a page boundary */
1281
1282 while(sva < eva) { /* Step through */
1283
1284 if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1285 sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */
1286 if(!sva) break; /* We tried to wrap, kill loop... */
1287 continue; /* Check the next... */
1288 }
1289
1290#if 1
1291 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1292 panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1293#endif
1294
1295 mapping_protect(pmap, sva, prot); /* Change the protection on the page */
1296 sva += PAGE_SIZE; /* On to the next page */
1297 }
1298
1299 debugLog2(49, prot, 1); /* Log pmap_protect exit */
1300 return; /* Leave... */
1301}
1302
1303/*
1304 * pmap_enter
1305 *
1306 * Create a translation for the virtual address (virt) to the physical
1307 * address (phys) in the pmap with the protection requested. If the
1308 * translation is wired then we can not allow a full page fault, i.e.,
1309 * the mapping control block is not eligible to be stolen in a low memory
1310 * condition.
1311 *
1312 * NB: This is the only routine which MAY NOT lazy-evaluate
1313 * or lose information. That is, this routine must actually
1314 * insert this page into the given map NOW.
1315 */
1316void
1317pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
1318 boolean_t wired)
1319{
1320 spl_t spl;
1321 struct mapping *mp;
1322 struct phys_entry *pp;
1323 int memattr;
1324
1325#if PMAP_LOWTRACE
1326 dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1327 dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1328#endif
1329
1330 if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */
1331
1332 debugLog2(50, va, pa); /* Log pmap_enter call */
1333
1334 pp = pmap_find_physentry(pa); /* Get the physent for this physical page */
1335
1336 if((0x00008000 >> (va >> 28)) & pmap->vflags)
1337 panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va); /* (TEST/DEBUG) panic */
1338
1339 spl=splhigh(); /* Have to disallow interrupts between the
1340 time we possibly clear a mapping and the time
1341 we get it remapped again. An I/O SLIH could
1342 try to drive an IOR using the page before
1343 we get it mapped (Dude! This was a tough
1344 bug!!!!) */
1345
1346 mapping_remove(pmap, va); /* Remove any other mapping at this address */
1347
1348 memattr = PTE_WIMG_IO; /* Assume I/O mapping for a moment */
1349 if(pp) memattr = ((pp->pte1&0x00000078) >> 3); /* Set the attribute to the physical default */
1350
1351 mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0); /* Make the address mapping */
1352
1353 splx(spl); /* I'm not busy no more - come what may */
1354
1355 debugLog2(51, prot, 0); /* Log pmap_enter exit */
1356
1357#if DEBUG
1358 if (pmdebug & (PDB_USER|PDB_ENTER))
1359 kprintf("leaving pmap_enter\n");
1360#endif
1361
1362}
1363
1364/*
1365 * pmap_extract(pmap, va)
1366 * returns the physical address corresponding to the
1367 * virtual address specified by pmap and va if the
1368 * virtual address is mapped and 0 if it is not.
1369 */
1370vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {
1371
1372 spl_t spl;
1373 register struct mapping *mp, *mpv;
1374 register vm_offset_t pa;
1375 unsigned int seg;
1376 pmap_t actpmap;
1377
1378
1379#if PMAP_LOWTRACE
1380 dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1381#endif
1382#if DEBUG
1383 if (pmdebug & PDB_USER)
1384 kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
1385#endif
1386
1387 seg = va >> 28; /* Isolate segment */
1388 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1389 else actpmap = pmap; /* Otherwise use the one passed in */
1390
1391 pa = (vm_offset_t) 0; /* Clear this to 0 */
1392
1393 debugLog2(52, actpmap->space, va); /* Log pmap_extract call */
1394
1395 spl = splhigh(); /* We can't allow any loss of control here */
1396
1397 if(mp=hw_lock_phys_vir(actpmap->space, va)) { /* Find the mapping for this vaddr and lock physent */
1398 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1399 panic("pmap_extract: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1400 splx(spl); /* Interruptions are cool now */
1401 return 0;
1402 }
1403
1404 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1405 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */
1406 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1407 splx(spl); /* Interruptions are cool now */
1408
1409 debugLog2(53, pa, 0); /* Log pmap_extract exit */
1410
1411 return pa; /* Return the physical address... */
1412 }
1413
1414 pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1415 /* Note no nested pmaps here */
1416 splx(spl); /* Restore 'rupts */
1417 debugLog2(53, pa, 0); /* Log pmap_extract exit */
1418 return pa; /* Return physical address or 0 */
1419}
1420
1421/*
1422 * pmap_attributes:
1423 *
1424 * Set/Get special memory attributes; Set is not implemented.
1425 *
1426 * Note: 'VAL_GET_INFO' is used to return info about a page.
1427 * If less than 1 page is specified, return the physical page
1428 * mapping and a count of the number of mappings to that page.
1429 * If more than one page is specified, return the number
1430 * of resident pages and the number of shared (more than
1431 * one mapping) pages in the range;
1432 *
1433 */
1434kern_return_t
1435pmap_attribute(pmap, address, size, attribute, value)
1436 pmap_t pmap;
1437 vm_offset_t address;
1438 vm_size_t size;
1439 vm_machine_attribute_t attribute;
1440 vm_machine_attribute_val_t* value;
1441{
1442 spl_t s;
1443 vm_offset_t sva, eva;
1444 vm_offset_t pa;
1445 kern_return_t ret;
1446 register struct mapping *mp, *mpv;
1447 register struct phys_entry *pp;
1448 int total, seg;
1449 pmap_t actpmap;
1450
1451 if (attribute != MATTR_CACHE)
1452 return KERN_INVALID_ARGUMENT;
1453
1454 /* We can't get the caching attribute for more than one page
1455 * at a time
1456 */
1457 if ((*value == MATTR_VAL_GET) &&
1458 (trunc_page(address) != trunc_page(address+size-1)))
1459 return KERN_INVALID_ARGUMENT;
1460
1461 if (pmap == PMAP_NULL)
1462 return KERN_SUCCESS;
1463
1464 sva = trunc_page(address);
1465 eva = round_page(address + size);
1466 ret = KERN_SUCCESS;
1467
1468 debugLog2(54, address, attribute); /* Log pmap_attribute call */
1469
1470 switch (*value) {
1471 case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */
1472 case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */
1473 case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */
1474 case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */
1475 sva = trunc_page(sva);
1476 s = splhigh();
1477
1478 while (sva < eva) {
1479 seg = sva >> 28; /* Isolate segment */
1480 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1481 else actpmap = pmap; /* Otherwise use the one passed in */
1482
1483/*
1484 * Note: the following should work ok with nested pmaps because there are not overlayed mappings
1485 */
1486 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1487 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1488 if(!sva) break; /* We tried to wrap, kill loop... */
1489 continue; /* Check the next... */
1490 }
1491
1492 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1493 sva += PAGE_SIZE; /* Point to the next page */
1494 continue; /* Skip if the page is not mapped... */
1495 }
1496
1497 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1498 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1499 continue;
1500 }
1501
1502 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1503 if((unsigned int)mpv->physent) { /* Is there a physical entry? */
1504 pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */
1505 }
1506 else {
1507 pa = (vm_offset_t)(mpv->PTEr & -PAGE_SIZE); /* Otherwise from the mapping (mask to the page address) */
1508 }
1509
1510 switch (*value) { /* What type was that again? */
1511 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1512 sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */
1513 break; /* Done with this one here... */
1514
1515 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1516 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1517 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1518 break; /* Done with this one here... */
1519
1520 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1521 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1522 break; /* Done with this one here... */
1523
1524 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1525 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1526 break; /* Done with this one here... */
1527 }
1528 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/
1529
1530 sva += PAGE_SIZE; /* Point to the next page */
1531 }
1532 splx(s);
1533 break;
1534
1535 case MATTR_VAL_GET_INFO: /* Get info */
1536 total = 0;
1537 s = splhigh(); /* Lock 'em out */
1538
1539 if (size <= PAGE_SIZE) { /* Do they want just one page */
1540 seg = sva >> 28; /* Isolate segment */
1541 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1542 else actpmap = pmap; /* Otherwise use the one passed in */
1543 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1544 *value = 0; /* Return nothing if no mapping */
1545 }
1546 else {
1547 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1548 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1549 }
1550 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1551 if(pp = mpv->physent) { /* Check for a physical entry */
1552 total = 0; /* Clear the count */
1553 for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mpv->next)) total++; /* Count the mappings */
1554 *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */
1555 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1556 }
1557 else { /* This is the case for an I/O mapped area */
1558 *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */
1559 }
1560 }
1561 }
1562 else {
1563 total = 0;
1564 while (sva < eva) {
1565 seg = sva >> 28; /* Isolate segment */
1566 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1567 else actpmap = pmap; /* Otherwise use the one passed in */
1568
1569 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1570 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1571 if(!sva) break; /* We tried to wrap, kill loop... */
1572 continue; /* Check the next... */
1573 }
1574 if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */
1575 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1576 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1577 continue;
1578 }
1579 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1580 total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */
1581 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1582 }
1583 sva += PAGE_SIZE;
1584 }
1585 *value = total;
1586 }
1587 splx(s);
1588 break;
1589
1590 case MATTR_VAL_GET: /* return current value */
1591 case MATTR_VAL_OFF: /* turn attribute off */
1592 case MATTR_VAL_ON: /* turn attribute on */
1593 default:
1594 ret = KERN_INVALID_ARGUMENT;
1595 break;
1596 }
1597
1598 debugLog2(55, 0, 0); /* Log pmap_attribute exit */
1599
1600 return ret;
1601}
1602
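/*
 * Illustrative sketch (not part of the original source): decoding the value
 * returned by pmap_attribute(..., MATTR_VAL_GET_INFO, ...) above. The
 * packing is inferred from the code: for a single page the physical page
 * address is combined with the mapping count in the low bits; for a larger
 * range each resident page adds 65536 and each shared page adds 1.
 */
#if 0	/* example only */
static void get_info_decode_example(vm_machine_attribute_val_t value, vm_size_t size)
{
	if (size <= PAGE_SIZE) {
		vm_offset_t pa   = (vm_offset_t)value & -PAGE_SIZE;		/* Physical page address */
		unsigned int cnt = (unsigned int)value & (PAGE_SIZE - 1);	/* Number of mappings to it */
		kprintf("pa=%08X, mappings=%d\n", pa, cnt);
	} else {
		unsigned int resident = (unsigned int)value >> 16;		/* Pages with at least one mapping */
		unsigned int shared   = (unsigned int)value & 0xFFFF;		/* Pages with more than one mapping */
		kprintf("resident=%d, shared=%d\n", resident, shared);
	}
}
#endif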
1603/*
1604 * pmap_collect
1605 *
1606 * Garbage collects the physical map system for pages that are no longer used.
1607 * It isn't implemented or needed or wanted.
1608 */
1609void
1610pmap_collect(pmap_t pmap)
1611{
1612 return;
1613}
1614
1615/*
1616 * Routine: pmap_activate
1617 * Function:
1618 * Binds the given physical map to the given
1619 * processor, and returns a hardware map description.
1620 * It isn't implemented or needed or wanted.
1621 */
1622void
1623pmap_activate(
1624 pmap_t pmap,
1625 thread_t th,
1626 int which_cpu)
1627{
1628 return;
1629}
1630/*
1631 * pmap_deactivate:
1632 * It isn't implemented or needed or wanted.
1633 */
1634void
1635pmap_deactivate(
1636 pmap_t pmap,
1637 thread_t th,
1638 int which_cpu)
1639{
1640 return;
1641}
1642
1643#if DEBUG
1644
1645/*
1646 * pmap_zero_page
1647 * pmap_copy_page
1648 *
1649 * are implemented in movc.s; these
1650 * are just wrappers to help debugging.
1651 */
1652
1653extern void pmap_zero_page_assembler(vm_offset_t p);
1654extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
1655
1656/*
1657 * pmap_zero_page(pa)
1658 *
1659 * pmap_zero_page zeros the specified (machine independent) page pa.
1660 */
1661void
1662pmap_zero_page(
1663 vm_offset_t p)
1664{
1665 register struct mapping *mp;
1666 register struct phys_entry *pp;
1667
1668 if (pmdebug & (PDB_USER|PDB_ZERO))
1669 kprintf("pmap_zero_page(pa=%x)\n", p);
1670
1671 /*
1672 * XXX can these happen?
1673 */
1674 if (pmap_find_physentry(p) == PHYS_NULL)
1675 panic("zero_page: physaddr out of range");
1676
1677 pmap_zero_page_assembler(p);
1678}
1679
1680/*
1681 * pmap_copy_page(src, dst)
1682 *
1683 * pmap_copy_page copies the specified (machine independent)
1684 * page from physical address src to physical address dst.
1685 *
1686 * We need to invalidate the cache for address dst before
1687 * we do the copy. Apparently there won't be any mappings
1688 * to the dst address normally.
1689 */
1690void
1691pmap_copy_page(
1692 vm_offset_t src,
1693 vm_offset_t dst)
1694{
1695 register struct phys_entry *pp;
1696
1697 if (pmdebug & (PDB_USER|PDB_COPY))
1698 kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
1699 if (pmdebug & PDB_COPY)
1700 kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
1701 src, dst, PAGE_SIZE);
1702
1703 pmap_copy_page_assembler(src, dst);
1704}
1705#endif /* DEBUG */
1706
1707/*
1708 * pmap_pageable(pmap, s, e, pageable)
1709 * Make the specified pages (by pmap, offset)
1710 * pageable (or not) as requested.
1711 *
1712 * A page which is not pageable may not take
1713 * a fault; therefore, its page table entry
1714 * must remain valid for the duration.
1715 *
1716 * This routine is merely advisory; pmap_enter()
1717 * will specify that these pages are to be wired
1718 * down (or not) as appropriate.
1719 *
1720 * (called from vm/vm_fault.c).
1721 */
1722void
1723pmap_pageable(
1724 pmap_t pmap,
1725 vm_offset_t start,
1726 vm_offset_t end,
1727 boolean_t pageable)
1728{
1729
1730 return; /* This is not used... */
1731
1732}
1733/*
1734 * Routine: pmap_change_wiring
1735 * NOT USED ANYMORE.
1736 */
1737void
1738pmap_change_wiring(
1739 register pmap_t pmap,
1740 vm_offset_t va,
1741 boolean_t wired)
1742{
1743 return; /* This is not used... */
1744}
1745
1746/*
1747 * pmap_modify_pages(pmap, s, e)
1748 * sets the modified bit on all virtual addresses v in the
1749 * virtual address range determined by [s, e] and pmap,
1750 * s and e must be on machine independent page boundaries and
1751 * s must be less than or equal to e.
1752 */
1753void
1754pmap_modify_pages(
1755 pmap_t pmap,
1756 vm_offset_t sva,
1757 vm_offset_t eva)
1758{
1759 spl_t spl;
1760 mapping *mp;
1761
1762#if PMAP_LOWTRACE
1763 dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1764#endif
1765
1766#if DEBUG
1767 if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);
1768#endif
1769
1770 if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */
1771
1772	debugLog2(56, sva, eva);				/* Log pmap_modify_pages entry */
1773
1774 spl=splhigh(); /* Don't bother me */
1775
1776 for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */
1777 mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */
1778 if(mp) { /* Did we find one? */
1779 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1780 panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1781 continue;
1782 }
1783 mp = hw_cpv(mp); /* Convert to virtual addressing */
1784 if(!mp->physent) continue; /* No physical entry means an I/O page, we can't set attributes */
1785			mapping_set_mod(mp->physent);		/* Set the modified bit for this page */
1786 hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1787 }
1788 }
1789 splx(spl); /* Restore the interrupt level */
1790
1791	debugLog2(57, 0, 0);					/* Log pmap_modify_pages return */
1792 return; /* Leave... */
1793}
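
/*
 * Illustrative sketch, not compiled (kept under #if 0): pmap_modify_pages()
 * expects page-aligned bounds with sva <= eva, so a caller starting from
 * arbitrary addresses would round them first.  The wrapper name is
 * hypothetical.
 */
#if 0
static void mark_range_modified(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
	vm_offset_t sva = trunc_page(start);			/* Round the start down to a page boundary */
	vm_offset_t eva = round_page(end);			/* Round the end up to a page boundary */

	if (sva < eva) pmap_modify_pages(pmap, sva, eva);	/* Set the modified bit on every page in the range */
}
#endif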
1794
1795/*
1796 * pmap_clear_modify(phys)
1797 * clears the hardware modified ("dirty") bit for one
1798 * machine independent page starting at the given
1799 * physical address. phys must be aligned on a machine
1800 * independent page boundary.
1801 */
1802void
1803pmap_clear_modify(vm_offset_t pa)
1804{
1805 register struct phys_entry *pp;
1806 spl_t spl;
1807
1808#if PMAP_LOWTRACE
1809 dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1810#endif
1811#if DEBUG
1812 if (pmdebug & PDB_USER)
1813 kprintf("pmap_clear_modify(pa=%x)\n", pa);
1814#endif
1815
1816 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1817 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1818
1819	debugLog2(58, pa, 0);					/* Log pmap_clear_modify entry */
1820
1821 spl=splhigh(); /* Don't bother me */
1822
1823 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1824 panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1825 splx(spl); /* Restore 'rupts */
1826 return; /* Should die before here */
1827 }
1828
1829 mapping_clr_mod(pp); /* Clear all change bits for physical page */
1830
1831 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1832 splx(spl); /* Restore the interrupt level */
1833
1834	debugLog2(59, 0, 0);					/* Log pmap_clear_modify return */
1835}
1836
1837/*
1838 * pmap_is_modified(phys)
1839 * returns TRUE if the given physical page has been modified
1840 * since the last call to pmap_clear_modify().
1841 */
1842boolean_t
1843pmap_is_modified(register vm_offset_t pa)
1844{
1845 register struct phys_entry *pp;
1846 spl_t spl;
1847 boolean_t ret;
1848
1849
1850#if PMAP_LOWTRACE
1851 dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1852#endif
1853#if DEBUG
1854 if (pmdebug & PDB_USER)
1855 kprintf("pmap_is_modified(pa=%x)\n", pa);
1856#endif
1857
1858 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1859 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1860
1861	debugLog2(60, pa, 0);					/* Log pmap_is_modified entry */
1862
1863 spl=splhigh(); /* Don't bother me */
1864
1865 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1866 panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1867 splx(spl); /* Restore 'rupts */
1868 return 0; /* Should die before here */
1869 }
1870
1871 ret = mapping_tst_mod(pp); /* Check for modified */
1872
1873 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1874 splx(spl); /* Restore the interrupt level */
1875
1876	debugLog2(61, ret, 0);					/* Log pmap_is_modified return */
1877
1878 return ret;
1879}
1880
1881/*
1882 * pmap_clear_reference(phys)
1883 * clears the hardware referenced bit in the given machine
1884 * independent physical page.
1885 *
1886 */
1887void
1888pmap_clear_reference(vm_offset_t pa)
1889{
1890 register struct phys_entry *pp;
1891 spl_t spl;
1892
1893
1894#if PMAP_LOWTRACE
1895 dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1896#endif
1897#if DEBUG
1898 if (pmdebug & PDB_USER)
1899 kprintf("pmap_clear_reference(pa=%x)\n", pa);
1900#endif
1901
1902 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1903 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1904
1905	debugLog2(62, pa, 0);					/* Log pmap_clear_reference entry */
1906
1907 spl=splhigh(); /* Don't bother me */
1908 mapping_clr_ref(pp); /* Clear all reference bits for physical page */
1909 splx(spl); /* Restore the interrupt level */
1910
1911	debugLog2(63, 0, 0);					/* Log pmap_clear_reference return */
1912
1913}
1914
1915/*
1916 * pmap_is_referenced(phys)
1917 * returns TRUE if the given physical page has been referenced
1918 * since the last call to pmap_clear_reference().
1919 */
1920boolean_t
1921pmap_is_referenced(vm_offset_t pa)
1922{
1923 register struct phys_entry *pp;
1924 spl_t spl;
1925 boolean_t ret;
1926
1927
1928#if PMAP_LOWTRACE
1929 dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1930#endif
1931#if DEBUG
1932 if (pmdebug & PDB_USER)
1933 kprintf("pmap_is_referenced(pa=%x)\n", pa);
1934#endif
1935
1936 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1937 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1938
1939	debugLog2(64, pa, 0);					/* Log pmap_is_referenced entry */
1940
1941 spl=splhigh(); /* Don't bother me */
1942
1943 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1944 panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1945 splx(spl); /* Restore 'rupts */
1946 return 0; /* Should die before here */
1947 }
1948
1949 ret = mapping_tst_ref(pp); /* Check for referenced */
1950
1951 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1952 splx(spl); /* Restore the interrupt level */
1953
1954	debugLog2(65, ret, 0);					/* Log pmap_is_referenced return */
1955
1956 return ret;
1957}
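
/*
 * Illustrative sketch, not compiled (kept under #if 0): how the
 * reference/modify calls above combine in a pageout-scanner style test.
 * The function name is hypothetical; no caller in this file works exactly
 * this way.
 */
#if 0
static boolean_t page_is_idle_and_clean(vm_offset_t pa)
{
	boolean_t referenced = pmap_is_referenced(pa);		/* Touched since the last clear? */
	boolean_t modified   = pmap_is_modified(pa);		/* Dirty? */

	pmap_clear_reference(pa);				/* Re-arm the reference bit for the next scan pass */

	if (!referenced && !modified) return TRUE;		/* Idle and clean: a reasonable reclaim candidate */
	return FALSE;						/* Otherwise leave the page alone for now */
}
#endif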
1958
1959#if MACH_VM_DEBUG
1960int
1961pmap_list_resident_pages(
1962 register pmap_t pmap,
1963 register vm_offset_t *listp,
1964 register int space)
1965{
1966 return 0;
1967}
1968#endif /* MACH_VM_DEBUG */
1969
1970/*
1971 * Locking:
1972 * spl: VM
1973 */
1974void
1975pmap_copy_part_page(
1976 vm_offset_t src,
1977 vm_offset_t src_offset,
1978 vm_offset_t dst,
1979 vm_offset_t dst_offset,
1980 vm_size_t len)
1981{
1982 register struct phys_entry *pp_src, *pp_dst;
1983 spl_t s;
1984
1985
1986#if PMAP_LOWTRACE
1987 dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */
1988 dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */
1989#endif
1990 s = splhigh();
1991
1992 assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE);
1993 assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE);
1994
1995 /*
1996 * Since the source and destination are physical addresses,
1997	 * turn off data translation and do the copy with phys_copy().
1998 */
1999 phys_copy((vm_offset_t) src+src_offset,
2000 (vm_offset_t) dst+dst_offset, len);
2001
2002 splx(s);
2003}
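
/*
 * Illustrative sketch, not compiled (kept under #if 0): the asserts above
 * require that offset + length stay within a single page on both the source
 * and destination sides.  The caller shown is hypothetical.
 */
#if 0
static void copy_first_half_page(vm_offset_t src_pa, vm_offset_t dst_pa)
{
	vm_size_t len = PAGE_SIZE / 2;				/* Well under a page, so both asserts hold */

	pmap_copy_part_page(src_pa, 0, dst_pa, 0, len);		/* Copy the first half of src into dst */
}
#endif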
2004
2005void
2006pmap_zero_part_page(
2007 vm_offset_t p,
2008 vm_offset_t offset,
2009 vm_size_t len)
2010{
2011 panic("pmap_zero_part_page");
2012}
2013
2014boolean_t pmap_verify_free(vm_offset_t pa) {
2015
2016 struct phys_entry *pp;
2017
2018#if PMAP_LOWTRACE
2019 dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */
2020#endif
2021
2022#if DEBUG
2023 if (pmdebug & PDB_USER)
2024 kprintf("pmap_verify_free(pa=%x)\n", pa);
2025#endif
2026
2027 if (!pmap_initialized) return(TRUE);
2028
2029 pp = pmap_find_physentry(pa); /* Look up the physical entry */
2030	if (pp == PHYS_NULL) return FALSE;					/* If there isn't one, it is not a managed page; say not free... */
2031	return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL);	/* Otherwise, return TRUE only if there are no mappings to the page... */
2032}
2033
2034
2035/* Determine if we need to switch space and set up for it if so */
2036
2037void pmap_switch(pmap_t map)
2038{
2039 unsigned int i;
2040
2041#if DEBUG
2042 if (watchacts & WA_PCB) {
2043 kprintf("Switching to map at 0x%08x, space=%d\n",
2044 map,map->space);
2045 }
2046#endif /* DEBUG */
2047
2048
2049/* When changing to kernel space, don't bother
2050 * doing anything; the kernel is already mapped from here.
2051 */
2052 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
2053 return; /* If so, we don't do anything... */
2054 }
2055
2056 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
2057 return; /* Bye, bye, butterfly... */
2058}
2059
2060/*
2061 * kern_return_t pmap_nest(grand, subord, vaddr, size)
2062 *
2063 * grand = the pmap that we will nest subord into
2064 * subord = the pmap that goes into the grand
2065 * vaddr = start of range in pmap to be inserted
2066 * size = size of range in pmap to be inserted
2067 *
2068 * Inserts a pmap into another. This is used to implement shared segments.
2069 * On the current PPC processors, this is limited to segment-aligned,
2070 * segment-sized (256MB) ranges.
2071 */
2072
2073kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {
2074
2075 unsigned int oflags, seg, grandr;
2076 int i;
2077
2078 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2079 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2080
2081 while(1) { /* Test and set the subordinate flag */
2082 oflags = subord->vflags & ~pmapAltSeg; /* Get old unset value */
2083		if(subord->vflags & pmapAltSeg) {		/* Are we trying to nest one that is already nested? */
2084 panic("pmap_nest: Attempt to nest an already nested pmap\n");
2085 }
2086 if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break; /* Done if we got it set */
2087 }
2088
2089 simple_lock(&grand->lock); /* Lock the superior pmap */
2090
2091 if(grand->vflags & pmapSubord) { /* Are we only one level deep? */
2092 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2093 panic("pmap_nest: Attempt to nest into subordinate pmap\n");
2094 return KERN_FAILURE; /* Shame on you */
2095 }
2096
2097 seg = vaddr >> 28; /* Isolate the segment number */
2098 if((0x00008000 >> seg) & grand->vflags) { /* See if it is already in use */
2099 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2100 panic("pmap_nest: Attempt to nest into already nested segment\n");
2101 return KERN_FAILURE; /* Shame on you */
2102 }
2103
2104 grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */
2105 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */
2106 grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */
2107
2108 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2109
2110 simple_unlock(&grand->lock); /* Unlock the grand pmap */
2111
2112
2113/*
2114 * Note that the following will force the segment registers to be reloaded following
2115 * the next interrupt on all processors if they are using the pmap we just changed.
2116 *
2117 * This probably isn't needed, but it just feels better to do it. The reason it isn't
2118 * needed is that there is no mapped memory in the grand pmap's segment before we
2119 * nest and we will take a fault if it is accessed.
2120 */
2121
2122
2123 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2124 (void)hw_compare_and_store((unsigned int)grand, 0, &per_proc_info[i].userpmap); /* Clear if ours */
2125 }
2126
2127 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2128}
2129
2130
2131/*
2132 * kern_return_t pmap_unnest(grand, vaddr, size)
2133 *
2134 * grand = the pmap from which the nested (subordinate) pmap is removed
2135 * vaddr = start of the range in pmap to be removed
2136 * size = size of the range in pmap to be removed
2137 *
2138 * Removes a nested pmap from another. This is used to implement shared segments.
2139 * On the current PPC processors, this is limited to segment-aligned,
2140 * segment-sized (256MB) ranges.
2141 */
2142
2143kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {
2144
2145 unsigned int oflags, seg, grandr, tstamp;
2146 int i, tcpu, mycpu;
2147
2148 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2149 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2150
2151 simple_lock(&grand->lock); /* Lock the superior pmap */
2152 disable_preemption(); /* It's all for me! */
2153
2154 seg = vaddr >> 28; /* Isolate the segment number */
2155	if(!((0x00008000 >> seg) & grand->vflags)) {		/* Make sure the segment is actually nested */
2156 enable_preemption(); /* Ok, your turn */
2157 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2158 panic("pmap_unnest: Attempt to unnest an unnested segment\n");
2159 return KERN_FAILURE; /* Shame on you */
2160 }
2161
2162 grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */
2164	grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space;	/* Set the vsid back to the grand's own vsid */
2165 grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */
2166
2167 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2168
2169 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2170
2171/*
2172 * Note that the following will force the segment registers to be reloaded
2173 * on all processors (if they are using the pmap we just changed) before returning.
2174 *
2175 * This is needed. The reason is that until the segment register is
2176 * reloaded, another thread in the same task on a different processor will
2177 * be able to access memory that it isn't allowed to anymore. That can happen
2178 * because access to the subordinate pmap is being removed, but the pmap is still
2179 * valid.
2180 *
2181 * Note that we only kick the other processor if we see that it was using the pmap while we
2182 * were changing it.
2183 */
2184
2185
2186 mycpu = cpu_number(); /* Who am I? Am I just a dream? */
2187 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2188 if(hw_compare_and_store((unsigned int)grand, 0, &per_proc_info[i].userpmap)) { /* Clear if ours and kick the other guy if he was using it */
2189 if(i == mycpu) continue; /* Don't diddle ourselves */
2190 tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */
2191			if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) {	/* Make sure the other processor sees the pmap change */
2192				panic("pmap_unnest: Signal processor (%d) failed\n", i);
2193			}
2194			if(!hw_cpu_wcng(tstamp, &per_proc_info[i].ruptStamp[1], LockTimeOut)) {	/* Wait for the other processor to see the interruption */
2195 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
2196 }
2197 }
2198 }
2199
2200 enable_preemption(); /* Others can run now */
2201 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2202}
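
/*
 * Illustrative sketch, not compiled (kept under #if 0): nesting a shared
 * segment into a task pmap and removing it again.  Both calls demand a
 * 256MB-aligned address and a size of exactly one segment, as checked above.
 * The wrapper names are hypothetical.
 */
#if 0
#define SHARED_SEG_SIZE 0x10000000				/* One PPC segment: 256MB */

static kern_return_t share_segment(pmap_t task_pmap, pmap_t shared_pmap, vm_offset_t va)
{
	if (va & (SHARED_SEG_SIZE - 1)) return KERN_INVALID_VALUE;	/* Must be segment aligned */
	return pmap_nest(task_pmap, shared_pmap, va, SHARED_SEG_SIZE);	/* Map the shared pmap into this segment */
}

static kern_return_t unshare_segment(pmap_t task_pmap, vm_offset_t va)
{
	return pmap_unnest(task_pmap, va, SHARED_SEG_SIZE);		/* Give the segment back to the task's own pmap */
}
#endif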
2203
2204
2205void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {
2206
2207 int cnt, i, j, k;
2208 vm_offset_t xx;
2209
2210 if(!pmap) return;
2211
2212 sva = trunc_page(sva);
2213 eva = trunc_page(eva);
2214
2215 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2216 if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */
2217 panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
2218 i * pmapUsageSize, pmap->pmapUsage[i], pmap);
2219 }
2220 }
2221 j = 0;
2222 while(1) { /* Try multiple times */
2223 cnt = 0;
2224 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2225 cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */
2226 }
2227 if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... */
2228
2229 j++;
2230 for(i = 0; i < 100000; i++) {
2231 k = j + i;
2232 }
2233 if(j >= 10) {
2234 panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
2235 cnt, pmap->stats.resident_count, pmap);
2236 }
2237 }
2238
2239 for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */
2240 if(pmap_extract(pmap, xx)) {
2241 panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
2242 sva, eva, xx, pmap);
2243 }
2244 }
2245}
2246
2247
2248
2249
2250
2251