/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).
 * Copyright (c) 1991,1987 Carnegie Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation,
 * and that all advertising materials mentioning features or use of
 * this software display the following acknowledgement: ``This product
 * includes software developed by the Center for Software Science at
 * the University of Utah.''
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Carnegie Mellon requests users of this software to return to
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * Utah $Hdr: pmap.c 1.28 92/06/23$
 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
 */
/*
 *	Manages physical address maps for powerpc.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to when physical maps must be made correct.
 */
#include <zone_debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mappings.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/savearea.h>
#include <ddb/db_output.h>

#if DB_MACHINE_COMMANDS
/* optionally enable traces of pmap operations in post-mortem trace table */
/* #define PMAP_LOWTRACE 1 */
#define PMAP_LOWTRACE 0
#else	/* DB_MACHINE_COMMANDS */
/* Can not trace even if we wanted to */
#define PMAP_LOWTRACE 0
#endif	/* DB_MACHINE_COMMANDS */

#if PERFTIMES && DEBUG
#define debugLog2(a, b, c) dbgLog2(a, b, c)
#else
#define debugLog2(a, b, c)
#endif
extern unsigned int	avail_remaining;
extern unsigned int	mappingdeb0;
extern struct Saveanchor saveanchor;		/* Aligned savearea anchor */
extern int		real_ncpus;		/* Number of actual CPUs */
unsigned int		debugbackpocket;	/* (TEST/DEBUG) */

vm_offset_t		avail_next;
vm_offset_t		first_free_virt;
int			current_free_region;	/* Used in pmap_next_page */
void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);

int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
#define PDB_USER	0x01	/* exported functions */
#define PDB_MAPPING	0x02	/* low-level mapping routines */
#define PDB_ENTER	0x04	/* pmap_enter specifics */
#define PDB_COPY	0x08	/* copy page debugging */
#define PDB_ZERO	0x10	/* zero page debugging */
#define PDB_WIRED	0x20	/* things concerning wired entries */
#define PDB_PTEG	0x40	/* PTEG overflows */
#define PDB_LOCK	0x100	/* locks */
#define PDB_IO		0x200	/* Improper use of WIMG_IO checks - PCI machines */
/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */

extern struct pmap	kernel_pmap_store;
pmap_t			kernel_pmap;		/* Pointer to kernel pmap and anchor for in-use pmaps */
pmap_t			cursor_pmap;		/* Pointer to last pmap allocated or previous if removed from in-use list */
struct zone		*pmap_zone;		/* zone of pmap structures */
boolean_t		pmap_initialized = FALSE;
/*
 * Physical-to-virtual translations are handled by inverted page table
 * structures, phys_tables.  Multiple mappings of a single page are handled
 * by linking the affected mapping structures.  We initialise one region
 * for phys_tables of the physical memory we know about, but more may be
 * added as it is discovered (eg. by drivers).
 */
struct phys_entry *phys_table;		/* For debugging */

lock_t	pmap_system_lock;

decl_simple_lock_data(,tlb_system_lock)
/*
 *	free pmap list.  Caches the first free_pmap_max pmaps that are freed up.
 */
int		free_pmap_max = 32;
int		free_pmap_count;
pmap_t		free_pmap_list;
decl_simple_lock_data(,free_pmap_lock)
/*
 * Function to get index into phys_table for a given physical address
 */
struct phys_entry *pmap_find_physentry(vm_offset_t pa)
{
	int i;
	struct phys_entry *entry;

	for (i = pmap_mem_regions_count - 1; i >= 0; i--) {
		if (pa < pmap_mem_regions[i].start)
			continue;
		if (pa >= pmap_mem_regions[i].end)
			return PHYS_NULL;

		entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
		__asm__ volatile("dcbt 0,%0" : : "r" (entry));	/* We will use this in a little bit */
		return entry;
	}
	kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n", pa);
	return PHYS_NULL;
}
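/*
 * Illustrative sketch, not part of the original file: the helper below is
 * hypothetical, but it shows the caller pattern used throughout this module,
 * where a physical address is translated into its physent and rejected when
 * it is not managed RAM.
 */
#if 0
static boolean_t example_page_is_managed(vm_offset_t pa)
{
	struct phys_entry *pp;

	pp = pmap_find_physentry(pa);		/* Reverse scan of pmap_mem_regions for pa */
	return (pp != PHYS_NULL);		/* PHYS_NULL means pa is not in physical RAM */
}
#endif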
/*
 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
 *			    boolean_t available, unsigned int attr)
 *	Allocate some extra physentries for the physical addresses given,
 *	specifying some default attribute that on the powerpc specifies
 *	the default cachability for any mappings using these addresses.
 *	If the memory is marked as available, it is added to the general
 *	VM pool, otherwise it is not (it is reserved for card IO etc).
 */
kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
				       boolean_t available, unsigned int attr)
{
	int i, j;

	/* Only map whole pages */

	panic("Forget it! You can't map no more memory, you greedy puke!\n");

	spa = trunc_page(spa);
	epa = round_page(epa);

	/* First check that the region doesn't already exist */

	for (i = 0; i < pmap_mem_regions_count; i++) {
		/* If we're below the next region, then no conflict */
		if (epa < pmap_mem_regions[i].start)
			break;
		if (spa < pmap_mem_regions[i].end) {
			kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n", spa, epa, attr);
			return KERN_NO_SPACE;
		}
	}

	kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count);	/* (TEST/DEBUG) */

	/* Check that we've got enough space for another region */
	if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
		return KERN_RESOURCE_SHORTAGE;

	/* Once here, i points to the mem_region above ours in physical mem */

	/* allocate a new phys_table for this new region */
	kprintf("pmap_add_physical_memory; kalloc\n");	/* (TEST/DEBUG) */

	phys_table = (struct phys_entry *)
		kalloc(sizeof(struct phys_entry) * atop(epa - spa));

	kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table);	/* (TEST/DEBUG) */

	/* Initialise the new phys_table entries */
	for (j = 0; j < atop(epa - spa); j++) {
		phys_table[j].phys_link = MAPPING_NULL;
		mapping_phys_init(&phys_table[j], spa + (j * PAGE_SIZE), attr);	/* Initialize the hardware specific portions */
	}

	/* Move all the phys_table entries up some to make room in
	 * the ordered pmap_mem_regions list
	 */
	for (j = pmap_mem_regions_count; j > i; j--)
		pmap_mem_regions[j] = pmap_mem_regions[j - 1];

	/* Insert a new entry with some memory to back it */

	pmap_mem_regions[i].start	= spa;
	pmap_mem_regions[i].end		= epa;
	pmap_mem_regions[i].phys_table	= phys_table;

	pmap_mem_regions_count++;

	for (i = 0; i < pmap_mem_regions_count; i++) {		/* (TEST/DEBUG) */
		kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
			pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table);	/* (TEST/DEBUG) */
	}

	if (available)
		kprintf("warning : pmap_add_physical_mem() "
			"available not yet supported\n");

	return KERN_SUCCESS;
}
/*
 *	pmap_map(va, spa, epa, prot)
 *	is called during boot to map memory in the kernel's address map.
 *	A virtual address range starting at "va" is mapped to the physical
 *	address range "spa" to "epa" with machine independent protection
 *	"prot".
 *
 *	"va", "spa", and "epa" are byte addresses and must be on machine
 *	independent page boundaries.
 *
 *	The pages form a contiguous virtual range with the same protection
 *	and attributes; therefore, we map the whole range with a single block.
 */
vm_offset_t
pmap_map(vm_offset_t va, vm_offset_t spa, vm_offset_t epa, vm_prot_t prot)
{

	debugLog2(40, va, spa);			/* Log pmap_map call */

	pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm);	/* Set up a permanent block mapped area */

	debugLog2(41, epa, prot);		/* Log pmap_map end */

	return(va);
}
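/*
 * Illustrative sketch, not part of the original file: this mirrors the call
 * pmap_bootstrap makes further down to map its own tables V=R before the VM
 * system is running.  The function name and arguments are hypothetical.
 */
#if 0
static void example_map_boot_region(vm_offset_t start, vm_offset_t end)
{
	(void) pmap_map(start, start, end,		/* V=R: virtual == physical */
			VM_PROT_READ | VM_PROT_WRITE);	/* one permanent block mapping */
}
#endif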
/*
 *	pmap_map_bd(va, spa, epa, prot)
 *	Back-door routine for mapping kernel VM at initialisation.
 *	Used for mapping memory outside the known physical memory
 *	space, with caching disabled.  Designed for use by device probes.
 *
 *	A virtual address range starting at "va" is mapped to the physical
 *	address range "spa" to "epa" with machine independent protection
 *	"prot".
 *
 *	"va", "spa", and "epa" are byte addresses and must be on machine
 *	independent page boundaries.
 *
 *	WARNING: The current version of memcpy() can use the dcbz instruction
 *	on the destination addresses.  This will cause an alignment exception
 *	and consequent overhead if the destination is caching-disabled.  So
 *	avoid memcpy()ing into the memory mapped by this function.
 *
 *	Also, many other pmap_ routines will misbehave if you try and change
 *	protections or remove these mappings; they are designed to be permanent.
 *
 *	These areas will be added to the autogen list, if possible.  Existing translations
 *	are overridden and their mapping structures are released.  This takes place in
 *	the autogen_map function.
 *
 *	This routine is called only during system initialization when only
 *	one processor is active, so no need to take locks...
 */
vm_offset_t
pmap_map_bd(vm_offset_t va, vm_offset_t spa, vm_offset_t epa, vm_prot_t prot)
{
	register struct mapping		*mp;
	register struct phys_entry	*pp;

	debugLog2(42, va, epa);			/* Log pmap_map_bd call */

	pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm);	/* Set up autogen area */

	debugLog2(43, epa, prot);		/* Log pmap_map_bd exit */

	return(va);
}
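/*
 * Illustrative sketch, not part of the original file: a device probe would use
 * pmap_map_bd to get a cache-inhibited window onto card space that lies outside
 * known physical memory.  The function name and addresses are hypothetical.
 */
#if 0
static void example_probe_window(vm_offset_t probe_va, vm_offset_t card_base)
{
	(void) pmap_map_bd(probe_va, card_base, card_base + PAGE_SIZE,
			   VM_PROT_READ | VM_PROT_WRITE);	/* permanent, caching disabled */
}
#endif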
/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *	Called with mapping done by BATs.  Page_size must already be set.
 *
 *	Parameters:
 *	mem_size:	  Total memory present
 *	first_avail:	  First virtual address available
 *	first_phys_avail: First physical address available
 */
void
pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
{
	register struct mapping	*mp;
	vm_offset_t		addr;
	vm_size_t		size;
	int			i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
	vm_offset_t		first_used_addr;
	PCA			*pcaptr;
	savectl			*savec, *savec2;
	vm_offset_t		save, save2;

	*first_avail = round_page(*first_avail);

	kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
		*first_avail, *first_phys_avail, avail_remaining);

	assert(PAGE_SIZE == PPC_PGBYTES);
	/*
	 * Initialize kernel pmap
	 */
	kernel_pmap = &kernel_pmap_store;
	cursor_pmap = &kernel_pmap_store;

	lock_init(&pmap_system_lock,
		  FALSE,		/* NOT a sleep lock */
		  ETAP_VM_PMAP_SYS,
		  ETAP_VM_PMAP_SYS_I);

	simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);

	kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;	/* Set up anchor forward */
	kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;	/* Set up anchor reverse */
	kernel_pmap->ref_count = 1;
	kernel_pmap->space = PPC_SID_KERNEL;
	kernel_pmap->pmapvr = 0;				/* Virtual = Real */
	kernel_pmap->bmaps = 0;					/* No block pages just yet */
	for (i = 0; i < 128; i++) {				/* Clear usage slots */
		kernel_pmap->pmapUsage[i] = 0;
	}
	for (i = 0; i < 16; i++) {				/* Initialize for laughs */
		kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
	}
	/*
	 * Allocate: (from first_avail up)
	 *	Aligned to its own size:
	 *	  hash table (for mem size 2**x, allocate 2**(x-10) entries)
	 *	  mapping table (same size and immediately following hash table)
	 */
	/* hash_table_size must be a power of 2, recommended sizes are
	 * taken from PPC601 User Manual, table 6-19.  We take the next
	 * highest size if mem_size is not a power of two.
	 * TODO NMGS make this configurable at boot time.
	 */

	num = sizeof(pte_t) * (mem_size >> 10);

	for (hash_table_size = 64 * 1024;	/* minimum size = 64Kbytes */
	     hash_table_size < num;
	     hash_table_size *= 2)
		continue;
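	/*
	 * Worked example (editorial sketch, assuming sizeof(pte_t) == 8, i.e.
	 * two 32-bit words): for mem_size == 64MB, num = 8 * (64MB >> 10) =
	 * 512KB, so hash_table_size doubles from the 64KB minimum up to 512KB.
	 * That is the mem_size/128 sizing this code implements, rounded up to
	 * a power of two.
	 */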
	/* Scale to within any physical memory layout constraints */
	do {
		num = atop(mem_size);	/* num now holds mem_size in pages */

		/* size of all structures that we're going to allocate */

		size = (vm_size_t) (
			(InitialSaveBloks * PAGE_SIZE) +	/* Allow space for the initial context saveareas */
			(8 * PAGE_SIZE) +			/* For backpocket saveareas */
			hash_table_size +			/* For hash table */
			hash_table_size +			/* For PTEG allocation table */
			(num * sizeof(struct phys_entry))	/* For the physical entries */
			);

		mapsize = size = round_page(size);		/* Get size of area to map that we just calculated */
		mapsize = mapsize + kmapsize;			/* Account for the kernel text size */

		vmpagesz = round_page(num * sizeof(struct vm_page));		/* Allow for all vm_pages needed to map physical mem */
		vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry));	/* Allow for vm_maps */

		mapsize = mapsize + vmpagesz + vmmapsz;		/* Add the VM system estimates into the grand total */

		mapsize = mapsize + (4 * 1024 * 1024);		/* Allow for 4 meg of extra mappings */
		mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks of mappings we need */
		mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);		/* Account for the mappings themselves */

		kprintf("pmap_bootstrap: initial vm_pages     = %08X\n", vmpagesz);
		kprintf("pmap_bootstrap: initial vm_maps      = %08X\n", vmmapsz);
		kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
		kprintf("pmap_bootstrap: kernel map size      = %08X\n", kmapsize);
		kprintf("pmap_bootstrap: mapping blocks rqrd  = %08X\n", mapsize);

		size = size + (mapsize * PAGE_SIZE);		/* Get the true size we need */

		/* hash table must be aligned to its size */

		addr = (*first_avail +
			(hash_table_size - 1)) & ~(hash_table_size - 1);

		if (addr + size > pmap_mem_regions[0].end) {
			hash_table_size /= 2;
		} else {
			break;
		}
		/* If we have had to shrink hash table to too small, panic */
		if (hash_table_size == 32 * 1024)
			panic("cannot lay out pmap memory map correctly");
	} while (TRUE);

	kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
		hash_table_size, size, addr);
	if (round_page(*first_phys_avail) < trunc_page(addr)) {
		/* We are stepping over at least one page here, so
		 * add this region to the free regions so that it can
		 * be allocated by pmap_steal
		 */
		free_regions[free_regions_count].start = round_page(*first_phys_avail);
		free_regions[free_regions_count].end = trunc_page(addr);

		avail_remaining += (free_regions[free_regions_count].end -
				    free_regions[free_regions_count].start) /
			PPC_PGBYTES;

		kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
			free_regions[free_regions_count].start, free_regions[free_regions_count].end,
			avail_remaining);

		free_regions_count++;
	}
	/* Zero everything - this also invalidates the hash table entries */
	bzero((char *)addr, size);

	/* Set up some pointers to our new structures */

	/* from here, addr points to the next free address */

	first_used_addr = addr;		/* remember where we started */

	/* Set up hash table address and dma buffer address, keeping
	 * alignment.  These mappings are all 1-1, so dma_r == dma_v
	 *
	 * If hash_table_size == dma_buffer_alignment, then put hash_table
	 * first, since dma_buffer_size may be smaller than alignment, but
	 * hash table alignment==hash_table_size.
	 */
	hash_table_base = addr;

	addr += hash_table_size;
	addr += hash_table_size;				/* Add another for the PTEG Control Area */
	assert((hash_table_base & (hash_table_size - 1)) == 0);

	pcaptr = (PCA *)(hash_table_base + hash_table_size);	/* Point to the PCA table */

	for (i = 0; i < (hash_table_size / 64); i++) {		/* For all of PTEG control areas: */
		pcaptr[i].flgs.PCAalflgs.PCAfree = 0xFF;	/* Mark all slots free */
		pcaptr[i].flgs.PCAalflgs.PCAsteal = 0x01;	/* Initialize steal position */
	}
	/*
	 *	Allocate our initial context save areas.  As soon as we do this,
	 *	we can take an interrupt.  We do the saveareas here, 'cause they're guaranteed
	 *	to be at least page aligned.
	 */
	save2 = addr;						/* Remember first page */
	save = addr;						/* Point to the whole block of blocks */
	savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl));	/* Point to the first's control area */

	for (i = 0; i < InitialSaveBloks; i++) {		/* Initialize the saveareas */

		savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl));	/* Get the control area for this one */

		savec->sac_alloc = sac_empty;			/* Mark both free */
		savec->sac_vrswap = 0;				/* V=R, so the translation factor is 0 */
		savec->sac_flags = sac_perm;			/* Mark it permanent */

		savec->sac_flags |= 0x0000EE00;			/* (TEST/DEBUG) */

		save += PAGE_SIZE;				/* Jump up to the next one now */

		savec->sac_next = (unsigned int *)save;		/* Link these two */
	}

	savec->sac_next = (unsigned int *)0;			/* Clear the forward pointer for the last */
	savec2->sac_alloc &= 0x7FFFFFFF;			/* Mark the first one in use */

	saveanchor.savefree    = (unsigned int)save2;		/* Point to the first one */
	saveanchor.savecount   = InitialSaveBloks * sac_cnt;	/* The total number of save areas allocated */
	saveanchor.saveinuse   = 1;				/* Number of areas in use */
	saveanchor.savemin     = InitialSaveMin;		/* We abend if lower than this */
	saveanchor.saveneghyst = InitialNegHysteresis;		/* The minimum number to keep free (must be a multiple of sac_cnt) */
	saveanchor.savetarget  = InitialSaveTarget;		/* The target point for free save areas (must be a multiple of sac_cnt) */
	saveanchor.saveposhyst = InitialPosHysteresis;		/* The high water mark for free save areas (must be a multiple of sac_cnt) */
	__asm__ volatile ("mtsprg 1, %0" : : "r" (save2));	/* Tell the exception handler about it */

	addr += InitialSaveBloks * PAGE_SIZE;			/* Move up the next free address */
	save2 = addr;
	save = addr;
	savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl));

	for (i = 0; i < 8; i++) {				/* Allocate backpocket saveareas */

		savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl));

		savec->sac_alloc = sac_empty;
		savec->sac_vrswap = 0;
		savec->sac_flags = sac_perm;
		savec->sac_flags |= 0x0000EE00;

		save += PAGE_SIZE;

		savec->sac_next = (unsigned int *)save;
	}

	savec->sac_next = (unsigned int *)0;
	savec2->sac_alloc &= 0x7FFFFFFF;
	debugbackpocket = save2;
	addr += 8 * PAGE_SIZE;
	/* phys_table is static to help debugging,
	 * this variable is no longer actually used
	 * outside of this scope
	 */

	phys_table = (struct phys_entry *) addr;

	kprintf("hash_table_base                 =%08X\n", hash_table_base);
	kprintf("phys_table                      =%08X\n", phys_table);
	kprintf("pmap_mem_regions_count          =%08X\n", pmap_mem_regions_count);

	for (i = 0; i < pmap_mem_regions_count; i++) {

		pmap_mem_regions[i].phys_table = phys_table;
		rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start) / PAGE_SIZE;

		kprintf("Initializing physical table for region %d\n", i);
		kprintf("   table=%08X, size=%08X, start=%08X, end=%08X\n",
			phys_table, rsize, pmap_mem_regions[i].start,
			(unsigned int)pmap_mem_regions[i].end);

		for (j = 0; j < rsize; j++) {
			phys_table[j].phys_link = MAPPING_NULL;
			mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start + (j * PAGE_SIZE),
					  PTE_WIMG_DEFAULT);	/* Initializes hw specific storage attributes */
		}

		phys_table = phys_table +
			atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
	}

	/* restore phys_table for debug */
	phys_table = (struct phys_entry *) addr;

	addr += sizeof(struct phys_entry) * num;

	simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
	/* Initialise the registers necessary for supporting the hashtable */

	kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);

	hash_table_init(hash_table_base, hash_table_size);

	/*
	 *	Remaining space is for mapping entries.  Tell the initializer routine that
	 *	the mapping system can't release this block because it's permanently assigned.
	 */

	mapping_init();				/* Initialize the mapping tables */

	for (i = addr; i < first_used_addr + size; i += PAGE_SIZE) {	/* Add initial mapping blocks */
		mapping_free_init(i, 1, 0);	/* Pass block address and say that this one is not releasable */
	}
	mapCtl.mapcmin = MAPPERBLOK;		/* Make sure we only adjust one at a time */

	kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
		first_used_addr, round_page(first_used_addr + size),
		first_used_addr);

	/* Map V=R the page tables */
	pmap_map(first_used_addr, first_used_addr,
		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);
	for (i = first_used_addr; i < round_page(first_used_addr + size); i += PAGE_SIZE) {	/* Step through all these mappings */
		if (i != (j = kvtophys(i))) {		/* Verify that the mapping was made V=R */
			kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
		}
	}

	*first_avail = round_page(first_used_addr + size);
	first_free_virt = round_page(first_used_addr + size);

	/* All the rest of memory is free - add it to the free
	 * regions so that it can be allocated by pmap_steal
	 */
	free_regions[free_regions_count].start = *first_avail;
	free_regions[free_regions_count].end = pmap_mem_regions[0].end;

	avail_remaining += (free_regions[free_regions_count].end -
			    free_regions[free_regions_count].start) /
		PPC_PGBYTES;

	kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
		free_regions[free_regions_count].start, free_regions[free_regions_count].end,
		avail_remaining);

	free_regions_count++;

	current_free_region = 0;

	avail_next = free_regions[current_free_region].start;

	kprintf("Number of free regions=%d\n", free_regions_count);	/* (TEST/DEBUG) */
	kprintf("Current free region=%d\n", current_free_region);	/* (TEST/DEBUG) */
	for (i = 0; i < free_regions_count; i++) {			/* (TEST/DEBUG) */
		kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
			free_regions[i].end);				/* (TEST/DEBUG) */
	}
	for (i = 0; i < pmap_mem_regions_count; i++) {			/* (TEST/DEBUG) */
		kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i,	/* (TEST/DEBUG) */
			pmap_mem_regions[i].start,			/* (TEST/DEBUG) */
			pmap_mem_regions[i].end,			/* (TEST/DEBUG) */
			pmap_mem_regions[i].phys_table);		/* (TEST/DEBUG) */
	}
}
/*
 * pmap_init(spa, epa)
 *	finishes the initialization of the pmap module.
 *	This procedure is called from vm_mem_init() in vm/vm_init.c
 *	to initialize any remaining data structures that the pmap module
 *	needs to map virtual memory (VM is already ON).
 *
 *	Note that the pmap needs to be sized and aligned to
 *	a power of two.  This is because it is used both in virtual and
 *	real so it can't span a page boundary.
 */
void
pmap_init(void)
{

	pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
#if	ZONE_DEBUG
	zone_debug_disable(pmap_zone);		/* Can't debug this one 'cause it messes with size and alignment */
#endif	/* ZONE_DEBUG */

	pmap_initialized = TRUE;

	/*
	 *	Initialize list of freed up pmaps
	 */
	free_pmap_list = 0;			/* Set that there are no free pmaps */
	free_pmap_count = 0;
	simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
}
unsigned int pmap_free_pages(void)
{
	return avail_remaining;
}
boolean_t pmap_next_page(vm_offset_t *addrp)
{
	/* Non optimal, but only used for virtual memory startup.
	 * Allocate memory from a table of free physical addresses.
	 * If there are no more free entries, too bad.  We have two
	 * tables to look through, free_regions[] which holds free
	 * regions from inside pmap_mem_regions[0], and the others...
	 * pmap_mem_regions[1..]
	 */

	/* current_free_region indicates the next free entry,
	 * if it's less than free_regions_count, then we're still
	 * in free_regions, otherwise we're in pmap_mem_regions
	 */

	if (current_free_region >= free_regions_count) {
		/* We're into the pmap_mem_regions, handle this
		 * separately from free_regions
		 */

		int current_pmap_mem_region = current_free_region -
					      free_regions_count + 1;
		if (current_pmap_mem_region > pmap_mem_regions_count)
			return FALSE;

		*addrp = avail_next;
		avail_next += PAGE_SIZE;
		avail_remaining--;

		if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
			current_free_region++;
			current_pmap_mem_region++;
			avail_next = pmap_mem_regions[current_pmap_mem_region].start;

			kprintf("pmap_next_page : next region start=0x%08x\n", avail_next);
		}
		return TRUE;
	}

	/* We're in the free_regions, allocate next page and increment
	 * the counters
	 */
	*addrp = avail_next;

	avail_next += PAGE_SIZE;
	avail_remaining--;

	if (avail_next >= free_regions[current_free_region].end) {
		current_free_region++;
		if (current_free_region < free_regions_count)
			avail_next = free_regions[current_free_region].start;
		else
			avail_next = pmap_mem_regions[current_free_region -
						      free_regions_count + 1].start;

		kprintf("pmap_next_page : next region start=0x%08x\n", avail_next);
	}
	return TRUE;
}
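/*
 * Illustrative sketch, not part of the original file: the VM startup code is
 * the real caller, but the shape is simply to drain pmap_next_page until it
 * reports that no boot-time physical pages remain.  The function name is
 * hypothetical.
 */
#if 0
static void example_drain_boot_pages(void)
{
	vm_offset_t pa;

	while (pmap_next_page(&pa)) {
		/* pa is the physical address of the page just handed out */
	}
}
#endif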
void pmap_virtual_space(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	*startp = round_page(first_free_virt);
	*endp   = VM_MAX_KERNEL_ADDRESS;
}
/*
 * pmap_create
 *
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * A pmap is either in the free list or in the in-use list.  The only use
 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
 * in-use list is searched until a hole in the VSID sequence is found.  (Note
 * that the in-use pmaps are queued in VSID sequence order.)  This is all done
 * while free_pmap_lock is held.
 *
 * If the size specified is non-zero, the map will be used in software
 * only, and is bounded by that size.
 */
pmap_t
pmap_create(vm_size_t size)
{
	pmap_t		pmap, ckpmap, fore, aft;
	int		i;
	unsigned int	currSID;

	dbgTrace(0xF1D00001, size, 0);			/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');

	/*
	 * A software use-only map doesn't even need a pmap structure.
	 */
	if (size)
		return(PMAP_NULL);

	/*
	 * If there is a pmap in the pmap free list, reuse it.
	 * Note that we use free_pmap_list for all chaining of pmaps, both to
	 * the free list and the in use chain (anchored from kernel_pmap).
	 */
	simple_lock(&free_pmap_lock);

	if(free_pmap_list) {				/* Any free? */
		pmap = free_pmap_list;			/* Yes, allocate it */
		free_pmap_list = (pmap_t)pmap->bmaps;	/* Dequeue this one (we chain free ones through bmaps) */
		free_pmap_count--;
	}
	else {
		simple_unlock(&free_pmap_lock);		/* Unlock just in case */

		pmap = (pmap_t) zalloc(pmap_zone);	/* Get one */
		if (pmap == PMAP_NULL) return(PMAP_NULL);	/* Handle out-of-memory condition */

		bzero((char *)pmap, pmapSize);		/* Clean up the pmap */

		simple_lock(&free_pmap_lock);		/* Lock it back up */

		ckpmap = cursor_pmap;			/* Get starting point for free ID search */
		currSID = ckpmap->spaceNum;		/* Get the actual space ID number */

		while(1) {				/* Keep trying until something happens */

			currSID = (currSID + 1) & SID_MAX;	/* Get the next in the sequence */
			ckpmap = (pmap_t)ckpmap->pmap_link.next;	/* On to the next in-use pmap */

			if(ckpmap->spaceNum != currSID) break;	/* If we are out of sequence, this is free */

			if(ckpmap == cursor_pmap) {	/* See if we have 2^20 already allocated */
				panic("pmap_create: Maximum number (2^20) active address spaces reached\n");	/* Die pig dog */
			}
		}

		pmap->space = (currSID * incrVSID) & SID_MAX;	/* Calculate the actual VSID */
		pmap->spaceNum = currSID;		/* Set the space ID number */

		/*
		 * Now we link into the chain just before the out of sequence guy.
		 */

		fore = (pmap_t)ckpmap->pmap_link.prev;	/* Get the current's previous */
		pmap->pmap_link.next = (queue_t)ckpmap;	/* My next points to the current */
		fore->pmap_link.next = (queue_t)pmap;	/* Current's previous's next points to me */
		pmap->pmap_link.prev = (queue_t)fore;	/* My prev points to what the current pointed to */
		ckpmap->pmap_link.prev = (queue_t)pmap;	/* Current's prev points to me */

		simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
		pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap);	/* Get physical pointer to the pmap and make mask */
	}

	pmap->ref_count = 1;
	pmap->stats.resident_count = 0;
	pmap->stats.wired_count = 0;
	pmap->bmaps = 0;				/* Clear block map pointer to 0 */
	pmap->vflags = 0;				/* Mark all alternates invalid for now */
	for (i = 0; i < 128; i++) {			/* Clean out usage slots */
		pmap->pmapUsage[i] = 0;
	}
	for (i = 0; i < 16; i++) {			/* Initialize for laughs */
		pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
	}

	dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("-> %x, space id = %d\n", pmap, pmap->space);

	simple_unlock(&free_pmap_lock);
	return(pmap);
}
/*
 * pmap_destroy
 *
 * Gives up a reference to the specified pmap.  When the reference count
 * reaches zero the pmap structure is added to the pmap free list.
 *
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int	ref_count;
	pmap_t	fore, aft;

	dbgTrace(0xF1D00003, (unsigned int)pmap, 0);		/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_destroy(pmap=%x)\n", pmap);

	if (pmap == PMAP_NULL)
		return;

	ref_count = hw_atomic_sub(&pmap->ref_count, 1);		/* Back off the count */
	if(ref_count > 0) return;				/* Still more users, leave now... */

	if(ref_count < 0)					/* Did we go too far? */
		panic("pmap_destroy(): ref_count < 0");

#ifdef notdef
	if(pmap->stats.resident_count != 0)
		panic("PMAP_DESTROY: pmap not empty");
#else
	if(pmap->stats.resident_count != 0) {
		pmap_remove(pmap, 0, 0xFFFFF000);
	}
#endif

	/*
	 * Add the pmap to the pmap free list.
	 */

	simple_lock(&free_pmap_lock);

	if (free_pmap_count <= free_pmap_max) {			/* Do we have enough spares? */

		pmap->bmaps = (struct blokmap *)free_pmap_list;	/* Queue in front */
		free_pmap_list = pmap;
		free_pmap_count++;
		simple_unlock(&free_pmap_lock);
	}
	else {
		if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev;	/* If we are releasing the cursor, back up */
		fore = (pmap_t)pmap->pmap_link.prev;
		aft  = (pmap_t)pmap->pmap_link.next;
		fore->pmap_link.next = pmap->pmap_link.next;	/* My previous's next is my next */
		aft->pmap_link.prev = pmap->pmap_link.prev;	/* My next's previous is my previous */
		simple_unlock(&free_pmap_lock);
		zfree(pmap_zone, (vm_offset_t) pmap);
	}
}
/*
 * pmap_reference(pmap)
 *	gains a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	dbgTrace(0xF1D00004, (unsigned int)pmap, 0);		/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_reference(pmap=%x)\n", pmap);

	if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1);	/* Bump the count */
}
/*
 * pmap_remove_some_phys
 *
 *	Removes mappings of the associated page from the specified pmap
 */
void pmap_remove_some_phys(pmap_t pmap, vm_offset_t pa)
{
	register struct phys_entry	*pp;
	register struct mapping		*mp, *mpv;

	if (pmap == PMAP_NULL) return;		/* Do nothing if no pmap */

	pp = pmap_find_physentry(pa);		/* Get the physent for this page */
	if (pp == PHYS_NULL) return;		/* Leave if not in physical RAM */

	mapping_purge_pmap(pp, pmap);

	return;					/* Leave... */
}
/*
 * pmap_remove(pmap, s, e)
 *	unmaps all virtual addresses v in the virtual address
 *	range determined by [s, e) and pmap.
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 *
 *	Note that pmap_remove does not remove any mappings in nested pmaps.  We just
 *	skip those segments.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct mapping	*mp, *blm;
	vm_offset_t	lpage;

	dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12));	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
			pmap, sva, eva);

	if (pmap == PMAP_NULL)
		return;

	/* It is just possible that eva might have wrapped around to zero,
	 * and sometimes we get asked to liberate something of size zero
	 * even though it's dumb (eg. after zero length read_overwrites)
	 */

	/* If these are not page aligned the loop might not terminate */
	assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));

	/* We liberate addresses from high to low, since the stack grows
	 * down.  This means that we won't need to test addresses below
	 * the limit of stack growth
	 */

	debugLog2(44, sva, eva);			/* Log pmap_remove call */

	sva = trunc_page(sva);				/* Make it clean */
	lpage = trunc_page(eva) - PAGE_SIZE;		/* Point to the last page contained in the range */

	/*
	 *	Here we will remove all of the block mappings that overlap this range.
	 *	hw_rem_blk removes one mapping in the range and returns.  If it returns
	 *	0, there are no blocks in the range.
	 */

	while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) {	/* Keep going until no more */
		if((unsigned int)mp & 1) {			/* Make sure we don't unmap a permanent one */
			blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE));	/* Get virtual address */
			panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
				pmap, sva, blm);
		}
		mapping_free(hw_cpv(mp));			/* Release it */
	}

	while (pmap->stats.resident_count && (eva > sva)) {

		eva -= PAGE_SIZE;				/* Back up a page */

		if((0x00008000 >> (sva >> 28)) & pmap->vflags)
			panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva);	/* (TEST/DEBUG) panic */

		if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) {	/* See if this chunk has anything in it */
			eva = eva & (-pmapUsageSize);		/* Back up into the previous slot */
			continue;				/* Check the next... */
		}
		mapping_remove(pmap, eva);			/* Remove the mapping for this address */
	}

	debugLog2(45, 0, 0);					/* Log pmap_remove end */
}
/*
 *	Routine:	pmap_page_protect
 *
 *	Function:
 *		Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_offset_t pa, vm_prot_t prot)
{
	register struct phys_entry	*pp;
	boolean_t			remove;

	dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);

	debugLog2(46, pa, prot);		/* Log pmap_page_protect call */

	switch (prot) {
		case VM_PROT_READ:
		case VM_PROT_READ|VM_PROT_EXECUTE:
			remove = FALSE;
			break;
		case VM_PROT_ALL:
			return;
		default:
			remove = TRUE;
			break;
	}

	pp = pmap_find_physentry(pa);		/* Get the physent for this page */
	if (pp == PHYS_NULL) return;		/* Leave if not in physical RAM */

	if (remove) {				/* If the protection was set to none, we'll remove all mappings */
		mapping_purge(pp);		/* Get rid of them all */

		debugLog2(47, 0, 0);		/* Log pmap_page_protect end */
		return;				/* Leave... */
	}

	/* When we get here, it means that we are to change the protection for a
	 * physical page.
	 */

	mapping_protect_phys(pp, prot, 0);	/* Change protection of all mappings to page. */

	debugLog2(47, 1, 0);			/* Log pmap_page_protect end */
}
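/*
 * Illustrative sketch, not part of the original file: VM_PROT_READ leaves the
 * mappings in place but downgrades them, while a protection that removes read
 * access (e.g. VM_PROT_NONE) purges every mapping of the page via mapping_purge.
 * The function name is hypothetical.
 */
#if 0
static void example_page_protect(vm_offset_t pa)
{
	pmap_page_protect(pa, VM_PROT_READ);	/* make all mappings of pa read-only */
	pmap_page_protect(pa, VM_PROT_NONE);	/* or strip every mapping of pa */
}
#endif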
/*
 * pmap_protect(pmap, s, e, prot)
 *	changes the protection on all virtual addresses v in the
 *	virtual address range determined by [s, e] and pmap to prot.
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 *
 *	Note that any requests to change the protection of a nested pmap are
 *	ignored.  Those changes MUST be done by calling this with the correct pmap.
 */
void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	register struct phys_entry	*pp;
	register struct mapping		*mp, *mpv;

	dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12)));	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);

	if (pmap == PMAP_NULL) return;		/* Do nothing if no pmap */

	debugLog2(48, sva, eva);		/* Log pmap_protect call */

	if (prot == VM_PROT_NONE) {		/* Should we kill the address range?? */
		pmap_remove(pmap, sva, eva);	/* Yeah, dump 'em */

		debugLog2(49, prot, 0);		/* Log pmap_protect end */

		return;				/* Leave... */
	}

	sva = trunc_page(sva);			/* Start up a page boundary */

	while(sva < eva) {			/* Step through */

		if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {	/* See if this chunk has anything in it */
			sva = (sva + pmapUsageSize) & (-pmapUsageSize);	/* Jump up into the next slot if nothing here */
			if(!sva) break;		/* We tried to wrap, kill loop... */
			continue;		/* Check the next... */
		}

		if((0x00008000 >> (sva >> 28)) & pmap->vflags)
			panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva);	/* (TEST/DEBUG) panic */

		mapping_protect(pmap, sva, prot);	/* Change the protection on the page */
		sva += PAGE_SIZE;		/* On to the next page */
	}

	debugLog2(49, prot, 1);			/* Log pmap_protect end */
	return;					/* Leave... */
}
/*
 * pmap_enter
 *
 * Create a translation for the virtual address (virt) to the physical
 * address (phys) in the pmap with the protection requested.  If the
 * translation is wired then we can not allow a full page fault, i.e.,
 * the mapping control block is not eligible to be stolen in a low memory
 * condition.
 *
 * NB: This is the only routine which MAY NOT lazy-evaluate
 *     or lose information.  That is, this routine must actually
 *     insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
	   boolean_t wired)
{
	spl_t			spl;
	struct mapping		*mp;
	struct phys_entry	*pp;
	int			memattr;

	dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va);	/* (TEST/DEBUG) */
	dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot);	/* (TEST/DEBUG) */

	if (pmap == PMAP_NULL) return;		/* If they gave us no pmap, just leave... */

	debugLog2(50, va, pa);			/* Log pmap_enter call */

	pp = pmap_find_physentry(pa);		/* Get the physent for this physical page */

	if((0x00008000 >> (va >> 28)) & pmap->vflags)
		panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va);	/* (TEST/DEBUG) panic */

	spl = splhigh();			/* Have to disallow interrupts between the
						   time we possibly clear a mapping and the time
						   we get it remapped again.  An I/O SLIH could
						   try to drive an IOR using the page before
						   we get it mapped (Dude! This was a tough bug!) */

	mapping_remove(pmap, va);		/* Remove any other mapping at this address */

	memattr = PTE_WIMG_IO;			/* Assume I/O mapping for a moment */
	if(pp) memattr = ((pp->pte1 & 0x00000078) >> 3);	/* Set the attribute to the physical default */

	mp = mapping_make(pmap, pp, va, pa, prot, memattr, 0);	/* Make the address mapping */

	splx(spl);				/* I'm not busy no more - come what may */

	debugLog2(51, prot, 0);			/* Log pmap_enter end */

	if (pmdebug & (PDB_USER|PDB_ENTER))
		kprintf("leaving pmap_enter\n");
}
/*
 * pmap_extract(pmap, va)
 *	returns the physical address corresponding to the
 *	virtual address specified by pmap and va if the
 *	virtual address is mapped and 0 if it is not.
 */
vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {

	spl_t				spl;
	register struct mapping		*mp, *mpv;
	register vm_offset_t		pa;
	unsigned int			seg;
	pmap_t				actpmap;

	dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);

	seg = va >> 28;				/* Isolate segment */
	if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];	/* Set nested pmap if there is one */
	else actpmap = pmap;			/* Otherwise use the one passed in */

	pa = (vm_offset_t) 0;			/* Clear this to 0 */

	debugLog2(52, actpmap->space, va);	/* Log pmap_extract call */

	spl = splhigh();			/* We can't allow any loss of control here */

	if(mp = hw_lock_phys_vir(actpmap->space, va)) {	/* Find the mapping for this vaddr and lock physent */
		if((unsigned int)mp & 1) {	/* Did the lock on the phys entry time out? */
			panic("pmap_extract: timeout obtaining lock on physical entry\n");	/* Scream bloody murder! */
			splx(spl);		/* Interruptions are cool now */
			return 0;
		}

		mpv = hw_cpv(mp);		/* Get virtual address of mapping */
		pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* Build the physical address */
		if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
		splx(spl);			/* Interruptions are cool now */

		debugLog2(53, pa, 0);		/* Log pmap_extract end */

		return pa;			/* Return the physical address... */
	}

	pa = hw_cvp_blk(pmap, va);		/* Try to convert odd-sized page (returns 0 if not found) */
						/* Note no nested pmaps here */
	splx(spl);				/* Restore 'rupts */
	debugLog2(53, pa, 0);			/* Log pmap_extract end */
	return pa;				/* Return physical address or 0 */
}
/*
 * pmap_attribute:
 *
 * Set/Get special memory attributes; Set is not implemented.
 *
 * Note: 'VAL_GET_INFO' is used to return info about a page.
 *	 If less than 1 page is specified, return the physical page
 *	 mapping and a count of the number of mappings to that page.
 *	 If more than one page is specified, return the number
 *	 of resident pages and the number of shared (more than
 *	 one mapping) pages in the range.
 */
kern_return_t
pmap_attribute(pmap, address, size, attribute, value)
	pmap_t				pmap;
	vm_offset_t			address;
	vm_size_t			size;
	vm_machine_attribute_t		attribute;
	vm_machine_attribute_val_t*	value;
{
	spl_t				s;
	vm_offset_t			sva, eva;
	vm_offset_t			pa;
	kern_return_t			ret;
	register struct mapping		*mp, *mpv;
	register struct phys_entry	*pp;
	int				total, seg;
	pmap_t				actpmap;

	if (attribute != MATTR_CACHE)
		return KERN_INVALID_ARGUMENT;

	/* We can't get the caching attribute for more than one page
	 * at a time
	 */
	if ((*value == MATTR_VAL_GET) &&
	    (trunc_page(address) != trunc_page(address + size - 1)))
		return KERN_INVALID_ARGUMENT;

	if (pmap == PMAP_NULL)
		return KERN_SUCCESS;

	sva = trunc_page(address);
	eva = round_page(address + size);
	ret = KERN_SUCCESS;

	debugLog2(54, address, attribute);	/* Log pmap_attribute call */

	switch (*value) {
		case MATTR_VAL_CACHE_SYNC:	/* sync I+D caches */
		case MATTR_VAL_CACHE_FLUSH:	/* flush from all caches */
		case MATTR_VAL_DCACHE_FLUSH:	/* flush from data cache(s) */
		case MATTR_VAL_ICACHE_FLUSH:	/* flush from instr cache(s) */
			sva = trunc_page(sva);
			s = splhigh();

			while (sva < eva) {
				seg = sva >> 28;	/* Isolate segment */
				if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];	/* Set nested pmap if there is one */
				else actpmap = pmap;	/* Otherwise use the one passed in */

				/*
				 * Note: the following should work ok with nested pmaps because there are not overlayed mappings
				 */
				if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {	/* See if this chunk has anything in it */
					sva = (sva + pmapUsageSize) & (-pmapUsageSize);	/* Jump up into the next slot if nothing here */
					if(!sva) break;	/* We tried to wrap, kill loop... */
					continue;	/* Check the next... */
				}

				if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) {	/* Find the mapping for this vaddr and lock physent */
					sva += PAGE_SIZE;	/* Point to the next page */
					continue;	/* Skip if the page is not mapped... */
				}

				if((unsigned int)mp & 1) {	/* Did the lock on the phys entry time out? */
					panic("pmap_attribute: timeout obtaining lock on physical entry\n");	/* Scream bloody murder! */
				}

				mpv = hw_cpv(mp);	/* Get virtual address of mapping */
				if((unsigned int)mpv->physent) {	/* Is there a physical entry? */
					pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE;	/* Yes, get the physical address from there */
				}
				else {
					pa = (vm_offset_t)(mpv->PTEr & -PAGE_SIZE);		/* Otherwise from the mapping */
				}

				switch (*value) {	/* What type was that again? */
					case MATTR_VAL_CACHE_SYNC:	/* It is sync I+D caches */
						sync_cache(pa, PAGE_SIZE);	/* Sync up dem caches */
						break;	/* Done with this one here... */

					case MATTR_VAL_CACHE_FLUSH:	/* It is flush from all caches */
						flush_dcache(pa, PAGE_SIZE, TRUE);	/* Flush out the data cache */
						invalidate_icache(pa, PAGE_SIZE, TRUE);	/* Flush out the instruction cache */
						break;	/* Done with this one here... */

					case MATTR_VAL_DCACHE_FLUSH:	/* It is flush from data cache(s) */
						flush_dcache(pa, PAGE_SIZE, TRUE);	/* Flush out the data cache */
						break;	/* Done with this one here... */

					case MATTR_VAL_ICACHE_FLUSH:	/* It is flush from instr cache(s) */
						invalidate_icache(pa, PAGE_SIZE, TRUE);	/* Flush out the instruction cache */
						break;	/* Done with this one here... */
				}
				if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry if it exists*/

				sva += PAGE_SIZE;	/* Point to the next page */
			}
			splx(s);
			break;

		case MATTR_VAL_GET_INFO:	/* Get info */
			total = 0;
			s = splhigh();		/* Lock 'em out */

			if (size <= PAGE_SIZE) {	/* Do they want just one page */
				seg = sva >> 28;	/* Isolate segment */
				if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];	/* Set nested pmap if there is one */
				else actpmap = pmap;	/* Otherwise use the one passed in */
				if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) {	/* Find the mapping for this vaddr and lock physent */
					*value = 0;	/* Return nothing if no mapping */
				}
				else {
					if((unsigned int)mp & 1) {	/* Did the lock on the phys entry time out? */
						panic("pmap_attribute: timeout obtaining lock on physical entry\n");	/* Scream bloody murder! */
					}
					mpv = hw_cpv(mp);	/* Get virtual address of mapping */
					if(pp = mpv->physent) {	/* Check for a physical entry */
						total = 0;	/* Clear the count */
						for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mp->next)) total++;	/* Count the mapping */
						*value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total);	/* Pass back the physical address and the count of mappings */
						hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Clear the physical entry lock */
					}
					else {		/* This is the case for an I/O mapped area */
						*value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1);	/* Pass back the physical address and the count of mappings */
					}
				}
			}
			else {
				while (sva < eva) {
					seg = sva >> 28;	/* Isolate segment */
					if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];	/* Set nested pmap if there is one */
					else actpmap = pmap;	/* Otherwise use the one passed in */

					if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {	/* See if this chunk has anything in it */
						sva = (sva + pmapUsageSize) & (-pmapUsageSize);	/* Jump up into the next slot if nothing here */
						if(!sva) break;	/* We tried to wrap, kill loop... */
						continue;	/* Check the next... */
					}
					if(mp = hw_lock_phys_vir(actpmap->space, sva)) {	/* Find the mapping for this vaddr and lock physent */
						if((unsigned int)mp & 1) {	/* Did the lock on the phys entry time out? */
							panic("pmap_attribute: timeout obtaining lock on physical entry\n");	/* Scream bloody murder! */
						}
						mpv = hw_cpv(mp);	/* Get virtual address of mapping */
						total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next);	/* Count the "resident" and shared pages */
						hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Clear the physical entry lock */
					}
					sva += PAGE_SIZE;
				}
				*value = total;
			}
			splx(s);
			break;

		case MATTR_VAL_GET:		/* return current value */
		case MATTR_VAL_OFF:		/* turn attribute off */
		case MATTR_VAL_ON:		/* turn attribute on */
		default:
			ret = KERN_INVALID_ARGUMENT;
			break;
	}

	debugLog2(55, 0, 0);			/* Log pmap_attribute end */

	return ret;
}
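/*
 * Illustrative sketch, not part of the original file: flushing the caches for
 * a page of freshly written instructions goes through MATTR_CACHE, with the
 * operation passed by reference in *value.  The function and variable names
 * are hypothetical.
 */
#if 0
static void example_flush_code_page(vm_offset_t code_va)
{
	vm_machine_attribute_val_t op = MATTR_VAL_CACHE_FLUSH;

	(void) pmap_attribute(kernel_pmap, code_va, PAGE_SIZE, MATTR_CACHE, &op);
}
#endif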
/*
 * pmap_collect
 *
 * Garbage collects the physical map system for pages that are no longer used.
 * It isn't implemented or needed or wanted.
 */
void
pmap_collect(pmap_t pmap)
{
	return;
}

/*
 *	Routine:	pmap_activate
 *	Function:
 *		Binds the given physical map to the given
 *		processor, and returns a hardware map description.
 *		It isn't implemented or needed or wanted.
 */
void
pmap_activate(pmap_t pmap, thread_t th, int which_cpu)
{
	return;
}

/*
 *	Routine:	pmap_deactivate
 *	Function:
 *		It isn't implemented or needed or wanted.
 */
void
pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu)
{
	return;
}

/*
 *	pmap_zero_page and pmap_copy_page
 *	are implemented in movc.s, these
 *	are just wrappers to help debugging
 */

extern void pmap_zero_page_assembler(vm_offset_t p);
extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
/*
 * pmap_zero_page(pa)
 *
 * pmap_zero_page zeros the specified (machine independent) page pa.
 */
void
pmap_zero_page(vm_offset_t p)
{
	register struct mapping		*mp;
	register struct phys_entry	*pp;

	if (pmdebug & (PDB_USER|PDB_ZERO))
		kprintf("pmap_zero_page(pa=%x)\n", p);

	/*
	 * XXX can these happen?
	 */
	if (pmap_find_physentry(p) == PHYS_NULL)
		panic("zero_page: physaddr out of range");

	pmap_zero_page_assembler(p);
}
/*
 * pmap_copy_page(src, dst)
 *
 * pmap_copy_page copies the specified (machine independent)
 * page from physical address src to physical address dst.
 *
 * We need to invalidate the cache for address dst before
 * we do the copy.  Apparently there won't be any mappings
 * to the dst address normally.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	register struct phys_entry	*pp;

	if (pmdebug & (PDB_USER|PDB_COPY))
		kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
	if (pmdebug & PDB_COPY)
		kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
			src, dst, PAGE_SIZE);

	pmap_copy_page_assembler(src, dst);
}
/*
 * pmap_pageable(pmap, s, e, pageable)
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter()
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 *
 *	(called from vm/vm_fault.c).
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t start, vm_offset_t end, boolean_t pageable)
{
	return;						/* This is not used... */
}

/*
 *	Routine:	pmap_change_wiring
 *	NOT USED ANYMORE.
 */
void
pmap_change_wiring(
	register pmap_t	pmap,
	vm_offset_t	va,
	boolean_t	wired)
{
	return;						/* This is not used... */
}
/*
 * pmap_modify_pages(pmap, s, e)
 *	sets the modified bit on all virtual addresses v in the
 *	virtual address range determined by [s, e] and pmap,
 *	s and e must be on machine independent page boundaries and
 *	s must be less than or equal to e.
 */
void
pmap_modify_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	spl_t		spl;
	mapping		*mp;

	dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12)));	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);

	if (pmap == PMAP_NULL) return;		/* If no pmap, can't do it... */

	debugLog2(56, sva, eva);		/* Log pmap_modify_pages call */

	spl = splhigh();			/* Don't bother me */

	for ( ; sva < eva; sva += PAGE_SIZE) {	/* Cycle through the whole range */
		mp = hw_lock_phys_vir(pmap->space, sva);	/* Lock the physical entry for this mapping */
		if(mp) {			/* Did we find one? */
			if((unsigned int)mp & 1) {	/* Did the lock on the phys entry time out? */
				panic("pmap_modify_pages: timeout obtaining lock on physical entry\n");	/* Scream bloody murder! */
			}
			mp = hw_cpv(mp);	/* Convert to virtual addressing */
			if(!mp->physent) continue;	/* No physical entry means an I/O page, we can't set attributes */
			mapping_set_mod(mp->physent);	/* Set the modified bit for this page */
			hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
		}
	}
	splx(spl);				/* Restore the interrupt level */

	debugLog2(57, 0, 0);			/* Log pmap_modify_pages end */
	return;					/* Leave... */
}
/*
 * pmap_clear_modify(phys)
 *	clears the hardware modified ("dirty") bit for one
 *	machine independent page starting at the given
 *	physical address.  phys must be aligned on a machine
 *	independent page boundary.
 */
void
pmap_clear_modify(vm_offset_t pa)
{
	register struct phys_entry	*pp;
	spl_t				spl;

	dbgTrace(0xF1D00011, (unsigned int)pa, 0);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_clear_modify(pa=%x)\n", pa);

	pp = pmap_find_physentry(pa);		/* Find the physent for this page */
	if (pp == PHYS_NULL) return;		/* If there isn't one, just leave... */

	debugLog2(58, pa, 0);			/* Log pmap_clear_modify call */

	spl = splhigh();			/* Don't bother me */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
		panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp);	/* Arrrgghhhh! */
		splx(spl);			/* Restore 'rupts */
		return;				/* Should die before here */
	}

	mapping_clr_mod(pp);			/* Clear all change bits for physical page */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	splx(spl);				/* Restore the interrupt level */

	debugLog2(59, 0, 0);			/* Log pmap_clear_modify end */
}
/*
 * pmap_is_modified(phys)
 *	returns TRUE if the given physical page has been modified
 *	since the last call to pmap_clear_modify().
 */
boolean_t
pmap_is_modified(register vm_offset_t pa)
{
	register struct phys_entry	*pp;
	spl_t				spl;
	boolean_t			ret;

	dbgTrace(0xF1D00012, (unsigned int)pa, 0);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_is_modified(pa=%x)\n", pa);

	pp = pmap_find_physentry(pa);		/* Find the physent for this page */
	if (pp == PHYS_NULL) return(FALSE);	/* Just indicate not set... */

	debugLog2(60, pa, 0);			/* Log pmap_is_modified call */

	spl = splhigh();			/* Don't bother me */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
		panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp);	/* Arrrgghhhh! */
		splx(spl);			/* Restore 'rupts */
		return 0;			/* Should die before here */
	}

	ret = mapping_tst_mod(pp);		/* Check for modified */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	splx(spl);				/* Restore the interrupt level */

	debugLog2(61, ret, 0);			/* Log pmap_is_modified end */

	return ret;
}
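/*
 * Illustrative sketch, not part of the original file: the usual dirty-bit
 * pattern pairs pmap_is_modified with pmap_clear_modify.  The function name
 * is hypothetical.
 */
#if 0
static void example_dirty_check(vm_offset_t pa)
{
	if (pmap_is_modified(pa)) {
		/* page was written since the last clear; save it, then... */
		pmap_clear_modify(pa);
	}
}
#endif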
1906 * pmap_clear_reference(phys)
1907 * clears the hardware referenced bit in the given machine
1908 * independant physical page.
void
pmap_clear_reference(vm_offset_t pa)
{
	register struct phys_entry	*pp;
	spl_t				spl;

	dbgTrace(0xF1D00013, (unsigned int)pa, 0);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_clear_reference(pa=%x)\n", pa);

	pp = pmap_find_physentry(pa);			/* Find the physent for this page */
	if (pp == PHYS_NULL) return;			/* If there isn't one, just leave... */

	debugLog2(62, pa, 0);				/* Log pmap_clear_reference call */

	spl = splhigh();				/* Don't bother me */
	mapping_clr_ref(pp);				/* Clear all reference bits for physical page */
	splx(spl);					/* Restore the interrupt level */

	debugLog2(63, 0, 0);				/* Log pmap_clear_reference completion */
}
/*
 * pmap_is_referenced(phys)
 *	returns TRUE if the given physical page has been referenced
 *	since the last call to pmap_clear_reference().
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
	register struct phys_entry	*pp;
	spl_t				spl;
	boolean_t			ret;

	dbgTrace(0xF1D00014, (unsigned int)pa, 0);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_is_referenced(pa=%x)\n", pa);

	pp = pmap_find_physentry(pa);			/* Find the physent for this page */
	if (pp == PHYS_NULL) return(FALSE);		/* Just indicate not set... */

	debugLog2(64, pa, 0);				/* Log pmap_is_referenced call */

	spl = splhigh();				/* Don't bother me */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
		panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp);	/* Arrrgghhhh! */
		splx(spl);				/* Restore 'rupts */
		return 0;				/* Should die before here */
	}

	ret = mapping_tst_ref(pp);			/* Check for referenced */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	splx(spl);					/* Restore the interrupt level */

	debugLog2(65, ret, 0);				/* Log pmap_is_referenced result */

	return ret;					/* Hand back what we found */
}
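/*
 *	Usage sketch (compiled out): a page-aging scan could use
 *	pmap_is_referenced() and pmap_clear_reference() to spot pages that were
 *	not touched between two passes.  Only the call order is meaningful here;
 *	how the caller records "idle" pages is up to it.
 */
#if 0
static boolean_t
example_page_was_idle(vm_offset_t pa)
{
	boolean_t	idle;

	idle = !pmap_is_referenced(pa);		/* Untouched since the previous pass? */
	pmap_clear_reference(pa);		/* Re-arm the bit for the next pass */
	return idle;
}
#endif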
#if	MACH_VM_DEBUG
int
pmap_list_resident_pages(
	register pmap_t		pmap,
	register vm_offset_t	*listp,
	register int		space)
{
	return 0;				/* Not implemented on this platform */
}
#endif	/* MACH_VM_DEBUG */
void
pmap_copy_part_page(
	vm_offset_t	src,
	vm_offset_t	src_offset,
	vm_offset_t	dst,
	vm_offset_t	dst_offset,
	vm_size_t	len)
{
	register struct phys_entry	*pp_src, *pp_dst;

	dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset);	/* (TEST/DEBUG) */
	dbgTrace(0xF1D04019, (unsigned int)len, 0);	/* (TEST/DEBUG) */

	assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
	assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

	/*
	 * Since the source and destination are physical addresses,
	 * turn off data translation to perform a bcopy() in bcopy_phys().
	 */
	phys_copy((vm_offset_t) src + src_offset,
		  (vm_offset_t) dst + dst_offset, len);
}
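/*
 *	Usage sketch (compiled out): copying a small record from the start of one
 *	physical page into the middle of another with pmap_copy_part_page().  The
 *	addresses are made up; the real constraints are the asserts above, i.e.
 *	neither the source nor the destination range may cross a page boundary.
 */
#if 0
static void
example_copy_record(vm_offset_t src_page, vm_offset_t dst_page)
{
	pmap_copy_part_page(src_page, 0,	/* From the start of the source page */
			    dst_page, 0x800,	/* Into offset 0x800 of the destination page */
			    512);		/* 512 bytes, well within a single page */
}
#endif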
void
pmap_zero_part_page(
	vm_offset_t	p,
	vm_offset_t	offset,
	vm_size_t	len)
{
	panic("pmap_zero_part_page");		/* Not supported on this platform */
}
boolean_t pmap_verify_free(vm_offset_t pa) {

	struct phys_entry	*pp;

	dbgTrace(0xF1D00007, (unsigned int)pa, 0);	/* (TEST/DEBUG) */

	if (pmdebug & PDB_USER)
		kprintf("pmap_verify_free(pa=%x)\n", pa);

	if (!pmap_initialized) return(TRUE);

	pp = pmap_find_physentry(pa);			/* Look up the physical entry */
	if (pp == PHYS_NULL) return FALSE;		/* If there isn't one, show no mapping... */
	return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL);	/* Otherwise, return TRUE only if no mappings remain... */
}
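/*
 *	Usage sketch (compiled out): pmap_verify_free() answers "does this
 *	physical page have no mappings left?", so a debug check before recycling
 *	a page could look like this.  Where the page comes from is up to the
 *	caller and is not shown.
 */
#if 0
static void
example_check_recycled_page(vm_offset_t pa)
{
	if (!pmap_verify_free(pa)) {		/* Still mapped somewhere? */
		panic("recycled page %08X still has mappings\n", pa);
	}
}
#endif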
/* Determine if we need to switch space and set up for it if so */

void pmap_switch(pmap_t map)
{
	if (watchacts & WA_PCB) {
		kprintf("Switching to map at 0x%08x, space=%d\n",
			map, map->space);
	}

	/* when changing to kernel space, don't bother
	 * doing anything, the kernel is mapped from here already.
	 */
	if (map->space == PPC_SID_KERNEL) {		/* Are we switching into kernel space? */
		return;					/* If so, we don't do anything... */
	}

	hw_set_user_space(map);				/* Indicate if we need to load the SRs or not */
	return;						/* Bye, bye, butterfly... */
}
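/*
 *	Usage sketch (compiled out): the context-switch path activates the
 *	incoming thread's address space by handing its pmap to pmap_switch().
 *	The next_pmap argument stands in for however the caller finds the pmap
 *	of the task it is switching to.
 */
#if 0
static void
example_activate_space(pmap_t next_pmap)
{
	pmap_switch(next_pmap);		/* No-op for the kernel pmap, otherwise sets up the user space */
}
#endif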
/*
 *	kern_return_t pmap_nest(grand, subord, vaddr, size)
 *
 *	grand  = the pmap that we will nest subord into
 *	subord = the pmap that goes into the grand
 *	vaddr  = start of range in pmap to be inserted
 *	size   = size of range in pmap to be inserted
 *
 *	Inserts a pmap into another.  This is used to implement shared segments.
 *	On the current PPC processors, this is limited to segment (256MB) aligned
 *	segment sized ranges.
 */
kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {

	unsigned int oflags, seg, grandr;
	int i;

	if(size != 0x10000000) return KERN_INVALID_VALUE;	/* We can only do this for 256MB for now */
	if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */

	while(1) {						/* Test and set the subordinate flag */
		oflags = subord->vflags & ~pmapAltSeg;		/* Get old unset value */
		if(subord->vflags & pmapAltSeg) {		/* Are we trying to nest one that is already nested? */
			panic("pmap_nest: Attempt to nest an already nested pmap\n");
		}
		if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break;	/* Done if we got it set */
	}

	simple_lock(&grand->lock);				/* Lock the superior pmap */

	if(grand->vflags & pmapSubord) {			/* Are we only one level deep? */
		simple_unlock(&grand->lock);			/* Unlock the superior pmap */
		panic("pmap_nest: Attempt to nest into subordinate pmap\n");
		return KERN_FAILURE;				/* Shame on you */
	}

	seg = vaddr >> 28;					/* Isolate the segment number */
	if((0x00008000 >> seg) & grand->vflags) {		/* See if it is already in use */
		simple_unlock(&grand->lock);			/* Unlock the superior pmap */
		panic("pmap_nest: Attempt to nest into already nested segment\n");
		return KERN_FAILURE;				/* Shame on you */
	}

	grand->pmapPmaps[seg] = subord;				/* Set the pointer to the subordinate */
	grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space;	/* Set the vsid to the subordinate's vsid */
	grand->vflags |= (0x00008000 >> seg);			/* Set in-use bit */

	grandr = (unsigned int)grand ^ grand->pmapvr;		/* Get real address of the grand pmap */

	simple_unlock(&grand->lock);				/* Unlock the grand pmap */

	/*
	 *	Note that the following will force the segment registers to be reloaded following
	 *	the next interrupt on all processors if they are using the pmap we just changed.
	 */

	for(i = 0; i < real_ncpus; i++) {			/* Cycle through processors */
		(void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap);	/* Clear if ours */
	}

	return KERN_SUCCESS;					/* Bye, bye, butterfly... */
}
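/*
 *	Usage sketch (compiled out): nesting a shared-segment pmap into a task's
 *	pmap.  As the checks above require, the range must be exactly one 256MB
 *	segment, 256MB aligned.  The base address 0x70000000 and both pmap
 *	arguments are hypothetical, supplied by whatever sets up the shared
 *	region.
 */
#if 0
static void
example_attach_shared_segment(pmap_t task_pmap, pmap_t shared_region_pmap)
{
	kern_return_t kr;

	kr = pmap_nest(task_pmap, shared_region_pmap,
		       0x70000000,			/* 256MB-aligned base for the shared segment */
		       0x10000000);			/* Exactly one segment */
	if(kr != KERN_SUCCESS) {
		panic("example_attach_shared_segment: pmap_nest failed (%d)\n", kr);
	}
}
#endif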
/*
 *	kern_return_t pmap_unnest(grand, vaddr, size)
 *
 *	grand = the pmap that the subordinate will be removed from
 *	vaddr = start of the nested range to be removed
 *	size  = size of the nested range to be removed
 *
 *	Removes a pmap from another.  This is used to implement shared segments.
 *	On the current PPC processors, this is limited to segment (256MB) aligned
 *	segment sized ranges.
 */
kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {

	unsigned int oflags, seg, grandr, tstamp;
	int i, mycpu;

	if(size != 0x10000000) return KERN_INVALID_VALUE;	/* We can only do this for 256MB for now */
	if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE;	/* We can only do this aligned to 256MB */

	simple_lock(&grand->lock);				/* Lock the superior pmap */
	disable_preemption();					/* It's all for me! */

	seg = vaddr >> 28;					/* Isolate the segment number */
	if(!((0x00008000 >> seg) & grand->vflags)) {		/* See if it is actually in use */
		enable_preemption();				/* Ok, your turn */
		simple_unlock(&grand->lock);			/* Unlock the superior pmap */
		panic("pmap_unnest: Attempt to unnest an unnested segment\n");
		return KERN_FAILURE;				/* Shame on you */
	}

	grand->pmapPmaps[seg] = (pmap_t)0;			/* Clear the pointer to the subordinate */
	grand->pmapSegs[seg] = grand->space;			/* Set the pointer to the subordinate's vsid */
	grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space;	/* Set the vsid to the grand's vsid */
	grand->vflags &= ~(0x00008000 >> seg);			/* Clear in-use bit */

	grandr = (unsigned int)grand ^ grand->pmapvr;		/* Get real address of the grand pmap */

	simple_unlock(&grand->lock);				/* Unlock the superior pmap */

	/*
	 *	Note that the following will force the segment registers to be reloaded
	 *	on all processors (if they are using the pmap we just changed) before returning.
	 *
	 *	This is needed.  The reason is that until the segment register is
	 *	reloaded, another thread in the same task on a different processor will
	 *	be able to access memory that it isn't allowed to anymore.  That can happen
	 *	because access to the subordinate pmap is being removed, but the pmap itself
	 *	is still valid.
	 *
	 *	Note that we only kick the other processor if we see that it was using the pmap while we
	 *	were changing it.
	 */

	mycpu = cpu_number();					/* Who am I? Am I just a dream? */
	for(i = 0; i < real_ncpus; i++) {			/* Cycle through processors */
		if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) {	/* Clear if ours and kick the other guy if he was using it */
			if(i == mycpu) continue;		/* Don't diddle ourselves */
			tstamp = per_proc_info[i].ruptStamp[1];	/* Save the processor's last interrupt time stamp */
			if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) {	/* Make sure we see the pmap change */
				panic("pmap_unnest: Unable to signal processor %d\n", i);	/* The kick never got delivered */
			}
			if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) {	/* Wait for the other processor to take an interruption */
				panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
			}
		}
	}

	enable_preemption();					/* Others can run now */
	return KERN_SUCCESS;					/* Bye, bye, butterfly... */
}
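/*
 *	Usage sketch (compiled out): undoing the nesting shown after pmap_nest()
 *	above.  The address and size must match the original pmap_nest() call;
 *	0x70000000 is the same hypothetical base used in that sketch.
 */
#if 0
static void
example_detach_shared_segment(pmap_t task_pmap)
{
	if(pmap_unnest(task_pmap, 0x70000000, 0x10000000) != KERN_SUCCESS) {
		panic("example_detach_shared_segment: pmap_unnest failed\n");
	}
}
#endif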
void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {

	int i, cnt;
	vm_offset_t xx;

	sva = trunc_page(sva);
	eva = trunc_page(eva);
	for(i = 0; i < (pmapUsageMask + 1); i++) {	/* Step through them all */
		if((pmap->pmapUsage[i]) > 8192) {	/* See if this is a sane number */
			panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, pmap->pmapUsage[i], pmap);
		}
	}
	while(1) {					/* Try multiple times */
		cnt = 0;				/* Start the total over */
		for(i = 0; i < (pmapUsageMask + 1); i++) {	/* Step through them all */
			cnt = cnt + pmap->pmapUsage[i];	/* Sum all slots */
		}
		if(cnt == pmap->stats.resident_count) break;	/* We're ok if we match... */

		for(i = 0; i < 100000; i++) {		/* Spin for a while to let any in-flight updates land */
		}

		panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
			cnt, pmap->stats.resident_count, pmap);
	}
	for(xx = sva; xx < eva; xx += PAGE_SIZE) {	/* See if any slots not clear */
		if(pmap_extract(pmap, xx)) {
			panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
				sva, eva, xx, pmap