2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1990,1991,1992 The University of Utah and
34 * the Center for Software Science (CSS).
35 * Copyright (c) 1991,1987 Carnegie Mellon University.
36 * All rights reserved.
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation,
43 * and that all advertising materials mentioning features or use of
44 * this software display the following acknowledgement: ``This product
45 * includes software developed by the Center for Software Science at
46 * the University of Utah.''
48 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
49 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
50 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
53 * CSS requests users of this software to return to css-dist@cs.utah.edu any
54 * improvements that they make and grant CSS redistribution rights.
56 * Carnegie Mellon requests users of this software to return to
57 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
58 * School of Computer Science
59 * Carnegie Mellon University
60 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie Mellon
62 * the rights to redistribute these changes.
64 * Utah $Hdr: pmap.c 1.28 92/06/23$
65 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
69 * Manages physical address maps for powerpc.
71 * In addition to hardware address maps, this
72 * module is called upon to provide software-use-only
73 * maps which may or may not be stored in the same
74 * form as hardware maps. These pseudo-maps are
75 * used to store intermediate results from copy
76 * operations to and from address spaces.
78 * Since the information managed by this module is
79 * also stored by the logical address mapping module,
80 * this module may throw away valid virtual-to-physical
81 * mappings at almost any time. However, invalidations
82 * of virtual-to-physical mappings must be done as
85 * In order to cope with hardware architectures which
86 * make virtual-to-physical map invalidates expensive,
87 * this module may delay invalidate or reduced protection
88 * operations until such time as they are actually
89 * necessary. This module is given full information as to
90 * when physical maps must be made correct.
94 #include <zone_debug.h>
96 #include <mach_kgdb.h>
97 #include <mach_vm_debug.h>
98 #include <db_machine_commands.h>
100 #include <kern/thread.h>
101 #include <kern/simple_lock.h>
102 #include <mach/vm_attributes.h>
103 #include <mach/vm_param.h>
104 #include <vm/vm_kern.h>
105 #include <kern/spl.h>
107 #include <kern/misc_protos.h>
108 #include <ppc/misc_protos.h>
109 #include <ppc/proc_reg.h>
112 #include <vm/vm_map.h>
113 #include <vm/vm_page.h>
115 #include <ppc/pmap.h>
117 #include <ppc/mappings.h>
119 #include <ppc/new_screen.h>
120 #include <ppc/Firmware.h>
121 #include <ppc/savearea.h>
122 #include <ppc/cpu_internal.h>
123 #include <ppc/exception.h>
124 #include <ppc/low_trace.h>
125 #include <ppc/lowglobals.h>
126 #include <ppc/limits.h>
127 #include <ddb/db_output.h>
128 #include <machine/cpu_capabilities.h>
130 #include <vm/vm_protos.h> /* must be last */
133 extern unsigned int avail_remaining
;
134 unsigned int debugbackpocket
; /* (TEST/DEBUG) */
136 vm_offset_t first_free_virt
;
137 unsigned int current_free_region
; /* Used in pmap_next_page */
139 pmapTransTab
*pmapTrans
; /* Point to the hash to pmap translations */
140 struct phys_entry
*phys_table
;
143 static void pmap_map_physical(void);
144 static void pmap_map_iohole(addr64_t paddr
, addr64_t size
);
145 void pmap_activate(pmap_t pmap
, thread_t th
, int which_cpu
);
146 void pmap_deactivate(pmap_t pmap
, thread_t th
, int which_cpu
);
148 extern void hw_hash_init(void);
150 /* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
152 extern struct pmap kernel_pmap_store
;
153 pmap_t kernel_pmap
; /* Pointer to kernel pmap and anchor for in-use pmaps */
154 addr64_t kernel_pmap_phys
; /* Pointer to kernel pmap and anchor for in-use pmaps, physical address */
155 pmap_t cursor_pmap
; /* Pointer to last pmap allocated or previous if removed from in-use list */
156 pmap_t sharedPmap
; /* Pointer to common pmap for 64-bit address spaces */
157 struct zone
*pmap_zone
; /* zone of pmap structures */
158 boolean_t pmap_initialized
= FALSE
;
160 int ppc_max_pmaps
; /* Maximum number of concurrent address spaces allowed. This is machine dependent */
161 addr64_t vm_max_address
; /* Maximum effective address supported */
162 addr64_t vm_max_physical
; /* Maximum physical address supported */
165 * Physical-to-virtual translations are handled by inverted page table
166 * structures, phys_tables. Multiple mappings of a single page are handled
167 * by linking the affected mapping structures. We initialise one region
168 * for phys_tables of the physical memory we know about, but more may be
169 * added as it is discovered (eg. by drivers).
173 * free pmap list. caches the first free_pmap_max pmaps that are freed up
175 int free_pmap_max
= 32;
177 pmap_t free_pmap_list
;
178 decl_simple_lock_data(,free_pmap_lock
)
181 * Function to get index into phys_table for a given physical address
184 struct phys_entry
*pmap_find_physentry(ppnum_t pa
)
189 for (i
= pmap_mem_regions_count
- 1; i
>= 0; i
--) {
190 if (pa
< pmap_mem_regions
[i
].mrStart
) continue; /* See if we fit in this region */
191 if (pa
> pmap_mem_regions
[i
].mrEnd
) continue; /* Check the end too */
193 entry
= (unsigned int)pmap_mem_regions
[i
].mrPhysTab
+ ((pa
- pmap_mem_regions
[i
].mrStart
) * sizeof(phys_entry_t
));
194 return (struct phys_entry
*)entry
;
196 // kprintf("DEBUG - pmap_find_physentry: page 0x%08X not found\n", pa);
202 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
203 * boolean_t available, unsigned int attr)
205 * THIS IS NOT SUPPORTED
208 pmap_add_physical_memory(
209 __unused vm_offset_t spa
,
210 __unused vm_offset_t epa
,
211 __unused boolean_t available
,
212 __unused
unsigned int attr
)
215 panic("Forget it! You can't map no more memory, you greedy puke!\n");
/*
 * NOTE(review): extraction damage — the function signature, several braces and
 * the final return of pmap_map are not visible here; the fragments below are
 * preserved verbatim.  Visible behavior: build mapping_make flags from "flags",
 * return early on a zero-length range, then block-map [spa, epa) at "va",
 * panicking if anything was already mapped there.
 */
220 * pmap_map(va, spa, epa, prot)
221 * is called during boot to map memory in the kernel's address map.
222 * A virtual address range starting at "va" is mapped to the physical
223 * address range "spa" to "epa" with machine independent protection
226 * "va", "spa", and "epa" are byte addresses and must be on machine
227 * independent page boundaries.
229 * Pages with a contiguous virtual address range, the same protection, and attributes.
230 * therefore, we map it with a single block.
232 * Note that this call will only map into 32-bit space
246 mflags
= 0; /* Make sure this is initialized to nothing special */
247 if(!(flags
& VM_WIMG_USE_DEFAULT
)) { /* Are they supplying the attributes? */
248 mflags
= mmFlgUseAttr
| (flags
& VM_MEM_GUARDED
) | ((flags
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
/* Zero-length request: nothing to map, hand "va" straight back */
251 if (spa
== epa
) return(va
);
/* Enter a single permanent block mapping for the whole range */
255 colladr
= mapping_make(kernel_pmap
, (addr64_t
)va
, (ppnum_t
)(spa
>> 12),
256 (mmFlgBlock
| mmFlgPerm
), (epa
- spa
) >> 12, (prot
& VM_PROT_ALL
) );
258 if(colladr
) { /* Was something already mapped in the range? */
259 panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
260 va
, spa
, epa
, colladr
);
/*
 * NOTE(review): extraction damage — the return-type line, braces, the loop
 * variable declaration and (presumably) the loop that advances through sizes
 * larger than 2TB are missing; fragments preserved verbatim.  Visible
 * behavior: for each physical memory region, compute its start/size, derive
 * the window virtual address from lowGlo.lgPMWvaddr, clamp the chunk to 2TB,
 * and enter a permanent RW block mapping, panicking on collision.
 */
266 * pmap_map_physical()
267 * Maps physical memory into the kernel's address map beginning at lgPMWvaddr, the
268 * physical memory window.
272 pmap_map_physical(void)
275 uint64_t msize
, size
;
276 addr64_t paddr
, vaddr
, colladdr
;
278 /* Iterate over physical memory regions, block mapping each into the kernel's address map */
279 for (region
= 0; region
< (unsigned)pmap_mem_regions_count
; region
++) {
280 paddr
= ((addr64_t
)pmap_mem_regions
[region
].mrStart
<< 12); /* Get starting physical address */
281 size
= (((addr64_t
)pmap_mem_regions
[region
].mrEnd
+ 1) << 12) - paddr
;
283 vaddr
= paddr
+ lowGlo
.lgPMWvaddr
; /* Get starting virtual address */
287 msize
= ((size
> 0x0000020000000000ULL
) ? 0x0000020000000000ULL
: size
); /* Get size, but no more than 2TBs */
/* Permanent RW block mapping of this chunk into the physical memory window */
289 colladdr
= mapping_make(kernel_pmap
, vaddr
, (paddr
>> 12),
290 (mmFlgBlock
| mmFlgPerm
), (msize
>> 12),
291 (VM_PROT_READ
| VM_PROT_WRITE
));
/* NOTE(review): the guard (presumably "if (colladdr)") is on a missing line */
293 panic ("pmap_map_physical: mapping failure - va = %016llX, pa = %016llX, size = %016llX, collision = %016llX\n",
294 vaddr
, (paddr
>> 12), (msize
>> 12), colladdr
);
297 vaddr
= vaddr
+ (uint64_t)msize
; /* Point to the next virtual addr */
298 paddr
= paddr
+ (uint64_t)msize
; /* Point to the next physical addr */
/*
 * NOTE(review): extraction damage — return-type line, braces and (presumably)
 * the loop over >2TB chunks are missing; fragments preserved verbatim.
 * Visible behavior: like pmap_map_physical, but the block mapping is made
 * guarded and cache-inhibited (mmFlgGuarded | mmFlgCInhib), as required for
 * an I/O hole.
 */
305 * pmap_map_iohole(addr64_t paddr, addr64_t size)
306 * Maps an I/O hole into the kernel's address map at its proper offset in
307 * the physical memory window.
311 pmap_map_iohole(addr64_t paddr
, addr64_t size
)
314 addr64_t vaddr
, colladdr
, msize
;
316 vaddr
= paddr
+ lowGlo
.lgPMWvaddr
; /* Get starting virtual address */
320 msize
= ((size
> 0x0000020000000000ULL
) ? 0x0000020000000000ULL
: size
); /* Get size, but no more than 2TBs */
/* Guarded, cache-inhibited permanent block mapping for the I/O hole chunk */
322 colladdr
= mapping_make(kernel_pmap
, vaddr
, (paddr
>> 12),
323 (mmFlgBlock
| mmFlgPerm
| mmFlgGuarded
| mmFlgCInhib
), (msize
>> 12),
324 (VM_PROT_READ
| VM_PROT_WRITE
));
/* NOTE(review): the guard (presumably "if (colladdr)") is on a missing line */
326 panic ("pmap_map_iohole: mapping failed - va = %016llX, pa = %016llX, size = %016llX, collision = %016llX\n",
327 vaddr
, (paddr
>> 12), (msize
>> 12), colladdr
);
330 vaddr
= vaddr
+ (uint64_t)msize
; /* Point to the next virtual addr */
331 paddr
= paddr
+ (uint64_t)msize
; /* Point to the next physical addr */
337 * Bootstrap the system enough to run with virtual memory.
338 * Map the kernel's code and data, and allocate the system page table.
339 * Called with mapping done by BATs. Page_size must already be set.
342 * msize: Total memory present
343 * first_avail: First virtual address available
344 * kmapsize: Size of kernel text and data
347 pmap_bootstrap(uint64_t msize
, vm_offset_t
*first_avail
, unsigned int kmapsize
)
351 unsigned int i
, num
, mapsize
, vmpagesz
, vmmapsz
, nbits
;
355 vm_offset_t first_used_addr
, PCAsize
;
356 struct phys_entry
*phys_entry
;
358 *first_avail
= round_page(*first_avail
); /* Make sure we start out on a page boundary */
359 vm_last_addr
= VM_MAX_KERNEL_ADDRESS
; /* Set the highest address know to VM */
362 * Initialize kernel pmap
364 kernel_pmap
= &kernel_pmap_store
;
365 kernel_pmap_phys
= (addr64_t
)(uintptr_t)&kernel_pmap_store
;
366 cursor_pmap
= &kernel_pmap_store
;
368 kernel_pmap
->pmap_link
.next
= (queue_t
)kernel_pmap
; /* Set up anchor forward */
369 kernel_pmap
->pmap_link
.prev
= (queue_t
)kernel_pmap
; /* Set up anchor reverse */
370 kernel_pmap
->ref_count
= 1;
371 kernel_pmap
->pmapFlags
= pmapKeyDef
; /* Set the default keys */
372 kernel_pmap
->pmapFlags
|= pmapNXdisabled
;
373 kernel_pmap
->pmapCCtl
= pmapCCtlVal
; /* Initialize cache control */
374 kernel_pmap
->space
= PPC_SID_KERNEL
;
375 kernel_pmap
->pmapvr
= 0; /* Virtual = Real */
378 * IBM's recommended hash table size is one PTEG for every 2 physical pages.
379 * However, we have found that OSX rarely uses more than 4 PTEs in a PTEG
380 * with this size table. Therefore, by default we allocate a hash table
381 * one half IBM's recommended size, ie one PTEG per 4 pages. The "ht_shift" boot-arg
382 * can be used to override the default hash table size.
383 * We will allocate the hash table in physical RAM, outside of kernel virtual memory,
384 * at the top of the highest bank that will contain it.
385 * Note that "bank" doesn't refer to a physical memory slot here, it is a range of
386 * physically contiguous memory.
388 * The PCA will go there as well, immediately before the hash table.
391 nbits
= cntlzw(((msize
<< 1) - 1) >> 32); /* Get first bit in upper half */
392 if (nbits
== 32) /* If upper half was empty, find bit in bottom half */
393 nbits
= nbits
+ cntlzw((uint_t
)((msize
<< 1) - 1));
394 tmemsize
= 0x8000000000000000ULL
>> nbits
; /* Get memory size rounded up to power of 2 */
396 /* Calculate hash table size: First, make sure we don't overflow 32-bit arithmetic. */
397 if (tmemsize
> 0x0000002000000000ULL
)
398 tmemsize
= 0x0000002000000000ULL
;
400 /* Second, calculate IBM recommended hash table size, ie one PTEG per 2 physical pages */
401 hash_table_size
= (uint_t
)(tmemsize
>> 13) * PerProcTable
[0].ppe_vaddr
->pf
.pfPTEG
;
403 /* Third, cut this in half to produce the OSX default, ie one PTEG per 4 physical pages */
404 hash_table_size
>>= 1;
406 /* Fourth, adjust default size per "ht_shift" boot arg */
407 if (hash_table_shift
>= 0) /* if positive, make size bigger */
408 hash_table_size
<<= hash_table_shift
;
409 else /* if "ht_shift" is negative, make smaller */
410 hash_table_size
>>= (-hash_table_shift
);
412 /* Fifth, make sure we are at least minimum size */
413 if (hash_table_size
< (256 * 1024))
414 hash_table_size
= (256 * 1024);
416 while(1) { /* Try to fit hash table in PCA into contiguous memory */
418 if(hash_table_size
< (256 * 1024)) { /* Have we dropped too short? This should never, ever happen */
419 panic("pmap_bootstrap: Can't find space for hash table\n"); /* This will never print, system isn't up far enough... */
422 PCAsize
= (hash_table_size
/ PerProcTable
[0].ppe_vaddr
->pf
.pfPTEG
) * sizeof(PCA_t
); /* Get total size of PCA table */
423 PCAsize
= round_page(PCAsize
); /* Make sure it is at least a page long */
425 for(bank
= pmap_mem_regions_count
- 1; bank
>= 0; bank
--) { /* Search backwards through banks */
427 hash_table_base
= ((addr64_t
)pmap_mem_regions
[bank
].mrEnd
<< 12) - hash_table_size
+ PAGE_SIZE
; /* Get tenative address */
429 htslop
= hash_table_base
& (hash_table_size
- 1); /* Get the extra that we will round down when we align */
430 hash_table_base
= hash_table_base
& -(addr64_t
)hash_table_size
; /* Round down to correct boundary */
432 if((hash_table_base
- round_page(PCAsize
)) >= ((addr64_t
)pmap_mem_regions
[bank
].mrStart
<< 12)) break; /* Leave if we fit */
435 if(bank
>= 0) break; /* We are done if we found a suitable bank */
437 hash_table_size
= hash_table_size
>> 1; /* Try the next size down */
440 if(htslop
) { /* If there was slop (i.e., wasted pages for alignment) add a new region */
441 for(i
= pmap_mem_regions_count
- 1; i
>= (unsigned)bank
; i
--) { /* Copy from end to our bank, including our bank */
442 pmap_mem_regions
[i
+ 1].mrStart
= pmap_mem_regions
[i
].mrStart
; /* Set the start of the bank */
443 pmap_mem_regions
[i
+ 1].mrAStart
= pmap_mem_regions
[i
].mrAStart
; /* Set the start of allocatable area */
444 pmap_mem_regions
[i
+ 1].mrEnd
= pmap_mem_regions
[i
].mrEnd
; /* Set the end address of bank */
445 pmap_mem_regions
[i
+ 1].mrAEnd
= pmap_mem_regions
[i
].mrAEnd
; /* Set the end address of allocatable area */
448 pmap_mem_regions
[i
+ 1].mrStart
= (hash_table_base
+ hash_table_size
) >> 12; /* Set the start of the next bank to the start of the slop area */
449 pmap_mem_regions
[i
+ 1].mrAStart
= (hash_table_base
+ hash_table_size
) >> 12; /* Set the start of allocatable area to the start of the slop area */
450 pmap_mem_regions
[i
].mrEnd
= (hash_table_base
+ hash_table_size
- 4096) >> 12; /* Set the end of our bank to the end of the hash table */
454 pmap_mem_regions
[bank
].mrAEnd
= (hash_table_base
- PCAsize
- 4096) >> 12; /* Set the maximum allocatable in this bank */
456 hw_hash_init(); /* Initiaize the hash table and PCA */
457 hw_setup_trans(); /* Set up hardware registers needed for translation */
460 * The hash table is now all initialized and so is the PCA. Go on to do the rest of it.
461 * This allocation is from the bottom up.
464 num
= atop_64(msize
); /* Get number of pages in all of memory */
466 /* Figure out how much we need to allocate */
469 (InitialSaveBloks
* PAGE_SIZE
) + /* Allow space for the initial context saveareas */
470 (BackPocketSaveBloks
* PAGE_SIZE
) + /* For backpocket saveareas */
471 trcWork
.traceSize
+ /* Size of trace table */
472 ((((1 << maxAdrSpb
) * sizeof(pmapTransTab
)) + 4095) & -4096) + /* Size of pmap translate table */
473 (((num
* sizeof(struct phys_entry
)) + 4095) & -4096) /* For the physical entries */
476 mapsize
= size
= round_page(size
); /* Get size of area to map that we just calculated */
477 mapsize
= mapsize
+ kmapsize
; /* Account for the kernel text size */
479 vmpagesz
= round_page(num
* sizeof(struct vm_page
)); /* Allow for all vm_pages needed to map physical mem */
480 vmmapsz
= round_page((num
/ 8) * sizeof(struct vm_map_entry
)); /* Allow for vm_maps */
482 mapsize
= mapsize
+ vmpagesz
+ vmmapsz
; /* Add the VM system estimates into the grand total */
484 mapsize
= mapsize
+ (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
485 mapsize
= ((mapsize
/ PAGE_SIZE
) + MAPPERBLOK
- 1) / MAPPERBLOK
; /* Get number of blocks of mappings we need */
486 mapsize
= mapsize
+ ((mapsize
+ MAPPERBLOK
- 1) / MAPPERBLOK
); /* Account for the mappings themselves */
488 size
= size
+ (mapsize
* PAGE_SIZE
); /* Get the true size we need */
490 /* hash table must be aligned to its size */
492 addr
= *first_avail
; /* Set the address to start allocations */
493 first_used_addr
= addr
; /* Remember where we started */
495 bzero((char *)addr
, size
); /* Clear everything that we are allocating */
497 savearea_init(addr
); /* Initialize the savearea chains and data */
499 addr
= (vm_offset_t
)((unsigned int)addr
+ ((InitialSaveBloks
+ BackPocketSaveBloks
) * PAGE_SIZE
)); /* Point past saveareas */
501 trcWork
.traceCurr
= (unsigned int)addr
; /* Set first trace slot to use */
502 trcWork
.traceStart
= (unsigned int)addr
; /* Set start of trace table */
503 trcWork
.traceEnd
= (unsigned int)addr
+ trcWork
.traceSize
; /* Set end of trace table */
505 addr
= (vm_offset_t
)trcWork
.traceEnd
; /* Set next allocatable location */
507 pmapTrans
= (pmapTransTab
*)addr
; /* Point to the pmap to hash translation table */
509 pmapTrans
[PPC_SID_KERNEL
].pmapPAddr
= (addr64_t
)((uintptr_t)kernel_pmap
); /* Initialize the kernel pmap in the translate table */
510 pmapTrans
[PPC_SID_KERNEL
].pmapVAddr
= CAST_DOWN(unsigned int, kernel_pmap
); /* Initialize the kernel pmap in the translate table */
512 addr
+= ((((1 << maxAdrSpb
) * sizeof(pmapTransTab
)) + 4095) & -4096); /* Point past pmap translate table */
514 /* NOTE: the phys_table must be within the first 2GB of physical RAM. This makes sure we only need to do 32-bit arithmetic */
516 phys_entry
= (struct phys_entry
*) addr
; /* Get pointer to physical table */
518 for (bank
= 0; (unsigned)bank
< pmap_mem_regions_count
; bank
++) { /* Set pointer and initialize all banks of ram */
520 pmap_mem_regions
[bank
].mrPhysTab
= phys_entry
; /* Set pointer to the physical table for this bank */
522 phys_entry
= phys_entry
+ (pmap_mem_regions
[bank
].mrEnd
- pmap_mem_regions
[bank
].mrStart
+ 1); /* Point to the next */
525 addr
+= (((num
* sizeof(struct phys_entry
)) + 4095) & -4096); /* Step on past the physical entries */
528 * Remaining space is for mapping entries. Tell the initializer routine that
529 * the mapping system can't release this block because it's permanently assigned
532 mapping_init(); /* Initialize the mapping tables */
534 for(i
= addr
; i
< first_used_addr
+ size
; i
+= PAGE_SIZE
) { /* Add initial mapping blocks */
535 mapping_free_init(i
, 1, 0); /* Pass block address and say that this one is not releasable */
537 mapCtl
.mapcmin
= MAPPERBLOK
; /* Make sure we only adjust one at a time */
539 /* Map V=R the page tables */
540 pmap_map(first_used_addr
, first_used_addr
,
541 round_page(first_used_addr
+ size
), VM_PROT_READ
| VM_PROT_WRITE
, VM_WIMG_USE_DEFAULT
);
543 *first_avail
= round_page(first_used_addr
+ size
); /* Set next available page */
544 first_free_virt
= *first_avail
; /* Ditto */
546 /* For 64-bit machines, block map physical memory and the I/O hole into kernel space */
547 if(BootProcInfo
.pf
.Available
& pf64Bit
) { /* Are we on a 64-bit machine? */
548 lowGlo
.lgPMWvaddr
= PHYS_MEM_WINDOW_VADDR
; /* Initialize the physical memory window's virtual address */
550 pmap_map_physical(); /* Block map physical memory into the window */
552 pmap_map_iohole(IO_MEM_WINDOW_VADDR
, IO_MEM_WINDOW_SIZE
);
553 /* Block map the I/O hole */
556 /* All the rest of memory is free - add it to the free
557 * regions so that it can be allocated by pmap_steal
560 pmap_mem_regions
[0].mrAStart
= (*first_avail
>> 12); /* Set up the free area to start allocations (always in the first bank) */
562 current_free_region
= 0; /* Set that we will start allocating in bank 0 */
563 avail_remaining
= 0; /* Clear free page count */
564 for(bank
= 0; (unsigned)bank
< pmap_mem_regions_count
; bank
++) { /* Total up all of the pages in the system that are available */
565 avail_remaining
+= (pmap_mem_regions
[bank
].mrAEnd
- pmap_mem_regions
[bank
].mrAStart
) + 1; /* Add in allocatable pages in this bank */
572 * pmap_init(spa, epa)
573 * finishes the initialization of the pmap module.
574 * This procedure is called from vm_mem_init() in vm/vm_init.c
575 * to initialize any remaining data structures that the pmap module
576 * needs to map virtual memory (VM is already ON).
578 * Note that the pmap needs to be sized and aligned to
579 * a power of two. This is because it is used both in virtual and
580 * real so it can't span a page boundary.
587 pmap_zone
= zinit(pmapSize
, 400 * pmapSize
, 4096, "pmap");
589 zone_debug_disable(pmap_zone
); /* Can't debug this one 'cause it messes with size and alignment */
590 #endif /* ZONE_DEBUG */
592 pmap_initialized
= TRUE
;
595 * Initialize list of freed up pmaps
597 free_pmap_list
= NULL
; /* Set that there are no free pmaps */
599 simple_lock_init(&free_pmap_lock
, 0);
603 unsigned int pmap_free_pages(void)
605 return avail_remaining
;
609 * This function allocates physical pages.
613 pmap_next_page_hi(ppnum_t
* pnum
)
615 return pmap_next_page(pnum
);
619 /* Non-optimal, but only used for virtual memory startup.
620 * Allocate memory from a table of free physical addresses
621 * If there are no more free entries, too bad.
625 pmap_next_page(ppnum_t
*addrp
)
629 if(current_free_region
>= pmap_mem_regions_count
) return FALSE
; /* Return failure if we have used everything... */
631 for(i
= current_free_region
; i
< pmap_mem_regions_count
; i
++) { /* Find the next bank with free pages */
632 if(pmap_mem_regions
[i
].mrAStart
<= pmap_mem_regions
[i
].mrAEnd
) break; /* Found one */
635 current_free_region
= i
; /* Set our current bank */
636 if(i
>= pmap_mem_regions_count
) return FALSE
; /* Couldn't find a free page */
638 *addrp
= pmap_mem_regions
[i
].mrAStart
; /* Allocate the page */
639 pmap_mem_regions
[i
].mrAStart
= pmap_mem_regions
[i
].mrAStart
+ 1; /* Set the next one to go */
640 avail_remaining
--; /* Drop free count */
645 void pmap_virtual_space(
649 *startp
= round_page(first_free_virt
);
650 *endp
= vm_last_addr
;
656 * Create and return a physical map.
658 * If the size specified for the map is zero, the map is an actual physical
659 * map, and may be referenced by the hardware.
661 * A pmap is either in the free list or in the in-use list. The only use
662 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
663 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
664 * in-use list is searched until a hole in the VSID sequence is found. (Note
665 * that the in-use pmaps are queued in VSID sequence order.) This is all done
666 * while free_pmap_lock is held.
668 * If the size specified is non-zero, the map will be used in software
669 * only, and is bounded by that size.
672 pmap_create(vm_map_size_t size
, __unused boolean_t is_64bit
)
674 pmap_t pmap
, ckpmap
, fore
;
676 unsigned int currSID
;
680 * A software use-only map doesn't even need a pmap structure.
686 * If there is a pmap in the pmap free list, reuse it.
687 * Note that we use free_pmap_list for all chaining of pmaps, both to
688 * the free list and the in use chain (anchored from kernel_pmap).
691 simple_lock(&free_pmap_lock
);
693 if(free_pmap_list
) { /* Any free? */
694 pmap
= free_pmap_list
; /* Yes, allocate it */
695 free_pmap_list
= (pmap_t
)pmap
->freepmap
; /* Dequeue this one (we chain free ones through freepmap) */
699 simple_unlock(&free_pmap_lock
); /* Unlock just in case */
702 pmap
= (pmap_t
) zalloc(pmap_zone
); /* Get one */
703 if (pmap
== PMAP_NULL
) return(PMAP_NULL
); /* Handle out-of-memory condition */
705 bzero((char *)pmap
, pmapSize
); /* Clean up the pmap */
708 simple_lock(&free_pmap_lock
); /* Lock it back up */
710 ckpmap
= cursor_pmap
; /* Get starting point for free ID search */
711 currSID
= ckpmap
->spaceNum
; /* Get the actual space ID number */
713 while(1) { /* Keep trying until something happens */
715 currSID
= (currSID
+ 1) & (maxAdrSp
- 1); /* Get the next in the sequence */
716 if(((currSID
* incrVSID
) & (maxAdrSp
- 1)) == invalSpace
) continue; /* Skip the space we have reserved */
717 ckpmap
= (pmap_t
)ckpmap
->pmap_link
.next
; /* On to the next in-use pmap */
719 if(ckpmap
->spaceNum
!= currSID
) break; /* If we are out of sequence, this is free */
721 if(ckpmap
== cursor_pmap
) { /* See if we have 2^20 already allocated */
722 panic("pmap_create: Maximum number (%d) active address spaces reached\n", maxAdrSp
); /* Die pig dog */
726 pmap
->space
= (currSID
* incrVSID
) & (maxAdrSp
- 1); /* Calculate the actual VSID */
727 pmap
->spaceNum
= currSID
; /* Set the space ID number */
729 * Now we link into the chain just before the out of sequence guy.
732 fore
= (pmap_t
)ckpmap
->pmap_link
.prev
; /* Get the current's previous */
733 pmap
->pmap_link
.next
= (queue_t
)ckpmap
; /* My next points to the current */
734 fore
->pmap_link
.next
= (queue_t
)pmap
; /* Current's previous's next points to me */
735 pmap
->pmap_link
.prev
= (queue_t
)fore
; /* My prev points to what the current pointed to */
736 ckpmap
->pmap_link
.prev
= (queue_t
)pmap
; /* Current's prev points to me */
738 physpmap
= ((addr64_t
)pmap_find_phys(kernel_pmap
, (addr64_t
)((uintptr_t)pmap
)) << 12) | (addr64_t
)((unsigned int)pmap
& 0xFFF); /* Get the physical address of the pmap */
740 pmap
->pmapvr
= (addr64_t
)((uintptr_t)pmap
) ^ physpmap
; /* Make V to R translation mask */
742 pmapTrans
[pmap
->space
].pmapPAddr
= physpmap
; /* Set translate table physical to point to us */
743 pmapTrans
[pmap
->space
].pmapVAddr
= CAST_DOWN(unsigned int, pmap
); /* Set translate table virtual to point to us */
746 pmap
->pmapVmmExt
= NULL
; /* Clear VMM extension block vaddr */
747 pmap
->pmapVmmExtPhys
= 0; /* and the paddr, too */
748 pmap
->pmapFlags
= pmapKeyDef
; /* Set default key */
749 pmap
->pmapCCtl
= pmapCCtlVal
; /* Initialize cache control */
751 pmap
->stats
.resident_count
= 0;
752 pmap
->stats
.wired_count
= 0;
753 pmap
->pmapSCSubTag
= 0x0000000000000000ULL
; /* Make sure this is clean an tidy */
754 simple_unlock(&free_pmap_lock
);
/*
 * NOTE(review): extraction damage — the return-type line, braces, the "else"
 * path structure and the trailing return are missing; fragments preserved
 * verbatim.  Visible behavior: drop a reference (panicking on underflow),
 * return while users remain, unmap the shared page for non-shadow-assist
 * guests, sanity-check emptiness, then either cache the pmap on the free
 * list (when below free_pmap_max) or unlink it from the in-use ring,
 * invalidate its translate-table slots, and zfree it.
 */
763 * Gives up a reference to the specified pmap. When the reference count
764 * reaches zero the pmap structure is added to the pmap free list.
766 * Should only be called if the map contains no valid mappings.
769 pmap_destroy(pmap_t pmap
)
775 if (pmap
== PMAP_NULL
)
778 if ((ref_count
= hw_atomic_sub(&pmap
->ref_count
, 1)) == UINT_MAX
) /* underflow */
779 panic("pmap_destroy(): ref_count < 0");
782 return; /* Still more users, leave now... */
784 if (!(pmap
->pmapFlags
& pmapVMgsaa
)) { /* Don't try this for a shadow assist guest */
785 pmap_unmap_sharedpage(pmap
); /* Remove any mapping of page -1 */
789 if(pmap
->stats
.resident_count
!= 0)
790 panic("PMAP_DESTROY: pmap not empty");
792 if(pmap
->stats
.resident_count
!= 0) {
793 pmap_remove(pmap
, 0, 0xFFFFFFFFFFFFF000ULL
);
798 * Add the pmap to the pmap free list.
803 * Add the pmap to the pmap free list.
805 simple_lock(&free_pmap_lock
);
807 if (free_pmap_count
<= free_pmap_max
) { /* Do we have enough spares? */
809 pmap
->freepmap
= free_pmap_list
; /* Queue in front */
810 free_pmap_list
= pmap
;
812 simple_unlock(&free_pmap_lock
);
/* Full cache case: unlink from the in-use doubly-linked ring and release */
815 if(cursor_pmap
== pmap
) cursor_pmap
= (pmap_t
)pmap
->pmap_link
.prev
; /* If we are releasing the cursor, back up */
816 fore
= (pmap_t
)pmap
->pmap_link
.prev
;
817 aft
= (pmap_t
)pmap
->pmap_link
.next
;
818 fore
->pmap_link
.next
= pmap
->pmap_link
.next
; /* My previous's next is my next */
819 aft
->pmap_link
.prev
= pmap
->pmap_link
.prev
; /* My next's previous is my previous */
820 simple_unlock(&free_pmap_lock
);
821 pmapTrans
[pmap
->space
].pmapPAddr
= -1; /* Invalidate the translate table physical */
822 pmapTrans
[pmap
->space
].pmapVAddr
= -1; /* Invalidate the translate table virtual */
823 zfree(pmap_zone
, pmap
);
829 * pmap_reference(pmap)
830 * gains a reference to the specified pmap.
833 pmap_reference(pmap_t pmap
)
835 if (pmap
!= PMAP_NULL
)
836 (void)hw_atomic_add(&pmap
->ref_count
, 1); /* Bump the count */
/*
 * NOTE(review): extraction damage — the parameter lines and, critically, the
 * "case" labels of both switch statements are missing, as are several braces;
 * fragments preserved verbatim and must not be reconstructed by guesswork.
 * Visible behavior: look up the page's physical entry, then repeatedly purge
 * mappings — via hw_purge_phys for a VM-host pmap, hw_purge_space otherwise —
 * dispatching on the return code until the physent chain reports empty, with
 * a final consistency panic for VM hosts if cruft remains.
 */
840 * pmap_remove_some_phys
842 * Removes mappings of the associated page from the specified pmap
845 void pmap_remove_some_phys(
849 register struct phys_entry
*pp
;
850 register struct mapping
*mp
;
853 if (pmap
== PMAP_NULL
) { /* This should never be called with a null pmap */
854 panic("pmap_remove_some_phys: null pmap\n");
857 pp
= mapping_phys_lookup(pa
, &pindex
); /* Get physical entry */
858 if (pp
== 0) return; /* Leave if not in physical RAM */
860 do { /* Keep going until we toss all pages from this pmap */
861 if (pmap
->pmapFlags
& pmapVMhost
) {
862 mp
= hw_purge_phys(pp
); /* Toss a map */
863 switch ((unsigned int)mp
& mapRetCode
) {
/* NOTE(review): case labels for this switch were lost in extraction */
865 mapping_free(mp
); /* Return mapping to free inventory */
868 break; /* Don't try to return a guest mapping */
870 break; /* Physent chain empty, we're done */
872 break; /* Mapping disappeared on us, retry */
874 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %p, pmap = %p, code = %p\n",
875 pp
, pmap
, mp
); /* Handle failure with our usual lack of tact */
/* Non-VM-host path: purge only this pmap's (space's) mappings of the page */
878 mp
= hw_purge_space(pp
, pmap
); /* Toss a map */
879 switch ((unsigned int)mp
& mapRetCode
) {
/* NOTE(review): case labels for this switch were lost in extraction */
881 mapping_free(mp
); /* Return mapping to free inventory */
884 break; /* Physent chain empty, we're done */
886 break; /* Mapping disappeared on us, retry */
888 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %p, pmap = %p, code = %p\n",
889 pp
, pmap
, mp
); /* Handle failure with our usual lack of tact */
892 } while (mapRtEmpty
!= ((unsigned int)mp
& mapRetCode
));
895 if ((pmap
->pmapFlags
& pmapVMhost
) && !pmap_verify_free(pa
))
896 panic("pmap_remove_some_phys: cruft left behind - pa = %08X, pmap = %p\n", pa
, pmap
);
899 return; /* Leave... */
903 * pmap_remove(pmap, s, e)
904 * unmaps all virtual addresses v in the virtual address
905 * range determined by [s, e) and pmap.
906 * s and e must be on machine independent page boundaries and
907 * s must be less than or equal to e.
909 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
910 * skip those segments.
920 if (pmap
== PMAP_NULL
) return; /* Leave if software pmap */
923 /* It is just possible that eva might have wrapped around to zero,
924 * and sometimes we get asked to liberate something of size zero
925 * even though it's dumb (eg. after zero length read_overwrites)
929 /* If these are not page aligned the loop might not terminate */
930 assert((sva
== trunc_page_64(sva
)) && (eva
== trunc_page_64(eva
)));
932 va
= sva
& -4096LL; /* Round start down to a page */
933 endva
= eva
& -4096LL; /* Round end down to a page */
935 while(1) { /* Go until we finish the range */
936 va
= mapping_remove(pmap
, va
); /* Remove the mapping and see what's next */
937 va
= va
& -4096LL; /* Make sure the "not found" indication is clear */
938 if((va
== 0) || (va
>= endva
)) break; /* End loop if we finish range or run off the end */
948 * Lower the permission for all mappings to a given page.
955 register struct phys_entry
*pp
;
961 switch (prot
& VM_PROT_ALL
) {
963 case VM_PROT_READ
|VM_PROT_EXECUTE
:
974 pp
= mapping_phys_lookup(pa
, &pindex
); /* Get physical entry */
975 if (pp
== 0) return; /* Leave if not in physical RAM */
977 if (remove
) { /* If the protection was set to none, we'll remove all mappings */
979 do { /* Keep going until we toss all pages from this physical page */
980 mp
= hw_purge_phys(pp
); /* Toss a map */
981 switch ((unsigned int)mp
& mapRetCode
) {
983 mapping_free(mp
); /* Return mapping to free inventory */
986 break; /* Don't try to return a guest mapping */
988 break; /* Mapping disappeared on us, retry */
990 break; /* Physent chain empty, we're done */
991 default: panic("pmap_page_protect: hw_purge_phys failed - pp = %p, code = %p\n",
992 pp
, mp
); /* Handle failure with our usual lack of tact */
994 } while (mapRtEmpty
!= ((unsigned int)mp
& mapRetCode
));
997 if (!pmap_verify_free(pa
))
998 panic("pmap_page_protect: cruft left behind - pa = %08X\n", pa
);
1001 return; /* Leave... */
1004 /* When we get here, it means that we are to change the protection for a
1008 mapping_protect_phys(pa
, (prot
& VM_PROT_ALL
) ); /* Change protection of all mappings to page. */
1017 * Disconnect all mappings for this page and return reference and change status
1018 * in generic format.
1021 unsigned int pmap_disconnect(
1024 register struct phys_entry
*pp
;
1025 unsigned int pindex
;
1028 pp
= mapping_phys_lookup(pa
, &pindex
); /* Get physical entry */
1029 if (pp
== 0) return (0); /* Return null ref and chg if not in physical RAM */
1030 do { /* Iterate until all mappings are dead and gone */
1031 mp
= hw_purge_phys(pp
); /* Disconnect a mapping */
1032 if (!mp
) break; /* All mappings are gone, leave the loop */
1033 switch ((unsigned int)mp
& mapRetCode
) {
1035 mapping_free(mp
); /* Return mapping to free inventory */
1038 break; /* Don't try to return a guest mapping */
1040 break; /* Mapping disappeared on us, retry */
1042 break; /* Physent chain empty, we're done */
1043 default: panic("hw_purge_phys: hw_purge_phys failed - pp = %p, code = %p\n",
1044 pp
, mp
); /* Handle failure with our usual lack of tact */
1046 } while (mapRtEmpty
!= ((unsigned int)mp
& mapRetCode
));
1049 if (!pmap_verify_free(pa
))
1050 panic("pmap_disconnect: cruft left behind - pa = %08X\n", pa
);
1053 return (mapping_tst_refmod(pa
)); /* Return page ref and chg in generic format */
1058 pmap_is_noencrypt(__unused ppnum_t pn
)
1064 pmap_set_noencrypt(__unused ppnum_t pn
)
1069 pmap_clear_noencrypt(__unused ppnum_t pn
)
1075 * pmap_protect(pmap, s, e, prot)
1076 * changes the protection on all virtual addresses v in the
1077 * virtual address range determined by [s, e] and pmap to prot.
1078 * s and e must be on machine independent page boundaries and
1079 * s must be less than or equal to e.
1081 * Note that any requests to change the protection of a nested pmap are
1082 * ignored. Those changes MUST be done by calling this with the correct pmap.
1086 vm_map_offset_t sva
,
1087 vm_map_offset_t eva
,
1093 if (pmap
== PMAP_NULL
) return; /* Do nothing if no pmap */
1095 if (prot
== VM_PROT_NONE
) { /* Should we kill the address range?? */
1096 pmap_remove(pmap
, (addr64_t
)sva
, (addr64_t
)eva
); /* Yeah, dump 'em */
1097 return; /* Leave... */
1100 va
= sva
& -4096LL; /* Round start down to a page */
1101 endva
= eva
& -4096LL; /* Round end down to a page */
1103 while(1) { /* Go until we finish the range */
1104 mapping_protect(pmap
, va
, (prot
& VM_PROT_ALL
), &va
); /* Change the protection and see what's next */
1105 if((va
== 0) || (va
>= endva
)) break; /* End loop if we finish range or run off the end */
1115 * Create a translation for the virtual address (virt) to the physical
1116 * address (phys) in the pmap with the protection requested. If the
1117 * translation is wired then we can not allow a full page fault, i.e.,
1118 * the mapping control block is not eligible to be stolen in a low memory
1121 * NB: This is the only routine which MAY NOT lazy-evaluate
1122 * or lose information. That is, this routine must actually
1123 * insert this page into the given map NOW.
1126 pmap_enter(pmap_t pmap
, vm_map_offset_t va
, ppnum_t pa
, vm_prot_t prot
,
1127 unsigned int flags
, __unused boolean_t wired
)
1129 unsigned int mflags
;
1132 if (pmap
== PMAP_NULL
) return; /* Leave if software pmap */
1134 mflags
= 0; /* Make sure this is initialized to nothing special */
1135 if(!(flags
& VM_WIMG_USE_DEFAULT
)) { /* Are they supplying the attributes? */
1136 mflags
= mmFlgUseAttr
| (flags
& VM_MEM_GUARDED
) | ((flags
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
1140 * It is possible to hang here if another processor is remapping any pages we collide with and are removing
1143 while(1) { /* Keep trying the enter until it goes in */
1145 colva
= mapping_make(pmap
, va
, pa
, mflags
, 1, (prot
& VM_PROT_ALL
) ); /* Enter the mapping into the pmap */
1147 if(!colva
) break; /* If there were no collisions, we are done... */
1149 mapping_remove(pmap
, colva
); /* Remove the mapping that collided */
1154 * Enters translations for odd-sized V=F blocks.
1156 * The higher level VM map should be locked to insure that we don't have a
1157 * double diddle here.
1159 * We panic if we get a block that overlaps with another. We do not merge adjacent
1160 * blocks because removing any address within a block removes the entire block and if
1161 * would really mess things up if we trashed too much.
1163 * Once a block is mapped, it is unmutable, that is, protection, catch mode, etc. can
1164 * not be changed. The block must be unmapped and then remapped with the new stuff.
1165 * We also do not keep track of reference or change flags.
1167 * Any block that is larger than 256MB must be a multiple of 32MB. We panic if it is not.
1169 * Note that pmap_map_block_rc is the same but doesn't panic if collision.
1173 void pmap_map_block(pmap_t pmap
, addr64_t va
, ppnum_t pa
, uint32_t size
, vm_prot_t prot
, int attr
, unsigned int flags
) { /* Map an autogenned block */
1175 unsigned int mflags
;
1179 if (pmap
== PMAP_NULL
) { /* Did they give us a pmap? */
1180 panic("pmap_map_block: null pmap\n"); /* No, like that's dumb... */
1183 // kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n", /* (BRINGUP) */
1184 // current_thread(), va, pa, size, prot, attr, flags); /* (BRINGUP) */
1186 mflags
= mmFlgBlock
| mmFlgUseAttr
| (attr
& VM_MEM_GUARDED
) | ((attr
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
1187 if(flags
) mflags
|= mmFlgPerm
; /* Mark permanent if requested */
1189 colva
= mapping_make(pmap
, va
, pa
, mflags
, size
, prot
); /* Enter the mapping into the pmap */
1191 if(colva
) { /* If there was a collision, panic */
1192 panic("pmap_map_block: mapping error %d, pmap = %p, va = %016llX\n", (uint32_t)(colva
& mapRetCode
), pmap
, va
);
1195 return; /* Return */
1198 int pmap_map_block_rc(pmap_t pmap
, addr64_t va
, ppnum_t pa
, uint32_t size
, vm_prot_t prot
, int attr
, unsigned int flags
) { /* Map an autogenned block */
1200 unsigned int mflags
;
1204 if (pmap
== PMAP_NULL
) { /* Did they give us a pmap? */
1205 panic("pmap_map_block_rc: null pmap\n"); /* No, like that's dumb... */
1208 mflags
= mmFlgBlock
| mmFlgUseAttr
| (attr
& VM_MEM_GUARDED
) | ((attr
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
1209 if(flags
) mflags
|= mmFlgPerm
; /* Mark permanent if requested */
1211 colva
= mapping_make(pmap
, va
, pa
, mflags
, size
, prot
); /* Enter the mapping into the pmap */
1213 if(colva
) return 0; /* If there was a collision, fail */
1215 return 1; /* Return true of we worked */
1219 * pmap_extract(pmap, va)
1220 * returns the physical address corrsponding to the
1221 * virtual address specified by pmap and va if the
1222 * virtual address is mapped and 0 if it is not.
1223 * Note: we assume nothing is ever mapped to phys 0.
1225 * NOTE: This call always will fail for physical addresses greater than 0xFFFFF000.
1227 vm_offset_t
pmap_extract(pmap_t pmap
, vm_map_offset_t va
) {
1230 register struct mapping
*mp
;
1231 register vm_offset_t pa
;
1237 panic("pmap_extract: THIS CALL IS BOGUS. NEVER USE IT EVER. So there...\n"); /* Don't use this */
1240 gva
= (unsigned int)va
; /* Make sure we don't have a sign */
1242 spl
= splhigh(); /* We can't allow any loss of control here */
1244 mp
= mapping_find(pmap
, (addr64_t
)gva
, &nextva
,1); /* Find the mapping for this address */
1246 if(!mp
) { /* Is the page mapped? */
1247 splx(spl
); /* Enable interrupts */
1248 return 0; /* Pass back 0 if not found */
1251 ppoffset
= (ppnum_t
)(((gva
& -4096LL) - (mp
->mpVAddr
& -4096LL)) >> 12); /* Get offset from va to base va */
1254 pa
= mp
->mpPAddr
+ ppoffset
; /* Remember ppage because mapping may vanish after drop call */
1256 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
1257 splx(spl
); /* Restore 'rupts */
1259 if(pa
> maxPPage32
) return 0; /* Force large addresses to fail */
1261 pa
= (pa
<< 12) | (va
& 0xFFF); /* Convert physical page number to address */
1264 return pa
; /* Return physical address or 0 */
1268 * ppnum_t pmap_find_phys(pmap, addr64_t va)
1269 * returns the physical page corrsponding to the
1270 * virtual address specified by pmap and va if the
1271 * virtual address is mapped and 0 if it is not.
1272 * Note: we assume nothing is ever mapped to phys 0.
1275 ppnum_t
pmap_find_phys(pmap_t pmap
, addr64_t va
) {
1278 register struct mapping
*mp
;
1279 ppnum_t pa
, ppoffset
;
1282 spl
= splhigh(); /* We can't allow any loss of control here */
1284 mp
= mapping_find(pmap
, va
, &nextva
, 1); /* Find the mapping for this address */
1286 if(!mp
) { /* Is the page mapped? */
1287 splx(spl
); /* Enable interrupts */
1288 return 0; /* Pass back 0 if not found */
1292 ppoffset
= (ppnum_t
)(((va
& -4096LL) - (mp
->mpVAddr
& -4096LL)) >> 12); /* Get offset from va to base va */
1294 pa
= mp
->mpPAddr
+ ppoffset
; /* Get the actual physical address */
1296 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
1298 splx(spl
); /* Restore 'rupts */
1299 return pa
; /* Return physical address or 0 */
1306 * Set/Get special memory attributes; not implemented.
1308 * Note: 'VAL_GET_INFO' is used to return info about a page.
1309 * If less than 1 page is specified, return the physical page
1310 * mapping and a count of the number of mappings to that page.
1311 * If more than one page is specified, return the number
1312 * of resident pages and the number of shared (more than
1313 * one mapping) pages in the range;
1319 __unused pmap_t pmap
,
1320 __unused vm_map_offset_t address
,
1321 __unused vm_map_size_t size
,
1322 __unused vm_machine_attribute_t attribute
,
1323 __unused vm_machine_attribute_val_t
* value
)
1326 return KERN_INVALID_ARGUMENT
;
1332 unsigned int pmap_cache_attributes(ppnum_t pgn
) {
1335 struct phys_entry
* pp
;
1337 // Find physical address
1338 if ((pp
= pmap_find_physentry(pgn
))) {
1339 // Use physical attributes as default
1340 // NOTE: DEVICE_PAGER_FLAGS are made to line up
1341 flags
= VM_MEM_COHERENT
; /* We only support coherent memory */
1342 if (pp
->ppLink
& ppG
) flags
|= VM_MEM_GUARDED
; /* Add in guarded if it is */
1343 if (pp
->ppLink
& ppI
) flags
|= VM_MEM_NOT_CACHEABLE
; /* Add in cache inhibited if so */
1345 // If no physical, just hard code attributes
1354 * pmap_attribute_cache_sync(vm_offset_t pa)
1356 * Invalidates all of the instruction cache on a physical page and
1357 * pushes any dirty data from the data cache for the same physical page
1360 kern_return_t
pmap_attribute_cache_sync(ppnum_t pp
, vm_size_t size
,
1361 __unused vm_machine_attribute_t attribute
,
1362 __unused vm_machine_attribute_val_t
* value
) {
1365 unsigned int i
, npages
;
1367 npages
= round_page(size
) >> 12; /* Get the number of pages to do */
1369 for(i
= 0; i
< npages
; i
++) { /* Do all requested pages */
1370 s
= splhigh(); /* No interruptions here */
1371 sync_ppage(pp
+ i
); /* Go flush data cache and invalidate icache */
1372 splx(s
); /* Allow interruptions */
1375 return KERN_SUCCESS
;
1379 * pmap_sync_page_data_phys(ppnum_t pa)
1381 * Invalidates all of the instruction cache on a physical page and
1382 * pushes any dirty data from the data cache for the same physical page
1385 void pmap_sync_page_data_phys(ppnum_t pa
) {
1389 s
= splhigh(); /* No interruptions here */
1390 sync_ppage(pa
); /* Sync up dem caches */
1391 splx(s
); /* Allow interruptions */
1396 pmap_sync_page_attributes_phys(ppnum_t pa
)
1398 pmap_sync_page_data_phys(pa
);
1401 #ifdef CURRENTLY_UNUSED_AND_UNTESTED
1405 * Garbage collects the physical map system for pages that are no longer used.
1406 * It isn't implemented or needed or wanted.
1409 pmap_collect(__unused pmap_t pmap
)
1416 * Routine: pmap_activate
1418 * Binds the given physical map to the given
1419 * processor, and returns a hardware map description.
1420 * It isn't implemented or needed or wanted.
1424 __unused pmap_t pmap
,
1425 __unused thread_t th
,
1426 __unused
int which_cpu
)
1432 * It isn't implemented or needed or wanted.
1436 __unused pmap_t pmap
,
1437 __unused thread_t th
,
1438 __unused
int which_cpu
)
1445 * pmap_pageable(pmap, s, e, pageable)
1446 * Make the specified pages (by pmap, offset)
1447 * pageable (or not) as requested.
1449 * A page which is not pageable may not take
1450 * a fault; therefore, its page table entry
1451 * must remain valid for the duration.
1453 * This routine is merely advisory; pmap_enter()
1454 * will specify that these pages are to be wired
1455 * down (or not) as appropriate.
1457 * (called from vm/vm_fault.c).
1461 __unused pmap_t pmap
,
1462 __unused vm_map_offset_t start
,
1463 __unused vm_map_offset_t end
,
1464 __unused boolean_t pageable
)
1467 return; /* This is not used... */
1471 * Routine: pmap_change_wiring
1476 __unused pmap_t pmap
,
1477 __unused vm_map_offset_t va
,
1478 __unused boolean_t wired
)
1480 return; /* This is not used... */
1484 * pmap_clear_modify(phys)
1485 * clears the hardware modified ("dirty") bit for one
1486 * machine independant page starting at the given
1487 * physical address. phys must be aligned on a machine
1488 * independant page boundary.
1491 pmap_clear_modify(ppnum_t pa
)
1494 mapping_clr_mod(pa
); /* Clear all change bits for physical page */
1499 * pmap_is_modified(phys)
1500 * returns TRUE if the given physical page has been modified
1501 * since the last call to pmap_clear_modify().
1504 pmap_is_modified(register ppnum_t pa
)
1506 return mapping_tst_mod(pa
); /* Check for modified */
1511 * pmap_clear_reference(phys)
1512 * clears the hardware referenced bit in the given machine
1513 * independant physical page.
1517 pmap_clear_reference(ppnum_t pa
)
1519 mapping_clr_ref(pa
); /* Check for modified */
1523 * pmap_is_referenced(phys)
1524 * returns TRUE if the given physical page has been referenced
1525 * since the last call to pmap_clear_reference().
1528 pmap_is_referenced(ppnum_t pa
)
1530 return mapping_tst_ref(pa
); /* Check for referenced */
1534 * pmap_get_refmod(phys)
1535 * returns the referenced and modified bits of the specified
1539 pmap_get_refmod(ppnum_t pa
)
1541 return (mapping_tst_refmod(pa
));
1545 * pmap_clear_refmod(phys, mask)
1546 * clears the referenced and modified bits as specified by the mask
1547 * of the specified physical page.
1550 pmap_clear_refmod(ppnum_t pa
, unsigned int mask
)
1552 mapping_clr_refmod(pa
, mask
);
1556 * pmap_eligible_for_execute(ppnum_t pa)
1557 * return true if physical address is eligible to contain executable code;
1558 * otherwise, return false
1561 pmap_eligible_for_execute(ppnum_t pa
)
1563 phys_entry_t
*physent
;
1564 unsigned int pindex
;
1566 physent
= mapping_phys_lookup(pa
, &pindex
); /* Get physical entry */
1568 if((!physent
) || (physent
->ppLink
& ppG
))
1569 return 0; /* If there is no physical entry or marked guarded,
1570 the entry is not eligible for execute */
1572 return 1; /* Otherwise, entry is eligible for execute */
/*
 *	Debug-only resident-page enumeration; not implemented on this platform.
 *
 *	NOTE(review): the opening #if, the third parameter, and the body were
 *	lost in extraction and have been reconstructed to match the surviving
 *	"#endif MACH_VM_DEBUG" -- verify against the original source.
 */
#if MACH_VM_DEBUG
int
pmap_list_resident_pages(
	__unused pmap_t pmap,
	__unused vm_offset_t *listp,
	__unused int space)
{
	return 0;
}
#endif /* MACH_VM_DEBUG */
1591 pmap_copy_part_page(
1593 vm_offset_t src_offset
,
1595 vm_offset_t dst_offset
,
1598 addr64_t fsrc
, fdst
;
1600 assert((((dst
<< 12) & PAGE_MASK
) + dst_offset
+ len
) <= PAGE_SIZE
);
1601 assert((((src
<< 12) & PAGE_MASK
) + src_offset
+ len
) <= PAGE_SIZE
);
1603 fsrc
= ((addr64_t
)src
<< 12) + src_offset
;
1604 fdst
= ((addr64_t
)dst
<< 12) + dst_offset
;
1606 phys_copy(fsrc
, fdst
, len
); /* Copy the stuff physically */
1610 pmap_zero_part_page(
1611 __unused vm_offset_t p
,
1612 __unused vm_offset_t offset
,
1613 __unused vm_size_t len
)
1615 panic("pmap_zero_part_page");
1618 boolean_t
pmap_verify_free(ppnum_t pa
) {
1620 struct phys_entry
*pp
;
1621 unsigned int pindex
;
1623 pp
= mapping_phys_lookup(pa
, &pindex
); /* Get physical entry */
1624 if (pp
== 0) return FALSE
; /* If there isn't one, show no mapping... */
1626 if(pp
->ppLink
& ~(ppLock
| ppFlags
)) return FALSE
; /* We have at least one mapping */
1627 return TRUE
; /* No mappings */
1631 /* Determine if we need to switch space and set up for it if so */
1633 void pmap_switch(pmap_t map
)
1635 hw_blow_seg(lowGlo
.lgUMWvaddr
); /* Blow off the first segment */
1636 hw_blow_seg(lowGlo
.lgUMWvaddr
+ 0x10000000ULL
); /* Blow off the second segment */
1638 /* when changing to kernel space, don't bother
1639 * doing anything, the kernel is mapped from here already.
1641 if (map
->space
== PPC_SID_KERNEL
) { /* Are we switching into kernel space? */
1642 return; /* If so, we don't do anything... */
1645 hw_set_user_space(map
); /* Indicate if we need to load the SRs or not */
1646 return; /* Bye, bye, butterfly... */
/*
 * The PPC pmap can only nest segments of 256MB, aligned on a 256MB boundary.
 */
uint64_t pmap_nesting_size_min = 0x10000000ULL;		/* 256MB nesting granularity */
uint64_t pmap_nesting_size_max = 0x10000000ULL;		/* 256MB nesting granularity */
1657 * kern_return_t pmap_nest(grand, subord, vstart, size)
1659 * grand = the pmap that we will nest subord into
1660 * subord = the pmap that goes into the grand
1661 * vstart = start of range in pmap to be inserted
1662 * nstart = start of range in pmap nested pmap
1663 * size = Size of nest area (up to 2TB)
1665 * Inserts a pmap into another. This is used to implement shared segments.
1666 * On the current PPC processors, this is limited to segment (256MB) aligned
1667 * segment sized ranges.
1669 * We actually kinda allow recursive nests. The gating factor is that we do not allow
1670 * nesting on top of something that is already mapped, i.e., the range must be empty.
1672 * Note that we depend upon higher level VM locks to insure that things don't change while
1673 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
1674 * or do 2 nests at once.
1677 kern_return_t
pmap_nest(pmap_t grand
, pmap_t subord
, addr64_t vstart
, addr64_t nstart
, uint64_t size
) {
1679 addr64_t vend
, colladdr
;
1684 if(size
& 0x0FFFFFFFULL
) return KERN_INVALID_VALUE
; /* We can only do this for multiples of 256MB */
1685 if((size
>> 25) > 65536) return KERN_INVALID_VALUE
; /* Max size we can nest is 2TB */
1686 if(vstart
& 0x0FFFFFFFULL
) return KERN_INVALID_VALUE
; /* We can only do this aligned to 256MB */
1687 if(nstart
& 0x0FFFFFFFULL
) return KERN_INVALID_VALUE
; /* We can only do this aligned to 256MB */
1689 if(size
== 0) { /* Is the size valid? */
1690 panic("pmap_nest: size is invalid - %016llX\n", size
);
1693 msize
= (size
>> 25) - 1; /* Change size to blocks of 32MB */
1695 nlists
= mapSetLists(grand
); /* Set number of lists this will be on */
1697 mp
= mapping_alloc(nlists
); /* Get a spare mapping block */
1699 mp
->mpFlags
= 0x01000000 | mpNest
| mpPerm
| mpBSu
| nlists
; /* Make this a permanent nested pmap with a 32MB basic size unit */
1700 /* Set the flags. Make sure busy count is 1 */
1701 mp
->mpSpace
= subord
->space
; /* Set the address space/pmap lookup ID */
1702 mp
->u
.mpBSize
= msize
; /* Set the size */
1703 mp
->mpPte
= 0; /* Set the PTE invalid */
1704 mp
->mpPAddr
= 0; /* Set the physical page number */
1705 mp
->mpVAddr
= vstart
; /* Set the address */
1706 mp
->mpNestReloc
= nstart
- vstart
; /* Set grand to nested vaddr relocation value */
1708 colladdr
= hw_add_map(grand
, mp
); /* Go add the mapping to the pmap */
1710 if(colladdr
) { /* Did it collide? */
1711 vend
= vstart
+ size
- 4096; /* Point to the last page we would cover in nest */
1712 panic("pmap_nest: attempt to nest into a non-empty range - pmap = %p, start = %016llX, end = %016llX\n",
1713 grand
, vstart
, vend
);
1716 return KERN_SUCCESS
;
1720 * kern_return_t pmap_unnest(grand, vaddr, size)
1722 * grand = the pmap that we will nest subord into
1723 * vaddr = start of range in pmap to be unnested
1724 * size = size of range in pmap to be unnested
1726 * Removes a pmap from another. This is used to implement shared segments.
1727 * On the current PPC processors, this is limited to segment (256MB) aligned
1728 * segment sized ranges.
1731 kern_return_t
pmap_unnest(pmap_t grand
, addr64_t vaddr
, uint64_t size
) {
1733 unsigned int tstamp
, i
, mycpu
;
1738 if (size
!= pmap_nesting_size_min
||
1739 (vaddr
& (pmap_nesting_size_min
-1))) {
1740 panic("pmap_unnest(vaddr=0x%016llx, size=0x016%llx): "
1741 "must be 256MB and aligned\n",
1745 s
= splhigh(); /* Make sure interruptions are disabled */
1747 mp
= mapping_find(grand
, vaddr
, &nextva
, 0); /* Find the nested map */
1749 if(((unsigned int)mp
& mapRetCode
) != mapRtOK
) { /* See if it was even nested */
1750 panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr
);
1753 if((mp
->mpFlags
& mpType
) != mpNest
) { /* Did we find something other than a nest? */
1754 panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr
);
1757 if(mp
->mpVAddr
!= vaddr
) { /* Make sure the address is the same */
1758 panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr
);
1761 hw_atomic_and_noret(&mp
->mpFlags
, ~mpPerm
); /* Show that this mapping is now removable */
1763 mapping_drop_busy(mp
); /* Go ahead and release the mapping now */
1765 splx(s
); /* Restore 'rupts */
1767 (void)mapping_remove(grand
, vaddr
); /* Toss the nested pmap mapping */
1769 invalidateSegs(grand
); /* Invalidate the pmap segment cache */
1772 * Note that the following will force the segment registers to be reloaded
1773 * on all processors (if they are using the pmap we just changed) before returning.
1775 * This is needed. The reason is that until the segment register is
1776 * reloaded, another thread in the same task on a different processor will
1777 * be able to access memory that it isn't allowed to anymore. That can happen
1778 * because access to the subordinate pmap is being removed, but the pmap is still
1781 * Note that we only kick the other processor if we see that it was using the pmap while we
1786 for(i
=0; i
< real_ncpus
; i
++) { /* Cycle through processors */
1787 disable_preemption();
1788 mycpu
= cpu_number(); /* Who am I? Am I just a dream? */
1789 if((unsigned int)grand
== PerProcTable
[i
].ppe_vaddr
->ppUserPmapVirt
) { /* Is this guy using the changed pmap? */
1791 PerProcTable
[i
].ppe_vaddr
->ppInvSeg
= 1; /* Show that we need to invalidate the segments */
1795 tstamp
= PerProcTable
[i
].ppe_vaddr
->ruptStamp
[1]; /* Save the processor's last interrupt time stamp */
1796 if(cpu_signal(i
, SIGPcpureq
, CPRQsegload
, 0) == KERN_SUCCESS
) { /* Make sure we see the pmap change */
1797 if(!hw_cpu_wcng(&PerProcTable
[i
].ppe_vaddr
->ruptStamp
[1], tstamp
, LockTimeOut
)) { /* Wait for the other processors to enter debug */
1798 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i
);
1803 enable_preemption();
1806 return KERN_SUCCESS
; /* Bye, bye, butterfly... */
1809 boolean_t
pmap_adjust_unnest_parameters(__unused pmap_t p
, __unused vm_map_offset_t
*s
, __unused vm_map_offset_t
*e
) {
1810 return FALSE
; /* Not implemented on PowerPC */
1814 * void MapUserMemoryWindowInit(void)
1816 * Initialize anything we need to in order to map user address space slices into
1817 * the kernel. Primarily used for copy in/out.
1819 * Currently we only support one 512MB slot for this purpose. There are two special
1820 * mappings defined for the purpose: the special pmap nest, and linkage mapping.
1822 * The special pmap nest (which is allocated in this function) is used as a place holder
1823 * in the kernel's pmap search list. It is 512MB long and covers the address range
1824 * starting at lgUMWvaddr. It points to no actual memory and when the fault handler
1825 * hits in it, it knows to look in the per_proc and start using the linkage
1826 * mapping contained therin.
1828 * The linkage mapping is used to glue the user address space slice into the
1829 * kernel. It contains the relocation information used to transform the faulting
1830 * kernel address into the user address space. It also provides the link to the
1831 * user's pmap. This is pointed to by the per_proc and is switched in and out
1832 * whenever there is a context switch.
1836 void MapUserMemoryWindowInit(void) {
1842 nlists
= mapSetLists(kernel_pmap
); /* Set number of lists this will be on */
1844 mp
= mapping_alloc(nlists
); /* Get a spare mapping block */
1846 mp
->mpFlags
= 0x01000000 | mpLinkage
| mpPerm
| mpBSu
| nlists
; /* Make this a permanent nested pmap with a 32MB basic size unit */
1847 /* Set the flags. Make sure busy count is 1 */
1848 mp
->mpSpace
= kernel_pmap
->space
; /* Set the address space/pmap lookup ID */
1849 mp
->u
.mpBSize
= 15; /* Set the size to 2 segments in 32MB chunks - 1 */
1850 mp
->mpPte
= 0; /* Means nothing */
1851 mp
->mpPAddr
= 0; /* Means nothing */
1852 mp
->mpVAddr
= lowGlo
.lgUMWvaddr
; /* Set the address range we cover */
1853 mp
->mpNestReloc
= 0; /* Means nothing */
1855 colladdr
= hw_add_map(kernel_pmap
, mp
); /* Go add the mapping to the pmap */
1857 if(colladdr
) { /* Did it collide? */
1858 panic("MapUserMemoryWindowInit: MapUserMemoryWindow range already mapped\n");
1865 * addr64_t MapUserMemoryWindow(vm_map_t map, vm_offset_t va, size)
1867 * map = the vm_map that we are mapping into the kernel
1868 * va = start of the address range we are mapping
1869 * Note that we do not test validty, we chose to trust our fellows...
1871 * Maps a 512M slice of a user address space into a predefined kernel range
1872 * on a per-thread basis. We map only the first 256M segment, allowing the
1873 * second 256M segment to fault in as needed. This allows our clients to access
1874 * an arbitrarily aligned operand up to 256M in size.
1876 * In the future, the restriction of a predefined range may be loosened.
1878 * Builds the proper linkage map to map the user range
1879 * We will round this down to the previous segment boundary and calculate
1880 * the relocation to the kernel slot
1882 * We always make a segment table entry here if we need to. This is mainly because of
1883 * copyin/out and if we don't, there will be multiple segment faults for
1884 * each system call. I have seen upwards of 30000 per second.
1886 * We do check, however, to see if the slice is already mapped and if so,
1887 * we just exit. This is done for performance reasons. It was found that
1888 * there was a considerable boost in copyin/out performance if we did not
1889 * invalidate the segment at ReleaseUserAddressSpace time, so we dumped the
1890 * restriction that you had to bracket MapUserMemoryWindow. Further, there
1891 * is a yet further boost if you didn't need to map it each time. The theory
1892 * behind this is that many times copies are to or from the same segment and
1893 * done multiple times within the same system call. To take advantage of that,
1894 * we check umwSpace and umwRelo to see if we've already got it.
1896 * We also need to half-invalidate the slice when we context switch or go
1897 * back to user state. A half-invalidate does not clear the actual mapping,
1898 * but it does force the MapUserMemoryWindow function to reload the segment
1899 * register/SLBE. If this is not done, we can end up some pretty severe
1900 * performance penalties. If we map a slice, and the cached space/relocation is
1901 * the same, we won't reload the segment registers. Howver, since we ran someone else,
1902 * our SR is cleared and we will take a fault. This is reasonable if we block
1903 * while copying (e.g., we took a page fault), but it is not reasonable when we
1904 * just start. For this reason, we half-invalidate to make sure that the SR is
1905 * explicitly reloaded.
1907 * Note that we do not go to the trouble of making a pmap segment cache
1908 * entry for these guys because they are very short term -- 99.99% of the time
1909 * they will be unmapped before the next context switch.
1913 addr64_t
MapUserMemoryWindow(
1917 addr64_t baddrs
, reladd
;
1921 baddrs
= va
& 0xFFFFFFFFF0000000ULL
; /* Isolate the segment */
1922 thread
= current_thread(); /* Remember our activation */
1924 reladd
= baddrs
- lowGlo
.lgUMWvaddr
; /* Get the relocation from user to kernel */
1926 if((thread
->machine
.umwSpace
== map
->pmap
->space
) && (thread
->machine
.umwRelo
== reladd
)) { /* Already mapped? */
1927 return ((va
& 0x0FFFFFFFULL
) | lowGlo
.lgUMWvaddr
); /* Pass back the kernel address we are to use */
1930 disable_preemption(); /* Don't move... */
1932 mp
= (mapping_t
*)&(getPerProc()->ppUMWmp
); /* Make up for C */
1933 thread
->machine
.umwRelo
= reladd
; /* Relocation from user to kernel */
1934 mp
->mpNestReloc
= reladd
; /* Relocation from user to kernel */
1936 thread
->machine
.umwSpace
= map
->pmap
->space
; /* Set the address space/pmap lookup ID */
1937 mp
->mpSpace
= map
->pmap
->space
; /* Set the address space/pmap lookup ID */
1940 * Here we make an assumption that we are going to be using the base pmap's address space.
1941 * If we are wrong, and that would be very, very, very rare, the fault handler will fix us up.
1944 hw_map_seg(map
->pmap
, lowGlo
.lgUMWvaddr
, baddrs
); /* Make the entry for the first segment */
1946 enable_preemption(); /* Let's move */
1947 return ((va
& 0x0FFFFFFFULL
) | lowGlo
.lgUMWvaddr
); /* Pass back the kernel address we are to use */
#if CONFIG_DTRACE
/*
 * Constrain DTrace copyin/copyout actions.
 *
 * NOTE(review): the opening #if was lost in extraction and reconstructed to
 * match the surviving "#endif CONFIG_DTRACE" -- verify against the original.
 */
extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);

kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
{
	if (current_map() == kernel_map)
		return KERN_FAILURE;			/* Refuse copyio against the kernel map */

	return KERN_SUCCESS;
}

kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
{
	thread_t thread = current_thread();

	thread->machine.umwSpace |= umwSwitchAway;	/* Force the user memory window to be revalidated */
	return KERN_SUCCESS;
}
#endif /* CONFIG_DTRACE */
1975 * kern_return_t pmap_boot_map(size)
1977 * size = size of virtual address range to be mapped
 * This function is used to assign a range of virtual addresses before VM is
 * initialized. It starts at VM_MAX_KERNEL_ADDRESS and works downward.
1981 * The variable vm_last_addr contains the current highest possible VM
1982 * assignable address. It is a panic to attempt to call this after VM has
 * started up. The only problem is that we may not have the serial or
1984 * framebuffer mapped, so we'll never know we died.........
1987 vm_offset_t
pmap_boot_map(vm_size_t size
) {
1989 if(kernel_map
!= VM_MAP_NULL
) { /* Has VM already started? */
1990 panic("pmap_boot_map: VM started\n");
1993 size
= round_page(size
); /* Make sure this is in pages */
1994 vm_last_addr
= vm_last_addr
- size
; /* Allocate the memory */
1995 return (vm_last_addr
+ 1); /* Return the vaddr we just allocated */
2001 * void pmap_init_sharedpage(void);
2003 * Hack map for the 64-bit commpage
2006 void pmap_init_sharedpage(vm_offset_t cpg
){
2008 addr64_t cva
, cpoff
;
2011 sharedPmap
= pmap_create(0, FALSE
); /* Get a pmap to hold the common segment */
2012 if(!sharedPmap
) { /* Check for errors */
2013 panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
2016 for(cpoff
= 0; cpoff
< _COMM_PAGE_AREA_USED
; cpoff
+= 4096) { /* Step along now */
2018 cpphys
= pmap_find_phys(kernel_pmap
, (addr64_t
)cpg
+ cpoff
);
2020 panic("pmap_init_sharedpage: compage %016llX not mapped in kernel\n", cpg
+ cpoff
);
2023 cva
= mapping_make(sharedPmap
, (addr64_t
)((uint32_t)_COMM_PAGE_BASE_ADDRESS
) + cpoff
,
2024 cpphys
, mmFlgPerm
, 1, VM_PROT_READ
| VM_PROT_EXECUTE
); /* Map the page read/execute only */
2025 if(cva
) { /* Check for errors */
2026 panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva
);
2036 * void pmap_map_sharedpage(pmap_t pmap);
2038 * Maps the last segment in a 64-bit address space
2043 void pmap_map_sharedpage(task_t task
, pmap_t pmap
){
2047 if(task_has_64BitAddr(task
) || _cpu_capabilities
& k64Bit
) { /* Should we map the 64-bit page -1? */
2048 ret
= pmap_nest(pmap
, sharedPmap
, 0xFFFFFFFFF0000000ULL
, 0x00000000F0000000ULL
,
2049 0x0000000010000000ULL
); /* Nest the highest possible segment to map comm page */
2050 if(ret
!= KERN_SUCCESS
) { /* Did it work? */
2051 panic("pmap_map_sharedpage: couldn't nest shared page - ret = %08X\n", ret
);
2060 * void pmap_unmap_sharedpage(pmap_t pmap);
2062 * Unmaps the last segment in a 64-bit address space
2066 void pmap_unmap_sharedpage(pmap_t pmap
){
2074 if(BootProcInfo
.pf
.Available
& pf64Bit
) { /* Are we on a 64-bit machine? */
2076 inter
= ml_set_interrupts_enabled(FALSE
); /* Disable interruptions for now */
2077 mp
= hw_find_map(pmap
, 0xFFFFFFFFF0000000ULL
, &nextva
); /* Find the mapping for this address */
2078 if((unsigned int)mp
== mapRtBadLk
) { /* Did we lock up ok? */
2079 panic("pmap_unmap_sharedpage: mapping lock failure - rc = %p, pmap = %p\n", mp
, pmap
); /* Die... */
2082 gotnest
= 0; /* Assume nothing here */
2084 gotnest
= ((mp
->mpFlags
& mpType
) == mpNest
);
2085 /* Remember if we have a nest here */
2086 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
2088 ml_set_interrupts_enabled(inter
); /* Put interrupts back to what they were */
2090 if(!gotnest
) return; /* Leave if there isn't any nesting here */
2092 ret
= pmap_unnest(pmap
, 0xFFFFFFFFF0000000ULL
, 0x0000000010000000ULL
); /* Unnest the max 64-bit page */
2094 if(ret
!= KERN_SUCCESS
) { /* Did it work? */
2095 panic("pmap_unmap_sharedpage: couldn't unnest shared page - ret = %08X\n", ret
);
2103 /* temporary workaround */
2106 __unused vm_map_t map
,
2107 __unused vm_offset_t va
)
2114 * disable no-execute capability on
2115 * the specified pmap
/*
 *	void pmap_disable_NX(pmap_t pmap)
 *
 *	Disables the no-execute capability for the given pmap by setting the
 *	pmapNXdisabled flag in its pmapFlags. How the flag affects subsequent
 *	mappings is handled elsewhere — this only records the policy bit.
 */
void pmap_disable_NX(pmap_t pmap) {

	pmap->pmapFlags |= pmapNXdisabled;	/* Record that NX is off for this address space */
}