/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	This file is used to maintain the virtual-to-real mappings for a PowerPC machine.
 *	The code herein is primarily used to bridge between the pmap layer and the hardware layer.
 *	Currently, some of the function of this module is contained within pmap.c.  We may want to move
 *	all of this into it (or most anyway) for the sake of performance.  We shall see as we write it.
 *
 *	We also depend upon the structure of the phys_entry control block.  We do put some processor-
 *	specific stuff in there.
 */
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ddb/db_output.h>

#include <console/video_console.h>		/* (TEST/DEBUG) */
vm_map_t		mapping_map = VM_MAP_NULL;

unsigned int	incrVSID = 0;				/* VSID increment value */
unsigned int	mappingdeb0 = 0;
unsigned int	mappingdeb1 = 0;
int				ppc_max_adrsp;				/* Maximum address spaces */

addr64_t		*mapdebug;					/* (BRINGUP) */
extern unsigned int DebugWork;				/* (BRINGUP) */

void mapping_verify(void);
void mapping_phys_unused(ppnum_t pa);
/*
 *	ppc_prot translates Mach's representation of protections to that of the PPC hardware.
 *	For Virtual Machines (VMM), we also provide translation entries where the output is
 *	the same as the input, allowing direct specification of PPC protections.  Mach's
 *	representations are always in the range 0..7, so they always fall into the first
 *	8 table entries; direct translations are placed in the range 8..15, so they fall into
 *	the second half of the table.
 *
 *	***NOTE*** I've commented out the Mach->PPC translations that would set page-level
 *			   no-execute, pending updates to the VM layer that will properly enable its
 *			   use.  Bob Abeles 08.02.04
 */

//unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2,		/* Mach -> PPC translations */
unsigned char ppc_prot[16]   = { 0, 3, 2, 2, 3, 3, 2, 2,		/* Mach -> PPC translations */
                                 0, 1, 2, 3, 4, 5, 6, 7 };		/* VMM direct translations */
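/*
 *	Illustrative sketch (not part of the original source): how a lookup in ppc_prot is
 *	meant to be used.  A Mach protection key indexes the first half of the table; a VMM
 *	caller that wants to pass a raw PPC key sets bit 3 so the index lands in the identity
 *	half.  The helper name below is hypothetical, added only for illustration.
 */
static inline unsigned char example_prot_to_ppc(unsigned int key) {
	return ppc_prot[key & 0xF];					/* 0..7 = Mach translation, 8..15 = direct PPC value */
}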
/*
 *	About PPC VSID generation:
 *
 *	This function is called to generate an address space ID.  This space ID must be unique within
 *	the system.  For the PowerPC, it is used to build the VSID.  We build a VSID in the following
 *	way:  space ID << 4 | segment.  Since a VSID is 24 bits, and out of that we reserve the last
 *	4, we can have 2^20 (1M) unique IDs.  Each pmap has a unique space ID, so we should be able
 *	to have 1M pmaps at a time, which we couldn't actually do; we'd run out of memory way before then.  The
 *	problem is that only a certain number of pmaps are kept in a free list and if that is full,
 *	they are released.  This causes us to lose track of what space IDs are free to be reused.
 *	We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
 *	when the space ID wraps, or 4) scan the list of pmaps and find a free one.
 *
 *	Yet another consideration is the hardware use of the VSID.  It is used as part of the hash
 *	calculation for virtual address lookup.  An improperly chosen value could potentially cause
 *	too many hashes to hit the same bucket, causing PTEG overflows.  The actual hash function
 *	is (page index XOR vsid) mod number of ptegs.  For a 32MB machine, using the suggested
 *	hash table size, there are 2^13 (8192) PTEGs.  Remember, though, that the bottom 4 bits
 *	are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
 *	before we start hashing to the same buckets with the same vaddrs.  Also, within a space ID,
 *	every 8192 pages (32MB) within a segment will hash to the same bucket.  That's 8 collisions
 *	per segment.  So, a scan of every page for 256MB would fill 32 PTEGs completely, but
 *	with no overflow.  I don't think that this is a problem.
 *
 *	There may be a problem with the space ID, though.  A new space ID is generated (mainly)
 *	whenever there is a fork.  There shouldn't really be any problem because (for a 32MB
 *	machine) we can have 512 pmaps and still not have hash collisions for the same address.
 *	The potential problem, though, is if we get long-term pmaps that have space IDs that are
 *	the same modulo 512.  We can reduce this problem by having the segment number be bits
 *	0-3 of the space ID rather than 20-23.  Doing this means that, in effect, corresponding
 *	vaddrs in different segments hash to the same PTEG.  While this is somewhat of a problem,
 *	I don't think that it is as significant as the other, so I'll build the VSID with the
 *	segment number in the low-order bits.
 *
 *	The final, and biggest, problem is the wrap, which will happen every 2^20 space IDs.
 *	While this is a problem that should only happen in periods counted in weeks, it can and
 *	will happen.  This is assuming a monotonically increasing space ID.  If we were to search
 *	for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
 *	That's pretty unlikely to happen.  There couldn't be enough storage to support a million tasks.
 *
 *	So, what we do is keep all active pmaps in a chain (anchored from kernel_pmap and
 *	locked by free_pmap_lock) that is sorted in VSID sequence order.
 *
 *	Whenever we need a VSID, we walk the list looking for the next in the sequence from
 *	the last that was freed.  Then we allocate that.
 *
 *	NOTE: We must be called with interruptions off and free_pmap_lock held.
 */
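/*
 *	Illustrative sketch (not part of the original source): the VSID layout and hash behavior
 *	described above, restated as standalone helpers.  The names and the nptegs parameter are
 *	assumptions for the example only; the real work is done by the hardware-layer code.
 */
static inline unsigned int example_make_vsid(unsigned int space_id, unsigned int segment) {
	return (space_id << 4) | (segment & 0xF);		/* space ID above, 4-bit segment number below */
}

static inline unsigned int example_pteg_index(unsigned int page_index, unsigned int vsid, unsigned int nptegs) {
	return (page_index ^ vsid) % nptegs;			/* (page index XOR vsid) mod number of PTEGs */
}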
/*
 *	mapping_init();
 *		Do anything that needs to be done before the mapping system can be used.
 *		Hash table must be initialized before we call this.
 *
 *		Calculate the SID increment.  Currently we use size^(1/2) + size^(1/4) + 1;
 */
void mapping_init(void) {

	unsigned int tmp, maxeff, rwidth;

	ppc_max_adrsp = maxAdrSp;					/* Set maximum address spaces */

	maxeff = 32;								/* Assume 32-bit */
	if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64;	/* Is this a 64-bit machine? */

	rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb;		/* Reduce address width by width of address space ID */
	if(rwidth > maxeff) rwidth = maxeff;		/* If we still have more virtual than effective, clamp at effective */

	vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth);	/* Get maximum effective address supported */
	vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr);	/* Get maximum physical address supported */

	if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) {	/* Are we 64 bit? */
		tmp = 12;								/* Size of hash space */
	}
	else {
		__asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size));	/* Get number of leading 0s */
		tmp = 32 - tmp;							/* Size of hash space */
	}

	incrVSID = 1 << ((tmp + 1) >> 1);			/* Get ceiling of sqrt of table size */
	incrVSID |= 1 << ((tmp + 1) >> 2);			/* Get ceiling of fourth root of table size */
	incrVSID |= 1;								/* Set bit and add 1 */

	return;

}
/*
 *	mapping_remove(pmap_t pmap, addr64_t va);
 *		Given a pmap and virtual address, this routine finds the mapping and unmaps it.
 *		The mapping block will be added to
 *		the free list.  If the free list threshold is reached, garbage collection will happen.
 *
 *		We also pass back the next higher mapped address.  This is done so that the higher level
 *		pmap_remove function can release a range of addresses simply by calling mapping_remove
 *		in a loop until it finishes the range or is returned a vaddr of 0.
 *
 *		Note that if the mapping is not found, we return the next VA ORed with 1.
 */
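/*
 *	Illustrative sketch (not part of the original source): the calling convention described
 *	above.  A higher-level remove loop can walk a range by feeding each returned "next"
 *	address back in, stopping on 0; a set low bit only means "nothing was mapped at this
 *	address" and is stripped before continuing.  The helper name is hypothetical.
 */
static void example_remove_range(pmap_t pmap, addr64_t start, addr64_t end) {
	addr64_t next = start;
	while (next && (next < end)) {
		next = mapping_remove(pmap, next);		/* Unmap one mapping, get next mapped address */
		next &= ~1ULL;							/* Ignore the "not found" indication */
	}
}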
addr64_t mapping_remove(pmap_t pmap, addr64_t va) {	/* Remove a single mapping for this VADDR
													   Returns TRUE if a mapping was found to remove */

	mapping_t	*mp;
	addr64_t	nextva;
	ppnum_t		pgaddr;

	va &= ~PAGE_MASK;							/* Scrub noise bits */

	do {										/* Keep trying until we truly fail */
		mp = hw_rem_map(pmap, va, &nextva);		/* Remove a mapping from this pmap */
	} while (mapRtRemove == ((unsigned int)mp & mapRetCode));

	switch ((unsigned int)mp & mapRetCode) {
		case mapRtOK:
			break;								/* Mapping removed */
		case mapRtNotFnd:
			return (nextva | 1);				/* Nothing found to unmap */
		default:
			panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
				pmap, va, mp);
			break;
	}

	pgaddr = mp->mpPAddr;						/* Get page number from mapping */

	mapping_free(mp);							/* Add mapping to the free list */

	if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
												/* If this is an assisted host, scrub any guest mappings */
		unsigned int  idx;
		phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
												/* Get physent for our physical page */
		if (!physent) {							/* No physent, could be in I/O area, so exit */
			return (nextva);
		}

		do {									/* Iterate 'til all guest mappings are gone */
			mp = hw_scrub_guest(physent, pmap);	/* Attempt to scrub a guest mapping */
			switch ((unsigned int)mp & mapRetCode) {
				case mapRtGuest:				/* Found a guest mapping */
				case mapRtNotFnd:				/* Mapping was there, but disappeared, must retry */
				case mapRtEmpty:				/* No guest mappings left to scrub */
					break;
				default:
					panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
						physent, mp);			/* Cry havoc, cry wrack,
												   at least we die with harness on our backs */
					break;
			}
		} while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
	}

	return nextva;								/* Tell them we did it */
}
/*
 *	mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
 *
 *	This routine takes the given parameters, builds a mapping block, and queues it onto the
 *	pmap's list of mappings.
 *
 *	pmap (virtual address)		is the pmap to map into
 *	va   (virtual address)		is the 64-bit virtual address that is being mapped
 *	pa   (physical page number)	is the physical page number (i.e., physical address >> 12) of the page being mapped
 *
 *	flags:
 *		block			if 1, mapping is a block and the size parameter is used.  Note: we do not keep
 *						reference and change information or allow protection changes of blocks;
 *						any changes must first unmap and then remap the area.
 *		use attribute	use specified attributes for the map, not the defaults for the physical page
 *		perm			mapping is permanent
 *		cache inhibited	cache inhibited (used if use attribute or block is set)
 *		guarded			guarded access (used if use attribute or block is set)
 *
 *	size				size of block in pages - 1 (not used if not block)
 *	prot				VM protection bits
 *	attr				cacheability/guardedness
 *
 *	Returns 0 if the mapping was successful.  Returns the vaddr that overlaps/collides on a conflict.
 *	Returns 1 for any other failure.
 *
 *	Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
 *	for I/O and default the cache attributes appropriately.  The caller is free to set whatever they want, however.
 *
 *	If there is any physical page that is not found in the physent table, the mapping is forced to be a
 *	block mapping of length 1.  This keeps us from trying to update a physent during later mapping use,
 *	e.g., fault handling.
 */
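/*
 *	Illustrative sketch (not part of the original source): composing the flags argument
 *	described above for a cache-inhibited, guarded block mapping that supplies its own
 *	attributes.  The mmFlg* constants are the ones tested in the body of mapping_make below;
 *	the helper name is hypothetical.
 */
static unsigned int example_io_block_flags(void) {
	return mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded;	/* block map with explicit I/O attributes */
}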
addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) {	/* Make an address mapping */

	register mapping_t *mp;
	addr64_t colladdr, psmask;
	unsigned int pindex, mflags, pattr, wimg, rc;
	phys_entry_t *physent;
	int nlists, pcf;

	pindex = 0;

	mflags = 0x01000000;						/* Start building mpFlags field (busy count = 1) */

	pcf = (flags & mmFlgPcfg) >> 24;			/* Get the physical page config index */
	if(!(pPcfg[pcf].pcfFlags)) {				/* Validate requested physical page configuration */
		panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
			pmap, va, pcf);
	}

	psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1;	/* Mask to isolate any offset into a page */
	if(va & psmask) {							/* Make sure we are page aligned on virtual */
		panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
			pmap, va, pcf);
	}
	if(((addr64_t)pa << 12) & psmask) {			/* Make sure we are page aligned on physical */
		panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
			pmap, pa, pcf);
	}

	mflags |= (pcf << (31-mpPcfgb));			/* Insert physical page configuration index */

	if(!(flags & mmFlgBlock)) {					/* Is this a block map? */

		size = 1;								/* Set size to 1 page if not block */

		physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
		if(!physent) {							/* Did we find the physical page? */
			mflags |= mpBlock;					/* Force this to a block if no physent */
			pattr = 0;							/* Assume normal, non-I/O memory */
			if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded;	/* If this page is in I/O range, set I/O attributes */
		}
		else pattr = ((physent->ppLink & (ppI | ppG)) >> 60);	/* Get the default attributes from physent */

		if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded);	/* Use requested attributes */
	}
	else {										/* This is a block */

		pattr = flags & (mmFlgCInhib | mmFlgGuarded);	/* Use requested attributes */
		mflags |= mpBlock;						/* Show that this is a block */

		if(size > pmapSmallBlock) {				/* Is it one? */
			if(size & 0x00001FFF) return mapRtBadSz;	/* Fail if bigger than 256MB and not a 32MB multiple */
			size = size >> 13;					/* Convert to 32MB chunks */
			mflags = mflags | mpBSu;			/* Show 32MB basic size unit */
		}
	}

	wimg = 0x2;									/* Set basic PPC wimg to 0b0010 - Coherent */
	if(pattr & mmFlgCInhib) wimg |= 0x4;		/* Add cache inhibited if we need to */
	if(pattr & mmFlgGuarded) wimg |= 0x1;		/* Add guarded if we need to */

	mflags = mflags | (pindex << 16);			/* Stick in the physical entry table index */

	if(flags & mmFlgPerm) mflags |= mpPerm;		/* Set permanent mapping */

	size = size - 1;							/* Change size to offset */
	if(size > 0xFFFF) return mapRtBadSz;		/* Leave if size is too big */

	nlists = mapSetLists(pmap);					/* Set number of lists this will be on */

	mp = mapping_alloc(nlists);					/* Get a spare mapping block with this many lists */

												/* the mapping is zero except that the mpLists field is set */
	mp->mpFlags |= mflags;						/* Add in the rest of the flags to mpLists */
	mp->mpSpace = pmap->space;					/* Set the address space/pmap lookup ID */
	mp->u.mpBSize = size;						/* Set the size */
	mp->mpPte = 0;								/* Set the PTE invalid */
	mp->mpPAddr = pa;							/* Set the physical page number */
	mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3)				/* Add the protection and attributes to the field */
		| ((PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) ?
			getProtPPC(prot) : (getProtPPC(prot) & 0x3));		/* Mask off no-execute control for 32-bit machines */

	while(1) {									/* Keep trying... */
		colladdr = hw_add_map(pmap, mp);		/* Go add the mapping to the pmap */
		rc = colladdr & mapRetCode;				/* Separate return code */
		colladdr &= ~mapRetCode;				/* Clean up collision effective address */

		switch (rc) {
			case mapRtOK:
				return mapRtOK;					/* Mapping added successfully */

			case mapRtRemove:					/* Remove in progress */
				(void)mapping_remove(pmap, colladdr);	/* Lend a helping hand to another CPU doing block removal */
				continue;						/* Retry mapping add */

			case mapRtMapDup:					/* Identical mapping already present */
				mapping_free(mp);				/* Free duplicate mapping */
				return mapRtOK;					/* Return success */

			case mapRtSmash:					/* Mapping already present but does not match new mapping */
				mapping_free(mp);				/* Free duplicate mapping */
				return (colladdr | mapRtSmash);	/* Return colliding address, with some dirt added to avoid
												   confusion if effective address is 0 */
			default:
				panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
					colladdr, rc, pmap, va, mp);	/* Die dead */
		}
	}

	return 1;									/* Unreachable, but pleases compiler */
}
/*
 *	mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping
 *
 *	Looks up the vaddr and returns the mapping and the next mapped va.
 *	If full is true, it will descend through all nested pmaps to find the actual mapping.
 *
 *	Must be called with interruptions disabled or we can hang trying to remove a found mapping.
 *
 *	Returns 0 if not found, and the mapping if it is.
 *	Note that the mapping's busy count is bumped.  It is the responsibility of the caller
 *	to drop the count.  If this is not done, any attempt to remove the mapping will hang.
 *
 *	NOTE: The nextva field is not valid when full is TRUE.
 */
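/*
 *	Illustrative sketch (not part of the original source): the busy-count protocol described
 *	above.  Whoever finds a mapping must drop its busy count when done, or later removals of
 *	that mapping will hang.  The helper name is hypothetical, and the caller is assumed to
 *	have interruptions disabled, per the note above.
 */
static ppnum_t example_lookup_page(pmap_t pmap, addr64_t va) {
	addr64_t	nextva;
	ppnum_t		pa = 0;
	mapping_t	*mp = mapping_find(pmap, va, &nextva, 1);	/* Find (and busy) the mapping, chasing nested pmaps */
	if(mp) {
		pa = mp->mpPAddr;						/* Use the mapping while it is held busy */
		mapping_drop_busy(mp);					/* Then release it so it can be removed later */
	}
	return pa;
}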
mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) {	/* Find a mapping */

	register mapping_t *mp;
	addr64_t	curva;
	pmap_t		curpmap;
	int			nestdepth;

	curpmap = pmap;								/* Remember entry */
	nestdepth = 0;								/* Set nest depth */
	curva = (addr64_t)va;						/* Set current va */

	while(1) {

		mp = hw_find_map(curpmap, curva, nextva);	/* Find the mapping for this address */
		if((unsigned int)mp == mapRtBadLk) {	/* Did we lock up ok? */
			panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap);	/* Die... */
		}

		if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break;	/* Are we done looking? */

		if((mp->mpFlags & mpType) != mpNest) {	/* Don't chain through anything other than a nested pmap */
			mapping_drop_busy(mp);				/* We have everything we need from the mapping */
			mp = 0;								/* Set not found */
			break;
		}

		if(nestdepth++ > 64) {					/* Have we nested too far down? */
			panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
				va, curva, pmap, curpmap);
		}

		curva = curva + mp->mpNestReloc;		/* Relocate va to new pmap */
		curpmap = (pmap_t)pmapTrans[mp->mpSpace].pmapVAddr;	/* Get the address of the nested pmap */
		mapping_drop_busy(mp);					/* We have everything we need from the mapping */

	}

	return mp;									/* Return the mapping if we found one */
}
/*
 *	void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
 *
 *	This routine takes a pmap and virtual address and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.
 *
 *	We return success if we change the protection or if there is no page mapped at va.  We return failure if
 *	the va corresponds to a block mapped area or the mapping is permanent.
 */
void
mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) {	/* Change protection of a virtual page */

	int	ret;

	ret = hw_protect(pmap, va, getProtPPC(prot), nextva);	/* Try to change the protect here */

	switch (ret) {								/* Decode return code */

		case mapRtOK:							/* Changed */
		case mapRtNotFnd:						/* Didn't find it */
		case mapRtBlock:						/* Block map, just ignore request */
		case mapRtNest:							/* Nested pmap, just ignore request */
			break;

		default:
			panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);

	}

	return;										/* Leave... */
}
/*
 *	void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 *
 *	Any mapping that is marked permanent is not changed.
 *
 *	Phys_entry is unlocked.
 */

void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) {	/* Change protection of all mappings to page */

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_protect_phys: invalid physical page %08X\n", pa);
	}

	hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
	             getProtPPC(prot), hwpPurgePTE);	/* Set the new protection for page and mappings */

	return;										/* Leave... */
}
/*
 *	void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the change bit.
 */

void mapping_clr_mod(ppnum_t pa) {				/* Clears the change bit of a physical page */

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_clr_mod: invalid physical page %08X\n", pa);
	}

	hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
	             0, hwpPurgePTE);				/* Clear change for page and mappings */
	return;										/* Leave... */
}
/*
 *	void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the change bit.
 */

void mapping_set_mod(ppnum_t pa) {				/* Sets the change bit of a physical page */

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_set_mod: invalid physical page %08X\n", pa);
	}

	hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
	             0, hwpNoopPTE);				/* Set change for page and mappings */
	return;										/* Leave... */
}
/*
 *	void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the reference bit.
 */

void mapping_clr_ref(ppnum_t pa) {				/* Clears the reference bit of a physical page */

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_clr_ref: invalid physical page %08X\n", pa);
	}

	hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
	             0, hwpPurgePTE);				/* Clear reference for page and mappings */
	return;										/* Leave... */
}
/*
 *	void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the reference bit.
 */

void mapping_set_ref(ppnum_t pa) {				/* Sets the reference bit of a physical page */

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_set_ref: invalid physical page %08X\n", pa);
	}

	hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
	             0, hwpNoopPTE);				/* Set reference for page and mappings */
	return;										/* Leave... */
}
/*
 *	boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the change bit.
 */

boolean_t mapping_tst_mod(ppnum_t pa) {			/* Tests the change bit of a physical page */

	unsigned int pindex, rc;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_tst_mod: invalid physical page %08X\n", pa);
	}

	rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
	                  0, hwpMergePTE);			/* Test change for page and mappings */
	return ((rc & (unsigned long)ppC) != 0);	/* Leave with change bit */
}
/*
 *	boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the reference bit.
 */

boolean_t mapping_tst_ref(ppnum_t pa) {			/* Tests the reference bit of a physical page */

	unsigned int pindex, rc;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_tst_ref: invalid physical page %08X\n", pa);
	}

	rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
	                  0, hwpMergePTE);			/* Test reference for page and mappings */
	return ((rc & (unsigned long)ppR) != 0);	/* Leave with reference bit */
}
/*
 *	unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	their reference and change bits.
 */

unsigned int mapping_tst_refmod(ppnum_t pa) {	/* Tests the reference and change bits of a physical page */

	unsigned int pindex, rc;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if (!physent) {								/* Did we find the physical page? */
		panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
	}

	rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
	                  0, hwpMergePTE);			/* Test reference and change bits in page and mappings */
	return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
												/* Convert bits to generic format and return */
}
/*
 *	void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
 *	by mask of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the requested reference and change bits.
 */

void mapping_clr_refmod(ppnum_t pa, unsigned int mask) {	/* Clears the reference and change bits of a physical page */

	unsigned int pindex;
	phys_entry_t *physent;
	unsigned int ppcMask;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
	}

	ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
												/* Convert mask bits to PPC-specific format */
	hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
	             ppcMask, hwpPurgePTE);			/* Clear reference and change bits for page and mappings */
	return;										/* Leave... */
}
/*
 *	phys_ent *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the physical entry for a page
 *
 *	This routine takes a physical page number and returns the phys_entry associated with it.  It also
 *	calculates the bank address associated with the entry.
 */

phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) {	/* Finds the physical entry for the page */

	unsigned int i;

	for(i = 0; i < pmap_mem_regions_count; i++) {	/* Walk through the list */
		if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue;	/* Skip any empty lists */
		if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue;	/* This isn't ours */

		*pindex = (i * sizeof(mem_region_t)) / 4;	/* Make the word index to this list */

		return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart];	/* Return the physent pointer */
	}

	return (phys_entry_t *)0;					/* Shucks, can't find it... */
}
/*
 *	mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
 *
 *	This routine frees any mapping blocks queued to mapCtl.mapcrel.  It also checks
 *	the number of free mappings remaining, and if below a threshold, replenishes them.
 *	The list will be replenished from mapCtl.mapcrel if there are enough blocks there.  Otherwise,
 *	new ones are allocated.
 *
 *	This routine allocates and/or frees memory and must be called from a safe place.
 *	Currently, vm_pageout_scan is the safest place.
 */
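/*
 *	Illustrative sketch (not part of the original source): the replenish arithmetic used in
 *	mapping_adjust below.  The shortfall in free mappings is rounded up to whole mapping
 *	blocks, since each wired page added to the pool holds MAPPERBLOK mappings plus its block
 *	header.  The helper name is hypothetical.
 */
static int example_pages_needed(int mapcmin, int mapcfree) {
	int shortfall = mapcmin - mapcfree;					/* How many mappings we are short */
	if (shortfall < 1) return 0;						/* Already at or above the floor */
	return (shortfall + MAPPERBLOK - 1) / MAPPERBLOK;	/* Round up to whole blocks (pages) */
}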
thread_call_t				mapping_adjust_call;
static thread_call_data_t	mapping_adjust_call_data;

void mapping_adjust(void) {						/* Adjust free mappings */

	kern_return_t	retr = KERN_SUCCESS;
	mappingblok_t	*mb, *mbn;
	spl_t			s;
	int				allocsize;

	if(mapCtl.mapcmin <= MAPPERBLOK) {
		mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;

		kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
		kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
		  mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
	}

	s = splhigh();								/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_adjust - timeout getting control lock (1)\n");	/* Tell all and die */
	}

	if (mapping_adjust_call == NULL) {
		thread_call_setup(&mapping_adjust_call_data,
		                  (thread_call_func_t)mapping_adjust,
		                  (thread_call_param_t)NULL);
		mapping_adjust_call = &mapping_adjust_call_data;
	}

	while(1) {									/* Keep going until we've got enough */

		allocsize = mapCtl.mapcmin - mapCtl.mapcfree;	/* Figure out how much we need */
		if(allocsize < 1) break;				/* Leave if we have all we need */

		if((unsigned int)(mbn = mapCtl.mapcrel)) {	/* Can we rescue a free one? */
			mapCtl.mapcrel = mbn->nextblok;		/* Dequeue it */
			mapCtl.mapcreln--;					/* Back off the count */
			allocsize = MAPPERBLOK;				/* Show we allocated one block */
		}
		else {									/* No free ones, try to get it */

			allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get the number of pages we need */

			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
			splx(s);							/* Restore 'rupts */

			for(; allocsize > 0; allocsize >>= 1) {	/* Try allocating in descending halves */
				retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);	/* Find a virtual address to use */
				if((retr != KERN_SUCCESS) && (allocsize == 1)) {	/* Did we find any memory at all? */
					break;
				}
				if(retr == KERN_SUCCESS) break;	/* We got some memory, bail out... */
			}

			allocsize = allocsize * MAPPERBLOK;	/* Convert pages to number of maps allocated */
			s = splhigh();						/* Don't bother from now on */
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_adjust - timeout getting control lock (2)\n");	/* Tell all and die */
			}
		}

		if (retr != KERN_SUCCESS)
			break;								/* Failed to allocate, bail out... */
		for(; allocsize > 0; allocsize -= MAPPERBLOK) {	/* Release one block at a time */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
			mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE);	/* Point to the next slot */
		}

		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	if(mapCtl.mapcholdoff) {					/* Should we hold off this release? */
		mapCtl.mapcrecurse = 0;					/* We are done now */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);								/* Restore 'rupts */
		return;									/* Return... */
	}

	mbn = mapCtl.mapcrel;						/* Get first pending release block */
	mapCtl.mapcrel = 0;							/* Dequeue them */
	mapCtl.mapcreln = 0;						/* Set count to 0 */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);									/* Restore 'rupts */

	while((unsigned int)mbn) {					/* Toss 'em all */
		mb = mbn->nextblok;						/* Get the next */

		kmem_free(mapping_map, (vm_offset_t)mbn, PAGE_SIZE);	/* Release this mapping block */

		mbn = mb;								/* Chain to the next */
	}

	__asm__ volatile("eieio");					/* Make sure all is well */
	mapCtl.mapcrecurse = 0;						/* We are done now */
	return;
}
/*
 *	mapping_free(mapping *mp) - release a mapping to the free list
 *
 *	This routine takes a mapping and adds it to the free list.
 *	If this mapping makes the block non-empty, we queue it to the free block list.
 *	NOTE: we might want to queue it to the end to help quell the pathological
 *	case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
 *	If this release fills a block and we are above the threshold, we release the block.
 */
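/*
 *	Illustrative sketch (not part of the original source): how a mapping's slot index and
 *	owning block are recovered from nothing but its address, as done at the top of
 *	mapping_free.  Mappings are 64-byte slots carved out of a page-aligned block, so the low
 *	bits of the pointer encode the slot and the high bits the block.  The helper name is
 *	hypothetical.
 */
static void example_locate_slot(struct mapping *mp, mappingblok_t **block, unsigned int *slot) {
	*slot  = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;			/* Offset within the page, divided by 64 bytes */
	*block = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE);	/* Page-aligned start of the block */
}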
void mapping_free(struct mapping *mp) {		/* Release a mapping */

	mappingblok_t	*mb, *mbn;
	spl_t			s;
	unsigned int	full, mindx, lists;

	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;		/* Get index to mapping */
	mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE);	/* Point to the mapping block */
	lists = (mp->mpFlags & mpLists);			/* get #lists */
	if ((lists == 0) || (lists > kSkipListMaxLists))	/* panic if out of range */
		panic("mapping_free: mpLists invalid\n");

	mp->mpFlags = 0x99999999;					/* (BRINGUP) */
	mp->mpSpace = 0x9999;						/* (BRINGUP) */
	mp->u.mpBSize = 0x9999;						/* (BRINGUP) */
	mp->mpPte   = 0x99999998;					/* (BRINGUP) */
	mp->mpPAddr = 0x99999999;					/* (BRINGUP) */
	mp->mpVAddr = 0x9999999999999999ULL;		/* (BRINGUP) */
	mp->mpAlias = 0x9999999999999999ULL;		/* (BRINGUP) */
	mp->mpList0 = 0x9999999999999999ULL;		/* (BRINGUP) */
	mp->mpList[0] = 0x9999999999999999ULL;		/* (BRINGUP) */
	mp->mpList[1] = 0x9999999999999999ULL;		/* (BRINGUP) */
	mp->mpList[2] = 0x9999999999999999ULL;		/* (BRINGUP) */

	if(lists > mpBasicLists) {					/* (BRINGUP) */
		mp->mpList[3] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[4] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[5] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[6] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[7] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[8] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[9] = 0x9999999999999999ULL;	/* (BRINGUP) */
		mp->mpList[10] = 0x9999999999999999ULL;	/* (BRINGUP) */
	}

	s = splhigh();								/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_free - timeout getting control lock\n");	/* Tell all and die */
	}

	full = !(mb->mapblokfree[0] | mb->mapblokfree[1]);	/* See if full now */
	mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));	/* Flip on the free bit */
	if ( lists > mpBasicLists ) {				/* if big block, lite the 2nd bit too */
		mindx++;
		mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
		mapCtl.mapcfree++;
		mapCtl.mapcinuse--;
	}

	if(full) {									/* If it was full before this: */
		mb->nextblok = mapCtl.mapcnext;			/* Move head of list to us */
		mapCtl.mapcnext = mb;					/* Chain us to the head of the list */
		if(!((unsigned int)mapCtl.mapclast))
			mapCtl.mapclast = mb;
	}

	mapCtl.mapcfree++;							/* Bump free count */
	mapCtl.mapcinuse--;							/* Decrement in use count */

	mapCtl.mapcfreec++;							/* Count total calls */

	if(mapCtl.mapcfree > mapCtl.mapcmin) {		/* Should we consider releasing this? */
		if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) {	/* See if empty now */

			if(mapCtl.mapcnext == mb) {			/* Are we first on the list? */
				mapCtl.mapcnext = mb->nextblok;	/* Unchain us */
				if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* If last, remove last */
			}
			else {								/* We're not first */
				for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {	/* Search for our block */
					if(mbn->nextblok == mb) break;	/* Is the next one ours? */
				}
				if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
				mbn->nextblok = mb->nextblok;	/* Dequeue us */
				if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;	/* If last, make our predecessor last */
			}

			if(mb->mapblokflags & mbPerm) {		/* Is this permanently assigned? */
				mb->nextblok = mapCtl.mapcnext;	/* Move chain head to us */
				mapCtl.mapcnext = mb;			/* Chain us to the head */
				if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb;	/* If last, make us so */
			}
			else {
				mapCtl.mapcfree -= MAPPERBLOK;	/* Remove the block from the free count */
				mapCtl.mapcreln++;				/* Count on release list */
				mb->nextblok = mapCtl.mapcrel;	/* Move pointer */
				mapCtl.mapcrel = mb;			/* Chain us in front */
			}
		}
	}

	if(mapCtl.mapcreln > MAPFRTHRSH) {			/* Do we have way too many releasable mappings? */
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go toss some */
		}
	}
	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);									/* Restore 'rupts */

	return;										/* Bye, dude... */
}
/*
 *	mapping_alloc(lists) - obtain a mapping from the free list
 *
 *	This routine takes a mapping off of the free list and returns its address.
 *	The mapping is zeroed, and its mpLists count is set.  The caller passes in
 *	the number of skiplists it would prefer; if this number is greater than
 *	mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
 *	just two consecutive free entries coalesced into one.  If we cannot find
 *	two consecutive free entries, we clamp the list count down to mpBasicLists
 *	and return a basic 64-byte node.  Our caller never knows the difference.
 *
 *	If this allocation empties a block, we remove it from the free list.
 *	If this allocation drops the total number of free entries below a threshold,
 *	we allocate a new block.
 */
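/*
 *	Illustrative sketch (not part of the original source): the idea behind asking for two
 *	consecutive free 64-byte slots when a 128-byte mapping is wanted.  The real work is done
 *	by mapalc2() on the block's free-bit maps; this hypothetical helper just shows the bit
 *	test on a single 32-bit mask, where a set bit means "slot is free".
 */
static int example_find_pair(unsigned int freemask) {
	int i;
	for (i = 0; i < 31; i++) {						/* Check each adjacent pair of slots */
		unsigned int pair = 0xC0000000U >> i;		/* Two neighboring bits, high-order first */
		if ((freemask & pair) == pair) return i;	/* Both free: return the first slot index */
	}
	return -1;										/* No two consecutive free slots in this word */
}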
decl_simple_lock_data(extern,free_pmap_lock)

mapping_t *
mapping_alloc(int lists) {						/* Obtain a mapping */

	register mapping_t *mp;
	mappingblok_t	*mb, *mbn;
	spl_t			s;
	int				mindx;
	int				big = (lists > mpBasicLists);	/* set flag if big block req'd */
	pmap_t			refpmap, ckpmap;
	unsigned int	space, i;
	addr64_t		va, nextva;
	boolean_t		found_mapping;
	boolean_t		do_rescan;

	s = splhigh();								/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
	}

	if(!((unsigned int)mapCtl.mapcnext)) {		/* Are there any free mappings? */

/*
 *		No free mappings.  First, there may be some mapping blocks on the "to be released"
 *		list.  If so, rescue one.  Otherwise, try to steal a couple blocks worth.
 */

		if((mbn = mapCtl.mapcrel) != 0) {		/* Try to rescue a block from impending doom */
			mapCtl.mapcrel = mbn->nextblok;		/* Pop the queue */
			mapCtl.mapcreln--;					/* Back off the count */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
		}
		else {									/* We need to steal some */

			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);

			simple_lock(&free_pmap_lock);

			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
			}

			if (!((unsigned int)mapCtl.mapcnext)) {

				refpmap = (pmap_t)cursor_pmap->pmap_link.next;
				space = mapCtl.mapcflush.spacenum;
				while (refpmap != cursor_pmap) {
					if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
					refpmap = (pmap_t)refpmap->pmap_link.next;
				}

				ckpmap = refpmap;
				va = mapCtl.mapcflush.addr;
				found_mapping = FALSE;

				while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {

					hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);

					ckpmap = (pmap_t)ckpmap->pmap_link.next;

					/* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
					   shadow assist active.
					*/
					if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
					    && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {

						do_rescan = TRUE;
						for (i = 0; i < 8; i++) {
							mp = hw_purge_map(ckpmap, va, &nextva);

							switch ((unsigned int)mp & mapRetCode) {
								case mapRtOK:
									mapping_free(mp);
									found_mapping = TRUE;
									break;
								case mapRtNotFnd:
									break;
								default:
									panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
									break;
							}

							if (mapRtNotFnd == ((unsigned int)mp & mapRetCode))
								if (do_rescan)
									do_rescan = FALSE;
								else
									break;

							va = nextva;
						}
					}

					if (ckpmap == refpmap) {
						if (found_mapping == FALSE)
							panic("no valid pmap to purge mappings\n");
						else
							found_mapping = FALSE;
					}

					if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
						panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
					}

				}

				mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
				mapCtl.mapcflush.addr = nextva;
			}

			simple_unlock(&free_pmap_lock);
		}
	}

	mb = mapCtl.mapcnext;

	if ( big ) {								/* if we need a big (128-byte) mapping */
		mapCtl.mapcbig++;						/* count attempts to allocate a big mapping */
		mbn = NULL;								/* this will be prev ptr */
		mindx = 0;
		while( mb ) {							/* loop over mapping blocks with free entries */
			mindx = mapalc2(mb);				/* try for 2 consecutive free bits in this block */

			if ( mindx )	break;				/* exit loop if we found them */
			mbn = mb;							/* remember previous block */
			mb = mb->nextblok;					/* move on to next block */
		}
		if ( mindx == 0 ) {						/* if we couldn't find 2 consecutive bits... */
			mapCtl.mapcbigfails++;				/* count failures */
			big = 0;							/* forget that we needed a big mapping */
			lists = mpBasicLists;				/* clamp list count down to the max in a 64-byte mapping */
			mb = mapCtl.mapcnext;				/* back to the first block with a free entry */
		}
		else {									/* if we did find a big mapping */
			mapCtl.mapcfree--;					/* Decrement free count twice */
			mapCtl.mapcinuse++;					/* Bump in use count twice */
			if ( mindx < 0 ) {					/* if we just used the last 2 free bits in this block */
				if (mbn) {						/* if this wasn't the first block */
					mindx = -mindx;				/* make positive */
					mbn->nextblok = mb->nextblok;	/* unlink this one from the middle of block list */
					if (mb == mapCtl.mapclast) {	/* if we emptied last block */
						mapCtl.mapclast = mbn;		/* then prev block is now last */
					}
				}
				else {							/* we emptied the first block */
					mindx = -mindx;				/* make positive */
					mapCtl.mapcnext = mb->nextblok;	/* unlink it from the head of the list */
					if (mb == mapCtl.mapclast) {	/* if it was also the last block */
						mapCtl.mapclast = 0;		/* the list is now empty */
					}
				}
			}
		}
	}

	if ( !big ) {								/* if we need a small (64-byte) mapping */
		if(!(mindx = mapalc1(mb)))				/* Allocate a 1-bit slot */
			panic("mapping_alloc - empty mapping block detected at %08X\n", mb);

		if(mindx < 0) {							/* Did we just take the last one */
			mindx = -mindx;						/* Make positive */
			mapCtl.mapcnext = mb->nextblok;		/* Remove us from the list */
			if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* Removed the last one */
		}
	}

	mapCtl.mapcfree--;							/* Decrement free count */
	mapCtl.mapcinuse++;							/* Bump in use count */

	mapCtl.mapcallocc++;						/* Count total calls */

/*
 *	Note: in the following code, we will attempt to rescue blocks only one at a time.
 *	Eventually, after a few more mapping_alloc calls, we will catch up.  If there are none
 *	rescuable, we will kick the misc scan that will allocate some for us.  We only do this
 *	if we haven't already done it.
 *	For early boot, we are set up to only rescue one block at a time.  This is because we prime
 *	the release list with as much as we need until threads start.
 */

	if(mapCtl.mapcfree < mapCtl.mapcmin) {		/* See if we need to replenish */
		if((mbn = mapCtl.mapcrel) != 0) {		/* Try to rescue a block from impending doom */
			mapCtl.mapcrel = mbn->nextblok;		/* Pop the queue */
			mapCtl.mapcreln--;					/* Back off the count */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
		}
		else {									/* We need to replenish */
			if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
				if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
					thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
				}
			}
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);									/* Restore 'rupts */

	mp = &((mapping_t *)mb)[mindx];				/* Point to the allocated mapping */
	mp->mpFlags = lists;						/* set the list count */

	return mp;									/* Send it back... */
}
void
consider_mapping_adjust(void)
{
	spl_t s;

	s = splhigh();								/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("consider_mapping_adjust -- lock timeout\n");
	}

	if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);									/* Restore 'rupts */
}
/*
 *	void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
 *
 *	The mapping block is a page size area on a page boundary.  It contains 1 header and 63
 *	mappings.  This call adds and initializes a block for use.  Mappings come in two sizes,
 *	64 and 128 bytes (the only difference is the number of skip-lists.)  When we allocate a
 *	128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
 *	code only deals with "basic" 64-byte mappings.  This works for two reasons:
 *	- Only one in 256 mappings is big, so they are rare.
 *	- If we cannot find two consecutive free mappings, we just return a small one.
 *	  There is no problem with doing this, except a minor performance degradation.
 *	Therefore, all counts etc in the mapping control structure are in units of small blocks.
 *
 *	The header contains a chain link, bit maps, a virtual to real translation mask, and
 *	some statistics.  Bit maps map each slot on the page (bit 0 is not used because it
 *	corresponds to the header).  The translation mask is the XOR of the virtual and real
 *	addresses (needless to say, the block must be wired).
 *
 *	We handle these mappings the same way as saveareas: the block is only on the chain so
 *	long as there are free entries in it.
 *
 *	Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
 *	mappings.  Blocks marked PERM won't ever be released.
 *
 *	If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
 *	list.  We do this only at start up time.  This is done because we only allocate blocks
 *	in the pageout scan and it doesn't start up until after we run out of the initial mappings.
 *	Therefore, we need to preallocate a bunch, but we don't want them to be permanent.  If we put
 *	them on the release queue, the allocate routine will rescue them.  Then when the
 *	pageout scan starts, all extra ones will be released.
 */
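/*
 *	Illustrative sketch (not part of the original source): the virtual-to-real translation
 *	mask mentioned above.  Because the block is wired, a single XOR mask (mapblokvrswap,
 *	formed below as real-block-address XOR virtual-block-address) converts either address
 *	form of any slot in the block to the other.  The helper name is hypothetical.
 */
static addr64_t example_v_to_r(addr64_t vaddr, addr64_t vrswap) {
	return vaddr ^ vrswap;						/* Same XOR also converts real back to virtual */
}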
void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
										/* Sets start and end of a block of mappings
										   perm indicates if the block can be released
										   or goes straight to the release queue.
										   locked indicates if the lock is held already */

	mappingblok_t	*mb;
	spl_t			s;
	addr64_t		raddr;
	ppnum_t			pp;

	mb = (mappingblok_t *)mbl;				/* Start of area */

	if(perm >= 0) {							/* See if we need to initialize the block */
		if(perm) {
			raddr = (addr64_t)((unsigned int)mbl);	/* Perm means V=R */
			mb->mapblokflags = mbPerm;		/* Set perm */
//			mb->mapblokflags |= (unsigned int)mb;	/* (BRINGUP) */
		}
		else {
			pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl);	/* Get the physical page */
			if(!pp) {						/* What gives?  Where's the page? */
				panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
			}

			raddr = (addr64_t)pp << 12;		/* Convert physical page to physical address */
			mb->mapblokflags = 0;			/* Set not perm */
//			mb->mapblokflags |= (unsigned int)mb;	/* (BRINGUP) */
		}

		mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl);	/* Form translation mask */

		mb->mapblokfree[0] = 0x7FFFFFFF;	/* Set first 32 (minus 1) free */
		mb->mapblokfree[1] = 0xFFFFFFFF;	/* Set next 32 free */
	}

	s = splhigh();							/* Don't bother from now on */
	if(!locked) {							/* Do we need the lock? */
		if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
			panic("mapping_free_init: timeout getting control lock\n");	/* Tell all and die */
		}
	}

	if(perm < 0) {							/* Direct to release queue? */
		mb->nextblok = mapCtl.mapcrel;		/* Move forward pointer */
		mapCtl.mapcrel = mb;				/* Queue us on in */
		mapCtl.mapcreln++;					/* Count the free block */
	}
	else {									/* Add to the free list */

		mb->nextblok = 0;					/* We always add to the end */
		mapCtl.mapcfree += MAPPERBLOK;		/* Bump count */

		if(!((unsigned int)mapCtl.mapcnext)) {	/* First entry on list? */
			mapCtl.mapcnext = mapCtl.mapclast = mb;	/* Chain to us */
		}
		else {								/* We are not the first */
			mapCtl.mapclast->nextblok = mb;	/* Point the last to us */
			mapCtl.mapclast = mb;			/* We are now last */
		}
	}

	if(!locked) {							/* Do we need to unlock? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	}

	splx(s);								/* Restore 'rupts */
	return;									/* All done, leave... */
}
/*
 *	void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
 *
 *	No locks can be held, because we allocate memory here.
 *	This routine needs a corresponding mapping_relpre call to remove the
 *	hold off flag so that the adjust routine will free the extra mapping
 *	blocks on the release list.  I don't like this, but I don't know
 *	how else to do this for now...
 */
void mapping_prealloc(unsigned int size) {	/* Preallocates mappings for a large request */

	int				nmapb, i;
	kern_return_t	retr;
	mappingblok_t	*mbn;
	spl_t			s;

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_prealloc - timeout getting control lock\n");	/* Tell all and die */
	}

	nmapb = (size >> 12) + mapCtl.mapcmin;	/* Get number of entries needed for this and the minimum */

	mapCtl.mapcholdoff++;					/* Bump the hold off count */

	if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {	/* Do we already have enough? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);							/* Restore 'rupts */
		return;
	}
	if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);							/* Restore 'rupts */
		return;
	}
	nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks to get */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */

	for(i = 0; i < nmapb; i++) {			/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS)			/* Did we get some memory? */
			break;
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize on to the release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));

	mapCtl.mapcrecurse = 0;					/* We are done now */
}
/*
 *	void mapping_relpre(void) - Releases preallocation release hold off
 *
 *	This routine removes the
 *	hold off flag so that the adjust routine will free the extra mapping
 *	blocks on the release list.  I don't like this, but I don't know
 *	how else to do this for now...
 */
void mapping_relpre(void) {					/* Releases release hold off */

	spl_t s;

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_relpre - timeout getting control lock\n");	/* Tell all and die */
	}
	if(--mapCtl.mapcholdoff < 0) {			/* Back down the hold off count */
		panic("mapping_relpre: hold-off count went negative\n");
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */
}
/*
 *	void mapping_free_prime(void) - Primes the mapping block release list
 *
 *	See mapping_free_init.
 *	No locks can be held, because we allocate memory here.
 *	One processor running only.
 */
void mapping_free_prime(void) {				/* Primes the mapping block release list */

	int				nmapb, i;
	kern_return_t	retr;
	mappingblok_t	*mbn;
	vm_offset_t		mapping_min;

	retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
			     FALSE, VM_FLAGS_ANYWHERE, &mapping_map);

	if (retr != KERN_SUCCESS)
		panic("mapping_free_prime: kmem_suballoc failed");

	nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get permanent allocation */
	nmapb = nmapb * 4;						/* Get 4 times our initial allocation */

	kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
	  mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);

	for(i = 0; i < nmapb; i++) {			/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			panic("Whoops...  Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
}
void
mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		       vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = mapCtl.mapcinuse;
	*cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
	*max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
	*elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));
	*alloc_size = PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
}
/*
 *	addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
 *
 *	First looks up the physical entry associated with the physical page.  Then searches the alias
 *	list for a matching pmap.  It grabs the virtual address from the mapping, drops busy, and returns
 *	that virtual address (or 0 if no mapping was found).
 */

addr64_t	mapping_p2v(pmap_t pmap, ppnum_t pa) {	/* Finds first virtual mapping of a physical page in a space */

	spl_t s;
	register mapping_t *mp;
	unsigned int pindex;
	phys_entry_t *physent;
	addr64_t va;

	physent = mapping_phys_lookup(pa, &pindex);	/* Get physical entry */
	if(!physent) {								/* Did we find the physical page? */
		panic("mapping_p2v: invalid physical page %08X\n", pa);
	}

	s = splhigh();								/* Make sure interruptions are disabled */

	mp = hw_find_space(physent, pmap->space);	/* Go find the first mapping to the page from the requested pmap */

	if(mp) {									/* Did we find one? */
		va = mp->mpVAddr & -4096;				/* If so, get the cleaned up vaddr */
		mapping_drop_busy(mp);					/* Go ahead and release the mapping now */
	}
	else va = 0;								/* Return failure */

	splx(s);									/* Restore 'rupts */

	return va;									/* Bye, bye... */
}
/*
 *	Convert a physical address to a kernel virtual address if
 *	there is a mapping; otherwise return NULL.
 */

vm_offset_t phystokv(vm_offset_t pa) {

	addr64_t	va;
	ppnum_t		pp;

	pp = pa >> 12;								/* Convert to a page number */

	if(!(va = mapping_p2v(kernel_pmap, pp))) {
		return 0;								/* Can't find it, return 0... */
	}

	return (va | (pa & (PAGE_SIZE - 1)));		/* Build and return VADDR... */
}
/*
 *	Convert a kernel virtual address to a physical address
 */
vm_offset_t kvtophys(vm_offset_t va) {

	return pmap_extract(kernel_pmap, va);		/* Find mapping and lock the physical entry for this mapping */
}
/*
 *	Convert a kernel virtual address to a 64-bit physical address
 */
vm_map_offset_t kvtophys64(vm_map_offset_t va) {

	ppnum_t pa = pmap_find_phys(kernel_pmap, (addr64_t)va);

	if (!pa)
		return (vm_map_offset_t)0;
	return (((vm_map_offset_t)pa) << 12) | (va & 0xfff);
}
/*
 *	void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
 *	page 0 access for the current thread.
 *
 *	If parameter is TRUE, faults are ignored
 *	If parameter is FALSE, faults are honored
 *
 */

void ignore_zero_fault(boolean_t type) {		/* Sets up to ignore or honor any fault on page 0 access for the current thread */

	if(type) current_thread()->machine.specFlags |= ignoreZeroFault;	/* Ignore faults on page 0 */
	else     current_thread()->machine.specFlags &= ~ignoreZeroFault;	/* Honor faults on page 0 */

	return;										/* All done... */
}
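
/*
 *	Illustrative sketch (not part of the original source): the flag is
 *	per-thread and sticky, so a caller would typically bracket only the code
 *	that may legitimately touch page 0 and then restore the default.  The
 *	helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_touch_page_zero(void) {		/* hypothetical helper */

	ignore_zero_fault(TRUE);					/* Tolerate faults on page 0 for this thread */
	/* ... code that may access page 0 goes here ... */
	ignore_zero_fault(FALSE);					/* Restore normal page 0 fault handling */

}
#endif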
/*
 *		Copies data between a physical page and a virtual page, or 2 physical.  This is used to
 *		move data from the kernel to user state.  Note that the "which" parm
 *		says which of the parameters is physical and if we need to flush sink/source.
 *		Note that both addresses may be physical, but only one may be virtual.
 *
 *		The rules are that the size can be anything.  Either address can be on any boundary
 *		and span pages.  The physical data must be contiguous as must the virtual.
 *
 *		We can block when we try to resolve the virtual address at each page boundary.
 *		We don't check protection on the physical page.
 *
 *		Note that we will not check the entire range and if a page translation fails,
 *		we will stop with partial contents copied.
 *
 */

kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {

	vm_map_t map;
	kern_return_t ret;
	addr64_t nextva, vaddr, paddr;
	register mapping_t *mp;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	unsigned int pindex;
	phys_entry_t *physent;
	vm_prot_t prot;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {		/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;									/* Assume both are physical */

	if(!(which & cppvPsnk)) {						/* Is sink page virtual? */
		vaddr = sink;								/* Sink side is virtual */
		bothphys = 0;								/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;		/* Sink always must be read/write */
	} else if (!(which & cppvPsrc)) {				/* Is source page virtual? */
		vaddr = source;								/* Source side is virtual */
		bothphys = 0;								/* Show both aren't physical */
		prot = VM_PROT_READ;						/* Virtual source is always read only */
	}

	needtran = 1;									/* Show we need to map the virtual the first time */
	s = splhigh();									/* Don't bother me */

	while(size) {

		if(!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			if(!needtran) {							/* If this is not the first translation, we need to drop the old busy */
				mapping_drop_busy(mp);				/* Release the old mapping now */
			}
			needtran = 0;

			while(1) {
				mp = mapping_find(map->pmap, vaddr, &nextva, 1);	/* Find and busy the mapping */
				if(!mp) {							/* Was it there? */
					if(getPerProc()->istackptr == 0)
						panic("copypv: No valid mapping on memory %s %x", "RD", vaddr);

					splx(s);						/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();					/* Don't bother me */
					continue;						/* Go try for the map again... */

				}
				if (mp->mpVAddr & mpI) {			/* cache inhibited, so force the appropriate page to be flushed before */
					if (which & cppvPsrc)			/* and after the copy to avoid cache paradoxes */
						which |= cppvFsnk;			/* Physical source, so force the sink flush */
					else
						which |= cppvFsrc;			/* Otherwise force the source flush */
				}

				/* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
				   we can just leave.
				*/
				if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break;		/* We got it mapped R/W or the source is not virtual, leave... */

				mapping_drop_busy(mp);				/* Go ahead and release the mapping for now */
				if(getPerProc()->istackptr == 0)
					panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr);
				splx(s);							/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();						/* Don't bother me */
			}

			paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL));	/* construct the physical address... this calculation works */
																						/* properly on both single page and block mappings */
			if(which & cppvPsrc) sink = paddr;		/* If source is physical, then the sink is virtual */
			else source = paddr;					/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));		/* Assume sink smallest */
		if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;								/* Assume we can copy it all */
		if(lop < size) csize = lop;					/* Nope, we can't do it all */

		if(which & cppvFsrc) flush_dcache64(source, csize, 1);	/* If requested, flush source before move */
		if(which & cppvFsnk) flush_dcache64(sink, csize, 1);	/* If requested, flush sink before move */

		bcopy_physvir_32(source, sink, csize);		/* Do a physical copy, virtually */

		if(which & cppvFsrc) flush_dcache64(source, csize, 1);	/* If requested, flush source after move */
		if(which & cppvFsnk) flush_dcache64(sink, csize, 1);	/* If requested, flush sink after move */

/*
 *		Note that for certain ram disk flavors, we may be copying outside of known memory.
 *		Therefore, before we try to mark it modified, we check if it exists.
 */

		if( !(which & cppvNoModSnk)) {
			physent = mapping_phys_lookup(sink >> 12, &pindex);		/* Get physical entry for sink */
			if(physent) mapping_set_mod((ppnum_t)(sink >> 12));		/* Make sure we know that it is modified */
		}
		if( !(which & cppvNoRefSrc)) {
			physent = mapping_phys_lookup(source >> 12, &pindex);	/* Get physical entry for source */
			if(physent) mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;						/* Calculate what is left */
		vaddr = vaddr + csize;						/* Move to next sink address */
		source = source + csize;					/* Bump source to next physical address */
		sink = sink + csize;						/* Bump sink to next physical address */

	}

	if(!bothphys) mapping_drop_busy(mp);			/* Go ahead and release the mapping of the virtual page if any */
	splx(s);										/* Open up for interrupts */

	return KERN_SUCCESS;
}
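
/*
 *	Illustrative sketch (not part of the original source): one plausible call,
 *	copying bytes from a physical source into a virtual sink in the current
 *	task's map.  cppvPsrc marks the source as physical, cppvFsrc asks that the
 *	source be flushed around the move, and cppvNoRefSrc skips the source
 *	reference-bit update.  The helper name and flag combination are assumptions
 *	for the example only.
 */
#if 0	/* example only, not compiled */
static kern_return_t example_copy_phys_to_user(addr64_t phys_src, addr64_t user_sink, unsigned int len) {	/* hypothetical helper */

	return hw_copypv_32(phys_src, user_sink, len, cppvPsrc | cppvFsrc | cppvNoRefSrc);	/* Sink is virtual; may block to fault it in */

}
#endif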
void mapping_verify(void) {

	spl_t		s;
	mappingblok_t	*mb, *mbn;
	unsigned int	relncnt;
	unsigned int	dumbodude;

	s = splhigh();											/* Don't bother from now on */

	mbn = 0;												/* Start with none */
	for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) {		/* Walk the free chain */
		if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) {	/* Is tag ok? */
			panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
		}
		mbn = mb;											/* Remember the last one */
	}

	if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) {		/* Do we point to the last one? */
		panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
	}

	dumbodude = 0;											/* Start from zero so we never read it uninitialized */
	relncnt = 0;											/* Clear count */
	for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) {		/* Walk the release chain */
		dumbodude |= mb->mapblokflags;						/* Just touch it to make sure it is mapped */
		relncnt++;											/* Count this one */
	}

	if(mapCtl.mapcreln != relncnt) {						/* Is the count on release queue ok? */
		panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
	}

	splx(s);												/* Restore 'rupts */

	return;
}
void mapping_phys_unused(ppnum_t pa) {

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);				/* Get physical entry */
	if(!physent) return;									/* Did we find the physical page? */

	if(!(physent->ppLink & ~(ppLock | ppFlags))) return;	/* No one else is here */

	panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);

}
void mapping_hibernate_flush(void)
{
	unsigned int bank, page;
	struct phys_entry * entry;

	for (bank = 0; bank < pmap_mem_regions_count; bank++)
	{
		entry = (struct phys_entry *) pmap_mem_regions[bank].mrPhysTab;
		for (page = pmap_mem_regions[bank].mrStart; page <= pmap_mem_regions[bank].mrEnd; page++)
		{
			hw_walk_phys(entry, hwpNoop, hwpNoop, hwpNoop, 0, hwpPurgePTE);	/* Purge any PTEs for this physical page */
			entry++;														/* Step to the next physical entry in the bank */
		}
	}
}