2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
24 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
25 * Currently, some of the function of this module is contained within pmap.c. We may want to move
26 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
28 * We also depend upon the structure of the phys_entry control block. We do put some processor
29 * specific stuff in there.
35 #include <mach_kgdb.h>
36 #include <mach_vm_debug.h>
37 #include <db_machine_commands.h>
39 #include <kern/thread.h>
40 #include <kern/thread_act.h>
41 #include <mach/vm_attributes.h>
42 #include <mach/vm_param.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_page.h>
48 #include <kern/misc_protos.h>
49 #include <ppc/misc_protos.h>
50 #include <ppc/proc_reg.h>
54 #include <ppc/pmap_internals.h>
57 #include <ppc/new_screen.h>
58 #include <ppc/Firmware.h>
59 #include <ppc/mappings.h>
60 #include <ddb/db_output.h>
62 #include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
66 #if PERFTIMES && DEBUG
67 #define debugLog2(a, b, c) dbgLog2(a, b, c)
69 #define debugLog2(a, b, c)
72 vm_map_t mapping_map
= VM_MAP_NULL
;
73 #define MAPPING_MAP_SIZE 33554432 /* 32MB address space */
75 unsigned int incrVSID
= 0; /* VSID increment value */
76 unsigned int mappingdeb0
= 0;
77 unsigned int mappingdeb1
= 0;
78 extern unsigned int hash_table_size
;
79 extern vm_offset_t mem_size
;
/*
 *	ppc_prot translates from the mach representation of protections to the PPC version.
 *	We also allow for a direct setting of the protection bits.  This extends the mach
 *	concepts to allow the greater control we need for Virtual Machines (VMM).
 *	Computing it this way saves a memory reference - and maybe a couple of
 *	microseconds - by eliminating the use of this table:
 *	unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
 *	(each table entry is packed as a 2-bit field of the constant below).
 */
#define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
92 * About PPC VSID generation:
94 * This function is called to generate an address space ID. This space ID must be unique within
95 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
96 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
97 * 4, so, we can have 2^20 (2M) unique IDs. Each pmap has a unique space ID, so we should be able
98 * to have 2M pmaps at a time, which we couldn't, we'd run out of memory way before then. The
99 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
100 * they are released. This causes us to lose track of which space IDs are free to be reused.
101 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
102 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
104 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
105 * calculation for virtual address lookup. An improperly chosen value could potentially cause
106 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
107 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
108 * hash table size, there are 2^12 (8192) PTEGs. Remember, though, that the bottom 4 bits
109 * are reserved for the segment number, which means that we really have 2^(12-4) 512 space IDs
110 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
111 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
112 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
113 * with no overflow. I don't think that this is a problem.
115 * There may be a problem with the space ID, though. A new space ID is generate (mainly)
116 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
117 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
118 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
119 * the same modulo 512. We can reduce this problem by having the segment number be bits
120 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
121 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
122 * I don't think that it is as significant as the other, so, I'll make the space ID
123 * with segment first.
125 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
126 * While this is a problem that should only happen in periods counted in weeks, it can and
127 * will happen. This is assuming a monotonically increasing space ID. If we were to search
128 * for an inactive space ID, there could not be a wrap until there was 2^20 concurrent space IDs.
129 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
131 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
132 * locked by free_pmap_lock) that is sorted in VSID sequence order.
134 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
135 * the last that was freed. The we allocate that.
137 * NOTE: We must be called with interruptions off and free_pmap_lock held.
143 * Do anything that needs to be done before the mapping system can be used.
144 * Hash table must be initialized before we call this.
146 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
149 void mapping_init(void) {
153 __asm__
volatile("cntlzw %0, %1" : "=r" (tmp
) : "r" (hash_table_size
)); /* Get number of leading 0s */
155 incrVSID
= 1 << ((32 - tmp
+ 1) >> 1); /* Get ceiling of sqrt of table size */
156 incrVSID
|= 1 << ((32 - tmp
+ 1) >> 2); /* Get ceiling of quadroot of table size */
157 incrVSID
|= 1; /* Set bit and add 1 */
164 * mapping_remove(pmap_t pmap, vm_offset_t va);
165 * Given a pmap and virtual address, this routine finds the mapping and removes it from
166 * both its PTEG hash list and the physical entry list. The mapping block will be added to
167 * the free list. If the free list threshold is reached, garbage collection will happen.
168 * We also kick back a return code to say whether or not we had one to remove.
170 * We have a strict ordering here: the mapping must be removed from the PTEG hash list before
171 * it can be removed from the physical entry list. This allows us to get by with only the PTEG
172 * hash lock at page fault time. The physical entry lock must be held while we remove the mapping
173 * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions,
174 * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
175 * It's just that simple!
177 * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
178 * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG
179 * lock to control the hash chain and may move the position of the mapping for MRU calculations.
181 * Note that mappings do not need to point to a physical entry. When they don't, it indicates
182 * the mapping is outside of physical memory and usually refers to a memory mapped device of
183 * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock
184 * routines return normally, but don't do anything.
187 boolean_t
mapping_remove(pmap_t pmap
, vm_offset_t va
) { /* Remove a single mapping for this VADDR
188 Returns TRUE if a mapping was found to remove */
191 register blokmap
*blm
;
193 unsigned int *useadd
, *useaddr
, uindx
;
195 struct phys_entry
*pp
;
198 debugLog2(1, va
, pmap
->space
); /* start mapping_remove */
200 s
=splhigh(); /* Don't bother me */
202 mp
= hw_lock_phys_vir(pmap
->space
, va
); /* Lock the physical entry for this mapping */
204 if(!mp
) { /* Did we find one? */
205 splx(s
); /* Allow 'rupts now */
206 if(mp
= (mapping
*)hw_rem_blk(pmap
, va
, va
)) { /* No normal pages, try to remove an odd-sized one */
208 if((unsigned int)mp
& 1) { /* Make sure we don't unmap a permanent one */
209 blm
= (blokmap
*)hw_cpv((mapping
*)((unsigned int)mp
& 0xFFFFFFFC)); /* Get virtual address */
210 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
213 while ((unsigned int)mp
& 2)
214 mp
= (mapping
*)hw_rem_blk(pmap
, va
, va
);
216 blm
= (blokmap
*)hw_cpv(mp
); /* (TEST/DEBUG) */
217 kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
218 blm
, blm
->start
, blm
->end
, blm
->PTEr
);
220 mapping_free(hw_cpv(mp
)); /* Release it */
221 debugLog2(2, 1, 0); /* End mapping_remove */
222 return TRUE
; /* Tell them we did it */
224 debugLog2(2, 0, 0); /* end mapping_remove */
225 return FALSE
; /* Didn't find any, return FALSE... */
227 if((unsigned int)mp
&1) { /* Did we timeout? */
228 panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */
229 splx(s
); /* Restore the interrupt level */
230 return FALSE
; /* Bad hair day, return FALSE... */
233 mpv
= hw_cpv(mp
); /* Get virtual address of mapping */
235 if(hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1) < 0) panic("pmap resident count went negative\n");
237 (void)hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1); /* Decrement the resident page count */
239 useadd
= (unsigned int *)&pmap
->pmapUsage
[(va
>> pmapUsageShft
) & pmapUsageMask
]; /* Point to slot to bump */
240 useaddr
= (unsigned int *)((unsigned int)useadd
& -4); /* Round down to word */
241 (void)hw_atomic_sub(useaddr
, (useaddr
== useadd
) ? 0x00010000 : 1); /* Increment the even or odd slot */
244 for(i
= 0; i
< (pmapUsageMask
+ 1); i
++) { /* (TEST/DEBUG) */
245 if((mpv
->pmap
->pmapUsage
[i
]) > 8192) { /* (TEST/DEBUG) */
246 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
247 i
* pmapUsageSize
, mpv
->pmap
->pmapUsage
[i
], mpv
->pmap
);
252 hw_rem_map(mp
); /* Remove the corresponding mapping */
256 if ((mpv
->physent
) && (pmap
->vflags
& pmapVMhost
)) {
258 while(mp1
= (mapping
*)((unsigned int)pp
->phys_link
& ~PHYS_FLAGS
)) { /* Keep going so long as there's another */
260 mpv1
= hw_cpv(mp1
); /* Get the virtual address */
262 if(hw_atomic_sub(&mpv1
->pmap
->stats
.resident_count
, 1) < 0) panic("pmap resident count went negative\n");
264 (void)hw_atomic_sub(&mpv1
->pmap
->stats
.resident_count
, 1); /* Decrement the resident page count */
267 uindx
= ((mpv1
->PTEv
>> 24) & 0x78) | ((mpv1
->PTEv
>> 3) & 7); /* Join segment number and top 2 bits of the API */
268 useadd
= (unsigned int *)&mpv1
->pmap
->pmapUsage
[uindx
]; /* Point to slot to bump */
269 useaddr
= (unsigned int *)((unsigned int)useadd
& -4); /* Round down to word */
270 (void)hw_atomic_sub(useaddr
, (useaddr
== useadd
) ? 0x00010000 : 1); /* Increment the even or odd slot */
273 for(i
= 0; i
< (pmapUsageMask
+ 1); i
++) { /* (TEST/DEBUG) */
274 if((mpv1
->pmap
->pmapUsage
[i
]) > 8192) { /* (TEST/DEBUG) */
275 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
276 i
* pmapUsageSize
, mpv1
->pmap
->pmapUsage
[i
], mpv1
->pmap
);
281 hw_rem_map(mp1
); /* Remove the mapping */
282 mapping_free(mpv1
); /* Add mapping to the free list */
286 if(mpv
->physent
)hw_unlock_bit((unsigned int *)&mpv
->physent
->phys_link
, PHYS_LOCK
); /* Unlock physical entry associated with mapping */
288 splx(s
); /* Was there something you needed? */
290 mapping_free(mpv
); /* Add mapping to the free list */
291 debugLog2(2, 1, 0); /* end mapping_remove */
292 return TRUE
; /* Tell them we did it */
296 * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
298 * This guy releases any mappings that exist for a physical page on a specified map.
299 * We get the lock on the phys_entry, and hold it through out this whole routine.
300 * That way, no one can change the queue out from underneath us. We keep fetching
301 * the physents mapping anchor until it is null, then we're done.
303 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
304 * decrement the pmap's residency count. Then we release the mapping back to the free list.
309 void mapping_purge_pmap(struct phys_entry
*pp
, pmap_t pmap
) { /* Remove all mappings from specified pmap for this physent */
311 mapping
*mp
, *mp_next
, *mpv
;
313 unsigned int *useadd
, *useaddr
, uindx
;
316 s
=splhigh(); /* Don't bother me */
318 if(!hw_lock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
, LockTimeOut
)) { /* Lock the physical entry */
319 panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
320 pp
, pp
->phys_link
, pp
->pte1
); /* Complain about timeout */
323 mp
= (mapping
*)((unsigned int)pp
->phys_link
& ~PHYS_FLAGS
);
325 while(mp
) { /* Keep going so long as there's another */
327 mpv
= hw_cpv(mp
); /* Get the virtual address */
328 if(mpv
->pmap
!= pmap
) {
329 mp
= (mapping
*)((unsigned int)mpv
->next
& ~PHYS_FLAGS
);
333 if(hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1) < 0) panic("pmap resident count went negative\n");
335 (void)hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1); /* Decrement the resident page count */
338 uindx
= ((mpv
->PTEv
>> 24) & 0x78) | ((mpv
->PTEv
>> 3) & 7); /* Join seg # and top 2 bits of API */
339 useadd
= (unsigned int *)&mpv
->pmap
->pmapUsage
[uindx
]; /* Point to slot to bump */
340 useaddr
= (unsigned int *)((unsigned int)useadd
& -4); /* Round down to word */
341 (void)hw_atomic_sub(useaddr
, (useaddr
== useadd
) ? 0x00010000 : 1); /* Incr the even or odd slot */
345 mp_next
= (mapping
*)((unsigned int)mpv
->next
& ~PHYS_FLAGS
);
346 hw_rem_map(mp
); /* Remove the mapping */
347 mapping_free(mpv
); /* Add mapping to the free list */
351 hw_unlock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
); /* We're done, unlock the physical entry */
356 * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
358 * This guy releases any mappings that exist for a physical page.
359 * We get the lock on the phys_entry, and hold it through out this whole routine.
360 * That way, no one can change the queue out from underneath us. We keep fetching
361 * the physents mapping anchor until it is null, then we're done.
363 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
364 * decrement the pmap's residency count. Then we release the mapping back to the free list.
368 void mapping_purge(struct phys_entry
*pp
) { /* Remove all mappings for this physent */
372 unsigned int *useadd
, *useaddr
, uindx
;
375 s
=splhigh(); /* Don't bother me */
376 debugLog2(3, pp
->pte1
, 0); /* start mapping_purge */
378 if(!hw_lock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
, LockTimeOut
)) { /* Lock the physical entry */
379 panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
380 pp
, pp
->phys_link
, pp
->pte1
); /* Complain about timeout */
383 while(mp
= (mapping
*)((unsigned int)pp
->phys_link
& ~PHYS_FLAGS
)) { /* Keep going so long as there's another */
385 mpv
= hw_cpv(mp
); /* Get the virtual address */
387 if(hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1) < 0) panic("pmap resident count went negative\n");
389 (void)hw_atomic_sub(&mpv
->pmap
->stats
.resident_count
, 1); /* Decrement the resident page count */
392 uindx
= ((mpv
->PTEv
>> 24) & 0x78) | ((mpv
->PTEv
>> 3) & 7); /* Join segment number and top 2 bits of the API */
393 useadd
= (unsigned int *)&mpv
->pmap
->pmapUsage
[uindx
]; /* Point to slot to bump */
394 useaddr
= (unsigned int *)((unsigned int)useadd
& -4); /* Round down to word */
395 (void)hw_atomic_sub(useaddr
, (useaddr
== useadd
) ? 0x00010000 : 1); /* Increment the even or odd slot */
398 for(i
= 0; i
< (pmapUsageMask
+ 1); i
++) { /* (TEST/DEBUG) */
399 if((mpv
->pmap
->pmapUsage
[i
]) > 8192) { /* (TEST/DEBUG) */
400 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
401 i
* pmapUsageSize
, mpv
->pmap
->pmapUsage
[i
], mpv
->pmap
);
407 hw_rem_map(mp
); /* Remove the mapping */
408 mapping_free(mpv
); /* Add mapping to the free list */
411 hw_unlock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
); /* We're done, unlock the physical entry */
413 debugLog2(4, pp
->pte1
, 0); /* end mapping_purge */
414 splx(s
); /* Was there something you needed? */
415 return; /* Tell them we did it */
420 * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
422 * This routine takes the given parameters, builds a mapping block, and queues it into the
425 * The pp parameter can be null. This allows us to make a mapping that is not
426 * associated with any physical page. We may need this for certain I/O areas.
428 * If the phys_entry address is null, we neither lock nor chain into it.
429 * If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
432 mapping
*mapping_make(pmap_t pmap
, struct phys_entry
*pp
, vm_offset_t va
, vm_offset_t pa
, vm_prot_t prot
, int attr
, boolean_t locked
) { /* Make an address mapping */
434 register mapping
*mp
, *mpv
;
435 unsigned int *useadd
, *useaddr
;
439 debugLog2(5, va
, pa
); /* start mapping_purge */
440 mpv
= mapping_alloc(); /* Get a spare mapping block */
442 mpv
->pmap
= pmap
; /* Initialize the pmap pointer */
443 mpv
->physent
= pp
; /* Initialize the pointer to the physical entry */
444 mpv
->PTEr
= ((unsigned int)pa
& ~(PAGE_SIZE
- 1)) | attr
<<3 | ppc_prot(prot
); /* Build the real portion of the PTE */
445 mpv
->PTEv
= (((unsigned int)va
>> 1) & 0x78000000) | (pmap
->space
<< 7) | (((unsigned int)va
>> 22) & 0x0000003F); /* Build the VSID */
447 s
=splhigh(); /* Don't bother from now on */
449 mp
= hw_cvp(mpv
); /* Get the physical address of this */
451 if(pp
&& !locked
) { /* Is there a physical entry? Or do we already hold the lock? */
452 if(!hw_lock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
, LockTimeOut
)) { /* Lock the physical entry */
453 panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
454 pp
, pp
->phys_link
, pp
->pte1
); /* Complain about timeout */
458 if(pp
) { /* See of there is a physcial entry */
459 mpv
->next
= (mapping
*)((unsigned int)pp
->phys_link
& ~PHYS_FLAGS
); /* Move the old anchor to the new mappings forward */
460 pp
->phys_link
= (mapping
*)((unsigned int)mp
| (unsigned int)pp
->phys_link
& PHYS_FLAGS
); /* Point the anchor at us. Now we're on the list (keep the flags) */
463 hw_add_map(mp
, pmap
->space
, va
); /* Stick it on the PTEG hash list */
465 (void)hw_atomic_add(&mpv
->pmap
->stats
.resident_count
, 1); /* Increment the resident page count */
466 useadd
= (unsigned int *)&pmap
->pmapUsage
[(va
>> pmapUsageShft
) & pmapUsageMask
]; /* Point to slot to bump */
467 useaddr
= (unsigned int *)((unsigned int)useadd
& -4); /* Round down to word */
468 (void)hw_atomic_add(useaddr
, (useaddr
== useadd
) ? 0x00010000 : 1); /* Increment the even or odd slot */
470 for(i
= 0; i
< (pmapUsageMask
+ 1); i
++) { /* (TEST/DEBUG) */
471 if((mpv
->pmap
->pmapUsage
[i
]) > 8192) { /* (TEST/DEBUG) */
472 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
473 i
* pmapUsageSize
, mpv
->pmap
->pmapUsage
[i
], mpv
->pmap
);
478 if(pp
&& !locked
)hw_unlock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
); /* If we have one and we didn't hold on entry, unlock the physical entry */
480 splx(s
); /* Ok for interruptions now */
481 debugLog2(6, pmap
->space
, prot
); /* end mapping_purge */
482 return mpv
; /* Leave... */
487 * Enters optimal translations for odd-sized V=F blocks.
489 * Builds a block map for each power-of-two hunk o' address
490 * that exists. This is specific to the processor type.
491 * PPC uses BAT register size stuff. Future PPC might have
494 * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
495 * stupid to know otherwise so we only look at the va anyhow, so there...
499 void mapping_block_map_opt(pmap_t pmap
, vm_offset_t va
, vm_offset_t pa
, vm_offset_t bnd
, vm_size_t size
, vm_prot_t prot
, int attr
) { /* Maps optimal autogenned blocks */
501 register blokmap
*blm
, *oblm
;
503 unsigned int maxsize
, boundary
, leading
, trailing
, cbsize
, minsize
, tomin
;
504 int i
, maxshft
, nummax
, minshft
;
507 kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
508 pmap
, va
, pa
, bnd
, size
, prot
, attr
);
511 minsize
= blokValid
^ (blokValid
& (blokValid
- 1)); /* Set minimum subblock size */
512 maxsize
= 0x80000000 >> cntlzw(blokValid
); /* Set maximum subblock size */
514 minshft
= 31 - cntlzw(minsize
); /* Shift to position minimum size */
515 maxshft
= 31 - cntlzw(blokValid
); /* Shift to position maximum size */
517 leading
= ((va
+ bnd
- 1) & -bnd
) - va
; /* Get size of leading area */
518 trailing
= size
- leading
; /* Get size of trailing area */
519 tomin
= ((va
+ minsize
- 1) & -minsize
) - va
; /* Get size needed to round up to the minimum block size */
522 kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd
, leading
, trailing
, tomin
); /* (TEST/DEBUG) */
525 if(tomin
)pmap_map_block(pmap
, va
, pa
, tomin
, prot
, attr
, 0); /* Map up to minimum block size */
527 va
= va
+ tomin
; /* Adjust virtual start */
528 pa
= pa
+ tomin
; /* Adjust physical start */
529 leading
= leading
- tomin
; /* Adjust leading size */
532 * Some of this code is very classic PPC. We need to fix this up.
535 leading
= leading
>> minshft
; /* Position for bit testing */
536 cbsize
= minsize
; /* Set the minimum size */
538 for(i
= 0; i
< (maxshft
- minshft
+ 1); i
++) { /* Cycle through all block sizes, small to large */
541 pmap_map_block(pmap
, va
, pa
, cbsize
, prot
, attr
, 0); /* Map up to next boundary */
542 pa
= pa
+ cbsize
; /* Bump up physical address */
543 va
= va
+ cbsize
; /* Bump up virtual address */
546 leading
= leading
>> 1; /* Shift up to next size */
547 cbsize
= cbsize
<< 1; /* Here too */
551 nummax
= trailing
>> maxshft
; /* Get number of max size blocks left */
552 for(i
=0; i
< nummax
- 1; i
++) { /* Account for all max size block left but 1 */
553 pmap_map_block(pmap
, va
, pa
, maxsize
, prot
, attr
, 0); /* Map up to next boundary */
555 pa
= pa
+ maxsize
; /* Bump up physical address */
556 va
= va
+ maxsize
; /* Bump up virtual address */
557 trailing
-= maxsize
; /* Back off what we just did */
560 cbsize
= maxsize
; /* Start at maximum size */
562 for(i
= 0; i
< (maxshft
- minshft
+ 1); i
++) { /* Cycle through all block sizes, high to low */
564 if(trailing
& cbsize
) {
565 trailing
&= ~cbsize
; /* Remove the block we are allocating */
566 pmap_map_block(pmap
, va
, pa
, cbsize
, prot
, attr
, 0); /* Map up to next boundary */
567 pa
= pa
+ cbsize
; /* Bump up physical address */
568 va
= va
+ cbsize
; /* Bump up virtual address */
570 cbsize
= cbsize
>> 1; /* Next size down */
573 if(trailing
) pmap_map_block(pmap
, va
, pa
, trailing
, prot
, attr
, 0); /* Map up to end */
580 * Enters translations for odd-sized V=F blocks.
582 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
583 * will be split into normal-sized page mappings.
585 * The higher level VM map should be locked to insure that we don't have a
586 * double diddle here.
588 * We panic if we get a block that overlaps with another. We do not merge adjacent
589 * blocks because removing any address within a block removes the entire block and it
590 * would really mess things up if we trashed too much.
592 * Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
593 * not be changed. The block must be unmapped and then remapped with the new stuff.
594 * We also do not keep track of reference or change flags.
596 * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
597 * with interruptions and translation disabled and under the control of the lock located
598 * in the first block map. MRU is used because it is expected that the same entry
599 * will be accessed repeatedly while PTEs are being generated to cover those addresses.
603 void pmap_map_block(pmap_t pmap
, vm_offset_t va
, vm_offset_t pa
, vm_size_t size
, vm_prot_t prot
, int attr
, unsigned int flags
) { /* Map an autogenned block */
605 register blokmap
*blm
, *oblm
, *oblm_virt
;;
609 kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
610 pmap
, va
, pa
, size
, prot
, attr
);
613 if(size
< ODDBLKMIN
) { /* Is this below the minimum size? */
614 for(pg
= 0; pg
< size
; pg
+= PAGE_SIZE
) { /* Add all pages in this block */
615 mapping_make(pmap
, 0, va
+ pg
, pa
+ pg
, prot
, attr
, 0); /* Map this page on in */
617 kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
621 return; /* All done */
624 blm
= (blokmap
*)mapping_alloc(); /* Get a block mapping */
626 blm
->start
= (unsigned int)va
& -PAGE_SIZE
; /* Get virtual block start */
627 blm
->end
= (blm
->start
+ size
- 1) | (PAGE_SIZE
- 1); /* Get virtual block end */
629 blm
->PTEr
= ((unsigned int)pa
& -PAGE_SIZE
) | attr
<<3 | ppc_prot(prot
); /* Build the real portion of the base PTE */
630 blm
->space
= pmap
->space
; /* Set the space (only needed for remove) */
631 blm
->blkFlags
= flags
; /* Set the block's flags */
634 kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
635 blm
, blm
->start
, blm
->end
, blm
->PTEr
);
638 blm
= (blokmap
*)hw_cvp((mapping
*)blm
); /* Get the physical address of this */
641 kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
646 oblm
= hw_add_blk(pmap
, blm
);
647 if ((unsigned int)oblm
& 2) {
648 oblm_virt
= (blokmap
*)hw_cpv((mapping
*)((unsigned int)oblm
& 0xFFFFFFFC));
649 mapping_remove(pmap
, oblm_virt
->start
);
651 } while ((unsigned int)oblm
& 2);
654 oblm
= (blokmap
*)hw_cpv((mapping
*) oblm
); /* Get the old block virtual address */
655 blm
= (blokmap
*)hw_cpv((mapping
*)blm
); /* Back to the virtual address of this */
656 if((oblm
->start
!= blm
->start
) || /* If we have a match, then this is a fault race and */
657 (oblm
->end
!= blm
->end
) || /* is acceptable */
658 (oblm
->PTEr
!= blm
->PTEr
))
659 panic("pmap_map_block: block map overlap - blm = %08X\n", oblm
);/* Otherwise, Squeak loudly and carry a big stick */
660 mapping_free((struct mapping
*)blm
);
664 kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
673 * Optimally enters translations for odd-sized V=F blocks.
675 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
676 * will be split into normal-sized page mappings.
678 * This one is different than pmap_map_block in that it will allocate its own virtual
679 * target address. Rather than allocating a single block,
680 * it will also allocate multiple blocks that are power-of-two aligned/sized. This allows
681 * hardware-level mapping that takes advantage of BAT maps or large page sizes.
683 * Most considerations for pmap_map_block apply.
688 kern_return_t
pmap_map_block_opt(vm_map_t map
, vm_offset_t
*va
,
689 vm_offset_t pa
, vm_size_t size
, vm_prot_t prot
, int attr
) { /* Map an optimal autogenned block */
691 register blokmap
*blm
, *oblm
;
697 kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
698 map
, pa
, size
, prot
, attr
);
701 if(size
< ODDBLKMIN
) { /* Is this below the minimum size? */
702 err
= vm_allocate(map
, va
, size
, VM_FLAGS_ANYWHERE
); /* Make us some memories */
705 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err
); /* Say we died */
707 return(err
); /* Pass back the error */
710 kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va
); /* (TEST/DEBUG) */
713 for(pg
= 0; pg
< size
; pg
+= PAGE_SIZE
) { /* Add all pages in this block */
714 mapping_make(map
->pmap
, 0, *va
+ pg
, pa
+ pg
, prot
, attr
, 0); /* Map this page on in */
716 return(KERN_SUCCESS
); /* All done */
719 err
= vm_map_block(map
, va
, &bnd
, pa
, size
, prot
); /* Go get an optimal allocation */
721 if(err
== KERN_INVALID_ADDRESS
) { /* Can we try a brute force block mapping? */
722 err
= vm_allocate(map
, va
, size
, VM_FLAGS_ANYWHERE
); /* Make us some memories */
725 kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err
); /* Say we died */
727 return(err
); /* Pass back the error */
730 kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va
); /* (TEST/DEBUG) */
732 pmap_map_block(map
->pmap
, *va
, pa
, size
, prot
, attr
, 0); /* Set up a block mapped area */
733 return KERN_SUCCESS
; /* All done now */
736 if(err
!= KERN_SUCCESS
) { /* We couldn't get any address range to map this... */
738 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err
); /* Say we couldn' do it */
744 kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va
, bnd
); /* (TEST/DEBUG) */
746 mapping_block_map_opt(map
->pmap
, *va
, pa
, bnd
, size
, prot
, attr
); /* Go build the maps */
747 return(KERN_SUCCESS
); /* All done */
754 * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
757 * Once blocks are merged, they act like one block, i.e., if you remove it,
760 * This can only be used during boot. Ain't no way we can handle SMP
761 * or preemption easily, so we restrict it. We don't check either. We
762 * assume only skilled professional programmers will attempt using this
763 * function. We assume no responsibility, either real or imagined, for
764 * injury or death resulting from unauthorized use of this function.
766 * No user serviceable parts inside. Notice to be removed by end-user only,
767 * under penalty of applicable federal and state laws.
769 * See descriptions of pmap_map_block. Ignore the part where we say we panic for
770 * overlapping areas. Note that we do panic if we can't merge.
/*
 *	pmap_map_block_merge(pmap, va, pa, size, prot, attr) - map an odd-sized V=F block,
 *	merging with adjacent/overlapping blocks where the hardware layer allows it.
 *
 *	Boot-time only (no SMP/preemption protection beyond splhigh).  Blocks smaller
 *	than ODDBLKMIN are entered as ordinary per-page mappings instead.  Panics if
 *	hw_add_blk() reports an overlap it cannot merge.
 */
void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size,
		vm_prot_t prot, int attr) {			/* Map an autogenned block */

	register blokmap *blm, *oblm;
	/* NOTE(review): declarations of 's' (spl level) and 'pg' (page offset) are not
	   visible in this chunk of the file -- presumably elided by extraction; confirm
	   against the full source. */

	kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
	 pmap, va, pa, size, prot, attr);

	s = splhigh();							/* Don't bother from now on */

	if(size < ODDBLKMIN) {					/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return;								/* All done */
	}

	blm = (blokmap *)mapping_alloc();		/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;				/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	/* attr goes into the WIMG field (bits 3..6); ppc_prot maps VM protection to PP bits */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */

	kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
	 blm, blm->start, blm->end, blm->PTEr);

	blm = (blokmap *)hw_cvp((mapping *)blm);	/* Get the physical address of this */

	/* NOTE(review): the argument list of this kprintf was elided by extraction;
	   presumably (blm, pmap->bmaps) -- confirm against the full source. */
	kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
	 blm, pmap->bmaps);

	if(oblm = hw_add_blk(pmap, blm)) {		/* Add to list and make sure we don't overlap anything */
		panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);	/* Squeak loudly and carry a big stick */
	}

	kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
	 pmap->bmaps);

	splx(s);								/* Ok for interruptions now */
	return;
}
826 * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
828 * This routine takes a physical entry and runs through all mappings attached to it and changes
829 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
830 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
831 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
832 * higher to lower, lower to higher.
834 * Phys_entry is unlocked.
/*
 *	mapping_protect_phys(pp, prot, locked) - change the protection of every mapping
 *	attached to a physical page.
 *
 *	pp     - physical entry to operate on
 *	prot   - new VM protection (translated to PPC PP bits via ppc_prot)
 *	locked - TRUE if the caller already holds the physent lock
 *
 *	Runs at splhigh.  The actual walk/invalidations are done by hw_prot().
 */
void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {	/* Change protection of all mappings to page */

	/* NOTE(review): declaration of 'spl' (saved interrupt level) is not visible in
	   this chunk of the extraction -- presumably elided. */

	debugLog2(9, pp->pte1, prot);			/* Trace: entry */
	spl=splhigh();							/* No interruptions during this */

	if(!locked) {							/* Do we need to lock the physent? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
			  pp, pp->phys_link, pp->pte1);	/* Complain about timeout */
		}
	}

	hw_prot(pp, ppc_prot(prot));			/* Go set the protection on this physical page */

	if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	splx(spl);								/* Restore interrupt state */
	debugLog2(10, pp->pte1, 0);				/* Trace: exit */

	return;									/* Leave... */
}
860 * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
862 * This routine takes a pmap and virtual address and changes
863 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
864 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
865 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
866 * higher to lower, lower to higher.
/*
 *	mapping_protect(pmap, vaddr, prot) - change the protection of a single virtual page.
 *
 *	Looks up (and locks the physent for) the mapping of vaddr in the given pmap's
 *	space.  Silently returns if no mapping exists; panics if the physent lock
 *	times out.  Runs at splhigh.
 */
void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) {	/* Change protection of a virtual page */

	/* NOTE(review): declarations of 's', 'mp', and 'mpv' are not visible in this
	   chunk of the extraction -- presumably elided. */

	debugLog2(9, vaddr, pmap);				/* start mapping_protect */
	s = splhigh();							/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, vaddr);	/* Lock the physical entry for this mapping */

	if(!mp) {								/* Did we find one? */
		splx(s);							/* Restore the interrupt level */
		debugLog2(10, 0, 0);				/* end mapping_pmap */
		return;								/* Didn't find any... */
	}

	if((unsigned int)mp & 1) {				/* Low bit set in the result flags a lock timeout */
		panic("mapping_protect: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);							/* NOTE(review): unreachable -- panic() does not return */
		return;								/* Bad hair day... */
	}

	hw_prot_virt(mp, ppc_prot(prot));		/* Go set the protection on this virtual mapping */

	mpv = hw_cpv(mp);						/* Get virtual address of mapping */
	if(mpv->physent) {						/* If there is a physical page, */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}

	splx(s);								/* Restore interrupt state */
	debugLog2(10, mpv->PTEr, 0);			/* Trace: exit */

	return;									/* Leave... */
}
904 * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
906 * This routine takes a physical entry and sets the physical attributes. There can be no mappings
907 * associated with this page when we do it.
/*
 *	mapping_phys_attr(pp, prot, wimg) - set the default protection and WIMG storage
 *	attributes of a physical page.
 *
 *	Per the block comment above, the page must have no mappings attached when this
 *	is called; the physent lock is taken only to serialize with other physent
 *	operations.  Panics on lock timeout.
 */
void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {	/* Sets the default physical page attributes */

	debugLog2(11, pp->pte1, prot);			/* Trace: entry */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
		  pp, pp->phys_link, pp->pte1);		/* Complain about timeout */
	}

	hw_phys_attr(pp, ppc_prot(prot), wimg);	/* Go set the default WIMG and protection */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	debugLog2(12, pp->pte1, wimg);			/* Trace: exit */

	return;									/* Leave... */
}
928 * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
930 * This routine takes a physical entry and runs through all mappings attached to it and invalidates
933 * Interruptions must be disabled and the physical entry locked at entry.
/*
 *	mapping_invall(pp) - invalidate every PTE associated with a physical page.
 *
 *	Defers entirely to the hardware layer, which walks the mapping chain of the
 *	physical entry.  Per the block comment above, the caller must have
 *	interruptions disabled and the physical entry locked.
 */
void
mapping_invall(struct phys_entry *pp)
{
	hw_inv_all(pp);							/* Hardware layer walks the chain and invalidates each PTE */
}
945 * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
947 * This routine takes a physical entry and runs through all mappings attached to it and turns
948 * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before
949 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
950 * either (I don't think, maybe I'll change my mind later).
952 * Interruptions must be disabled and the physical entry locked at entry.
/*
 *	mapping_clr_mod(pp) - clear the change (modified) bit of a physical page.
 *
 *	Thin wrapper over the hardware layer.  Per the block comment above, the
 *	caller must have interruptions disabled and the physical entry locked.
 */
void
mapping_clr_mod(struct phys_entry *pp)
{
	hw_clr_mod(pp);							/* Hardware layer clears the change bit */
}
963 * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
965 * This routine takes a physical entry and runs through all mappings attached to it and turns
966 * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before
967 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
968 * either (I don't think, maybe I'll change my mind later).
970 * Interruptions must be disabled and the physical entry locked at entry.
/*
 *	mapping_set_mod(pp) - set the change (modified) bit of a physical page.
 *
 *	Thin wrapper over the hardware layer.  Per the block comment above, the
 *	caller must have interruptions disabled and the physical entry locked.
 */
void
mapping_set_mod(struct phys_entry *pp)
{
	hw_set_mod(pp);							/* Hardware layer sets the change bit */
}
981 * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
983 * This routine takes a physical entry and runs through all mappings attached to it and turns
984 * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
985 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
986 * either (I don't think, maybe I'll change my mind later).
988 * Interruptions must be disabled at entry.
/*
 *	mapping_clr_ref(pp) - clear the reference bit of a physical page.
 *
 *	Unlike the set/clear-mod routines, this one takes the physent lock itself
 *	(only interruptions must already be disabled -- see block comment above).
 *	Panics on lock timeout.
 */
void mapping_clr_ref(struct phys_entry *pp) {	/* Clears the reference bit of a physical page */

	debugLog2(13, pp->pte1, 0);				/* Trace: entry */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry for this mapping */
		panic("Lock timeout getting lock on physical entry\n");	/* Just die... */
	}

	hw_clr_ref(pp);							/* Go clear the reference bit of a physical page */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock physical entry */
	debugLog2(14, pp->pte1, 0);				/* Trace: exit */

	return;									/* Leave... */
}
1007 * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
1009 * This routine takes a physical entry and runs through all mappings attached to it and turns
1010 * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1011 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1012 * either (I don't think, maybe I'll change my mind later).
1014 * Interruptions must be disabled and the physical entry locked at entry.
/*
 *	mapping_set_ref(pp) - set the reference bit of a physical page.
 *
 *	Thin wrapper over the hardware layer.  Per the block comment above, the
 *	caller must have interruptions disabled and the physical entry locked.
 */
void
mapping_set_ref(struct phys_entry *pp)
{
	hw_set_ref(pp);							/* Hardware layer sets the reference bit */
}
1025 * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
1027 * This routine takes a physical entry and runs through all mappings attached to it and tests
1028 * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before
1029 * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
1030 * either (I don't think, maybe I'll change my mind later).
1032 * Interruptions must be disabled and the physical entry locked at entry.
1035 boolean_t
mapping_tst_mod(struct phys_entry
*pp
) { /* Tests the change bit of a physical page */
1037 return(hw_tst_mod(pp
)); /* Go test the change bit of a physical page */
1042 * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
1044 * This routine takes a physical entry and runs through all mappings attached to it and tests
1045 * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1046 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1047 * either (I don't think, maybe I'll change my mind later).
1049 * Interruptions must be disabled and the physical entry locked at entry.
1052 boolean_t
mapping_tst_ref(struct phys_entry
*pp
) { /* Tests the reference bit of a physical page */
1054 return(hw_tst_ref(pp
)); /* Go test the reference bit of a physical page */
1059 * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
1061 * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
1064 void mapping_phys_init(struct phys_entry
*pp
, unsigned int pa
, unsigned int wimg
) { /* Initializes hw specific storage attributes */
1066 pp
->pte1
= (pa
& -PAGE_SIZE
) | ((wimg
<< 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
1068 return; /* Leave... */
1073 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
1075 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
1076 * the number of free mappings remaining, and if below a threshold, replenishes them.
1077 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
1078 * a new one is allocated.
1080 * This routine allocates and/or memory and must be called from a safe place.
1081 * Currently, vm_pageout_scan is the safest place. We insure that the
thread_call_t				mapping_adjust_call;		/* Deferred call used to replenish/trim the pool */
static thread_call_data_t	mapping_adjust_call_data;	/* Storage for the single deferred call */

/*
 *	mapping_adjust() - free queued-for-release mapping blocks and/or replenish the
 *	free list (see the block comment above).  Allocates memory, so it must run from
 *	a safe context (vm_pageout_scan); re-entry is gated by mapCtl.mapcrecurse.
 */
void mapping_adjust(void) {					/* Adjust free mappings */

	mappingblok	*mb, *mbn;
	/* NOTE(review): declarations of 's', 'allocsize', and 'retr' are not visible
	   in this chunk of the extraction -- presumably elided. */
	extern int	vm_page_free_count;

	if(mapCtl.mapcmin <= MAPPERBLOK) {		/* First call: establish the target minimum */
		mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;	/* Keep mappings for ~1/16 of physical pages */

		kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
		kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
		  mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
	}

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_adjust - timeout getting control lock (1)\n");	/* Tell all and die */
	}

	if (mapping_adjust_call == NULL) {		/* Lazily set up the deferred call on first use */
		thread_call_setup(&mapping_adjust_call_data, 
				  (thread_call_func_t)mapping_adjust, 
				  (thread_call_param_t)NULL);
		mapping_adjust_call = &mapping_adjust_call_data;
	}

	while(1) {								/* Keep going until we've got enough */

		allocsize = mapCtl.mapcmin - mapCtl.mapcfree;	/* Figure out how much we need */
		if(allocsize < 1) break;			/* Leave if we have all we need */

		if((unsigned int)(mbn = mapCtl.mapcrel)) {	/* Can we rescue a free one? */
			mapCtl.mapcrel = mbn->nextblok;	/* Dequeue it */
			mapCtl.mapcreln--;				/* Back off the count */
			allocsize = MAPPERBLOK;			/* Show we allocated one block */
		}
		else {								/* No free ones, try to get it */

			allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get the number of pages we need */

			/* Drop the lock and spl across the allocation -- kmem_alloc may block */
			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
			splx(s);						/* Restore 'rupts */

			for(; allocsize > 0; allocsize >>= 1) {		/* Try allocating in descending halves */
				retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);	/* Find a virtual address to use */
				if((retr != KERN_SUCCESS) && (allocsize == 1)) {	/* Did we find any memory at all? */
					/* NOTE(review): the body of this branch was elided by extraction --
					   confirm against the full source. */
				}
				if(retr == KERN_SUCCESS) break;	/* We got some memory, bail out... */
			}

			allocsize = allocsize * MAPPERBLOK;	/* Convert pages to number of maps allocated */

			s = splhigh();					/* Don't bother from now on */
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_adjust - timeout getting control lock (2)\n");	/* Tell all and die */
			}

			if (retr != KERN_SUCCESS)
				break;						/* Fail to allocate, bail out... */
		}

		for(; allocsize > 0; allocsize -= MAPPERBLOK) {	/* Release one block at a time */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
			mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);	/* Point to the next slot */
		}

		/* Track the high-water mark of total mappings ever allocated */
		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	if(mapCtl.mapcholdoff) {				/* Should we hold off this release? (see mapping_prealloc) */
		mapCtl.mapcrecurse = 0;				/* We are done now */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);							/* Restore 'rupts */
		return;								/* Return... */
	}

	mbn = mapCtl.mapcrel;					/* Get first pending release block */
	mapCtl.mapcrel = 0;						/* Dequeue them */
	mapCtl.mapcreln = 0;					/* Set count to 0 */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */

	while((unsigned int)mbn) {				/* Toss 'em all */
		mb = mbn->nextblok;					/* Get the next */
		kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);	/* Release this mapping block */
		mbn = mb;							/* Chain to the next */
	}

	__asm__ volatile("sync");				/* Make sure all is well */
	mapCtl.mapcrecurse = 0;					/* We are done now */
}
1183 * mapping_free(mapping *mp) - release a mapping to the free list
1185 * This routine takes a mapping and adds it to the free list.
1186 * If this mapping make the block non-empty, we queue it to the free block list.
1187 * NOTE: we might want to queue it to the end to help quell the pathological
1188 * case when we get a mapping and free it repeatedly causing the block to chain and unchain.
1189 * If this release fills a block and we are above the threshold, we release the block
/*
 *	mapping_free(mp) - return a mapping to the free list (see block comment above).
 *
 *	Marks the slot free in its containing page-aligned mappingblok.  If the block
 *	was full, it rejoins the free-block chain; if the free becomes entirely empty
 *	and we are above the minimum, non-permanent blocks move to the release list
 *	(freed later by mapping_adjust, kicked via thread_call when over MAPFRTHRSH).
 */
void mapping_free(struct mapping *mp) {		/* Release a mapping */

	mappingblok *mb, *mbn;
	/* NOTE(review): declaration of 's' (spl level) is not visible in this chunk
	   of the extraction -- presumably elided. */
	unsigned int full, mindx;

	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;		/* Get index to mapping (32-byte slots) */
	mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);	/* Point to the mapping block */

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_free - timeout getting control lock\n");	/* Tell all and die */
	}

	full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);	/* See if full now */
	mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));	/* Flip on the free bit */

	if(full) {								/* If it was full before this: */
		mb->nextblok = mapCtl.mapcnext;		/* Move head of list to us */
		mapCtl.mapcnext = mb;				/* Chain us to the head of the list */
		if(!((unsigned int)mapCtl.mapclast))
			mapCtl.mapclast = mb;			/* First block on an empty chain is also the last */
	}

	mapCtl.mapcfree++;						/* Bump free count */
	mapCtl.mapcinuse--;						/* Decrement in use count */

	mapCtl.mapcfreec++;						/* Count total calls */

	if(mapCtl.mapcfree > mapCtl.mapcmin) {	/* Should we consider releasing this? */
		/* Bit 0 is the header slot and never allocatable, hence the 0x80000000 OR */
		if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3]) 
		   == 0xFFFFFFFF) {					/* See if empty now */

			if(mapCtl.mapcnext == mb) {		/* Are we first on the list? */
				mapCtl.mapcnext = mb->nextblok;	/* Unchain us */
				if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* If last, remove last */
			}
			else {							/* We're not first */
				for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {	/* Search for our block */
					if(mbn->nextblok == mb) break;	/* Is the next one ours? */
				}
				if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
				mbn->nextblok = mb->nextblok;	/* Dequeue us */
				if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;	/* If last, make our predecessor last */
			}

			if(mb->mapblokflags & mbPerm) {	/* Is this permanently assigned? */
				mb->nextblok = mapCtl.mapcnext;	/* Move chain head to us */
				mapCtl.mapcnext = mb;		/* Chain us to the head */
				if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb;	/* If last, make us so */
			}
			else {							/* NOTE(review): this 'else {' line was elided by extraction */
				mapCtl.mapcfree -= MAPPERBLOK;	/* Remove the block from the free count */
				mapCtl.mapcreln++;			/* Count on release list */
				mb->nextblok = mapCtl.mapcrel;	/* Move pointer */
				mapCtl.mapcrel = mb;		/* Chain us in front */
			}
		}
	}

	if(mapCtl.mapcreln > MAPFRTHRSH) {		/* Do we have way too many releasable mappings? */
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go toss some */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */

	return;									/* Bye, dude... */
}
1265 * mapping_alloc(void) - obtain a mapping from the free list
1267 * This routine takes a mapping off of the free list and returns its address.
1269 * We do this by finding a free entry in the first block and allocating it.
1270 * If this allocation empties the block, we remove it from the free list.
1271 * If this allocation drops the total number of free entries below a threshold,
1272 * we allocate a new block.
/*
 *	mapping_alloc() - take a mapping off the free list and return its address
 *	(see block comment above).
 *
 *	If the free chain is empty, steals mappings by scanning PCAs with
 *	hw_select_mappings() and removing the victims -- panics if every mapping is
 *	wired.  Replenishes via the release list or a deferred mapping_adjust call
 *	when the free count drops below the minimum.  Runs at splhigh under the
 *	mapCtl lock; returns a dcbz-cleared mapping.
 */
mapping *mapping_alloc(void) {				/* Obtain a mapping */

	register mapping *mp;
	mappingblok	*mb, *mbn;
	/* NOTE(review): declarations of 's' and 'mindx' are not visible in this chunk
	   of the extraction -- presumably elided. */

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
	}

	if(!(mb = mapCtl.mapcnext)) {			/* Get the first block entry; empty means steal some */

		struct mappingflush	mappingflush;
		PCA					*pca_min, *pca_max;
		/* NOTE(review): declarations of 'i' and 'pca_base' are not visible in this
		   chunk of the extraction -- presumably elided. */

		/* The PCA array sits immediately above the hash table and is its size again */
		pca_min = (PCA *)(hash_table_base+hash_table_size);
		pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);

		while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {	/* Steal until we have two blocks' worth free */

			mapCtl.mapcflush.mappingcnt = 0;
			pca_base = mapCtl.mapcflush.pcaptr;	/* Remember where this sweep started */
			do {							/* NOTE(review): 'do {' reconstructed -- the line was elided by extraction */
				hw_select_mappings(&mapCtl.mapcflush);	/* Pick victim mappings from this PCA */
				mapCtl.mapcflush.pcaptr++;
				if (mapCtl.mapcflush.pcaptr >= pca_max)
					mapCtl.mapcflush.pcaptr = pca_min;	/* Wrap the scan pointer */
			} while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));

			if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
				hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
				panic("mapping_alloc - all mappings are wired\n");
			}

			/* Copy the victim list, then drop the lock so mapping_remove can run */
			mappingflush = mapCtl.mapcflush;
			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
			splx(s);
			for (i=0;i<mappingflush.mappingcnt;i++)
				mapping_remove(mappingflush.mapping[i].pmap, 
					       mappingflush.mapping[i].offset);
			s = splhigh();
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
				panic("mapping_alloc - timeout getting control lock\n");
			}
		}
		mb = mapCtl.mapcnext;				/* The flush refilled the free chain */
	}

	if(!(mindx = mapalc(mb))) {				/* Allocate a slot */
		panic("mapping_alloc - empty mapping block detected at %08X\n", mb);	/* Not allowed to find none */
	}

	if(mindx < 0) {							/* Did we just take the last one? (mapalc negates the index) */
		mindx = -mindx;						/* Make positive */
		mapCtl.mapcnext = mb->nextblok;		/* Remove us from the list */
		if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* Removed the last one */
	}

	mapCtl.mapcfree--;						/* Decrement free count */
	mapCtl.mapcinuse++;						/* Bump in use count */

	mapCtl.mapcallocc++;					/* Count total calls */

/*
 *	Note: in the following code, we will attempt to rescue blocks only one at a time.
 *	Eventually, after a few more mapping_alloc calls, we will catch up. If there are none 
 *	rescueable, we will kick the misc scan who will allocate some for us.  We only do this
 *	if we haven't already done it.
 *	For early boot, we are set up to only rescue one block at a time.  This is because we prime
 *	the release list with as much as we need until threads start.
 */
	if(mapCtl.mapcfree < mapCtl.mapcmin) {	/* See if we need to replenish */
		if(mbn = mapCtl.mapcrel) {			/* Try to rescue a block from impending doom */
			mapCtl.mapcrel = mbn->nextblok;	/* Pop the queue */
			mapCtl.mapcreln--;				/* Back off the count */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
		}
		else {								/* We need to replenish */
			if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
				if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
					thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
				}
			}
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */

	mp = &((mapping *)mb)[mindx];			/* Point to the allocated mapping */
	__asm__ volatile("dcbz 0,%0" : : "r" (mp));	/* Zero the 32-byte cache line (clears the mapping) */
	return mp;								/* Send it back... */
}
/*
 *	consider_mapping_adjust() - kick a deferred mapping_adjust() if the free
 *	mapping count has fallen below a quarter of the minimum.  Re-entry is gated
 *	by mapCtl.mapcrecurse, exactly as in mapping_alloc().
 */
/* NOTE(review): the return type line ('void') and the declaration of 's' are not
   visible in this chunk of the extraction -- presumably elided. */
void
consider_mapping_adjust()
{
	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
	}

	if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */
}
1397 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1399 * The mapping block is a page size area on a page boundary. It contains 1 header and 127
1400 * mappings. This call adds and initializes a block for use.
1402 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1403 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1404 * corresponds to the header). The translation mask is the XOR of the virtual and real
1405 * addresses (needless to say, the block must be wired).
1407 * We handle these mappings the same way as saveareas: the block is only on the chain so
1408 * long as there are free entries in it.
1410 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1411 * mappings. Blocks marked PERM won't ever be released.
1413 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1414 * list. We do this only at start up time. This is done because we only allocate blocks
1415 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1416 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1417 * them on the release queue, the allocate routine will rescue them. Then when the
1418 * pageout scan starts, all extra ones will be released.
/*
 *	mapping_free_init(mbl, perm, locked) - add a page-sized block of mappings to
 *	the pool (see block comment above).
 *
 *	mbl    - page-aligned virtual address of the block
 *	perm   - >0: permanent (V=R) block; 0: normal block; <0: queue straight to
 *	         the release list (boot-time preallocation)
 *	locked - TRUE if the caller already holds the mapCtl lock
 */
void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {	
												/* Set's start and end of a block of mappings
												   perm indicates if the block can be released 
												   or goes straight to the release queue .
												   locked indicates if the lock is held already */

	mappingblok *mb;
	/* NOTE(review): declarations of 's' and 'raddr' are not visible in this chunk
	   of the extraction -- presumably elided. */

	mb = (mappingblok *)mbl;				/* Start of area */

	if(perm >= 0) {							/* See if we need to initialize the block */
		if(perm) {							/* NOTE(review): this 'if(perm) {' line was elided by extraction */
			raddr = (unsigned int)mbl;		/* Perm means V=R */
			mb->mapblokflags = mbPerm;		/* Set perm */
		}
		else {
			raddr = kvtophys(mbl);			/* Get real address */
			mb->mapblokflags = 0;			/* Set not perm */
		}

		mb->mapblokvrswap = raddr ^ (unsigned int)mbl;	/* Form translation mask (XOR of virtual and real) */

		mb->mapblokfree[0] = 0x7FFFFFFF;	/* Set first 32 (minus 1) free -- slot 0 is the header */
		mb->mapblokfree[1] = 0xFFFFFFFF;	/* Set next 32 free */
		mb->mapblokfree[2] = 0xFFFFFFFF;	/* Set next 32 free */
		mb->mapblokfree[3] = 0xFFFFFFFF;	/* Set next 32 free */
	}

	s = splhigh();							/* Don't bother from now on */
	if(!locked) {							/* Do we need the lock? */
		if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
			panic("mapping_free_init - timeout getting control lock\n");	/* Tell all and die */
		}
	}

	if(perm < 0) {							/* Direct to release queue? */
		mb->nextblok = mapCtl.mapcrel;		/* Move forward pointer */
		mapCtl.mapcrel = mb;				/* Queue us on in */
		mapCtl.mapcreln++;					/* Count the free block */
	}
	else {									/* Add to the free list */

		mb->nextblok = 0;					/* We always add to the end */
		mapCtl.mapcfree += MAPPERBLOK;		/* Bump count */

		if(!((unsigned int)mapCtl.mapcnext)) {	/* First entry on list? */
			mapCtl.mapcnext = mapCtl.mapclast = mb;	/* Chain to us */
		}
		else {								/* We are not the first */
			mapCtl.mapclast->nextblok = mb;	/* Point the last to us */
			mapCtl.mapclast = mb;			/* We are now last */
		}
	}

	if(!locked) {							/* Do we need to unlock? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	}
	splx(s);								/* Restore 'rupts */
	return;									/* All done, leave... */
}
1490 * void mapping_prealloc(unsigned int) - Preallocates mappings for large request
1492 * No locks can be held, because we allocate memory here.
1493 * This routine needs a corresponding mapping_relpre call to remove the
1494 * hold off flag so that the adjust routine will free the extra mapping
1495 * blocks on the release list. I don't like this, but I don't know
1496 * how else to do this for now...
/*
 *	mapping_prealloc(size) - preallocate mappings for a large upcoming request
 *	(see block comment above; must be paired with mapping_relpre()).
 *
 *	Bumps the hold-off count so mapping_adjust won't free the extra blocks, then
 *	allocates enough page-sized blocks onto the release queue to cover (size>>12)
 *	mappings beyond the minimum.  Returns quietly if enough are already free or
 *	if a replenish is already in flight (mapcrecurse gate).
 */
void mapping_prealloc(unsigned int size) {	/* Preallocates mapppings for large request */

	/* NOTE(review): declarations of 's', 'nmapb', 'i', 'retr', and 'mbn' are not
	   visible in this chunk of the extraction -- presumably elided. */

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_prealloc - timeout getting control lock\n");	/* Tell all and die */
	}

	nmapb = (size >> 12) + mapCtl.mapcmin;	/* Get number of entries needed for this and the minimum */

	mapCtl.mapcholdoff++;					/* Bump the hold off count */

	if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {	/* Do we already have enough? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);							/* Restore 'rupts */
		return;
	}

	if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);							/* Restore 'rupts */
		return;
	}

	nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks to get */

	/* Drop the lock and spl across the allocations -- kmem_alloc may block */
	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */

	for(i = 0; i < nmapb; i++) {			/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			/* NOTE(review): the body of this branch (presumably a break) was elided
			   by extraction -- confirm against the full source. */
			break;
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize on to the release queue */

		/* Track the high-water mark of total mappings ever allocated */
		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	mapCtl.mapcrecurse = 0;					/* We are done now */
}
1545 * void mapping_relpre(void) - Releases preallocation release hold off
1547 * This routine removes the
1548 * hold off flag so that the adjust routine will free the extra mapping
1549 * blocks on the release list. I don't like this, but I don't know
1550 * how else to do this for now...
/*
 *	mapping_relpre() - release the preallocation hold-off taken by
 *	mapping_prealloc(), so mapping_adjust may free extra release-list blocks
 *	again.  Panics if the hold-off count would go negative (unbalanced calls).
 */
void mapping_relpre(void) {					/* Releases release hold off */

	/* NOTE(review): declaration of 's' is not visible in this chunk of the
	   extraction -- presumably elided. */

	s = splhigh();							/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_relpre - timeout getting control lock\n");	/* Tell all and die */
	}

	if(--mapCtl.mapcholdoff < 0) {			/* Back down the hold off count */
		panic("mapping_relpre: hold-off count went negative\n");
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);								/* Restore 'rupts */
}
1571 * void mapping_free_prime(void) - Primes the mapping block release list
1573 * See mapping_free_init.
1574 * No locks can be held, because we allocate memory here.
1575 * One processor running only.
/*
 *	mapping_free_prime() - prime the mapping block release list at boot
 *	(single processor, no locks -- see block comment above).
 *
 *	Creates the mapping_map submap, then preloads the release queue with four
 *	times the current permanent allocation so mapping_alloc can rescue blocks
 *	before the pageout thread exists.
 */
void mapping_free_prime(void) {				/* Primes the mapping block release list */

	/* NOTE(review): declarations of 'nmapb', 'i', 'retr', and 'mbn' are not
	   visible in this chunk of the extraction -- presumably elided. */
	vm_offset_t mapping_min;
	
	retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
			     FALSE, TRUE, &mapping_map);

	if (retr != KERN_SUCCESS)
		panic("mapping_free_prime: kmem_suballoc failed");

	nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get permanent allocation */
	nmapb = nmapb * 4;						/* Get 4 times our initial allocation */

	kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n", 
	  mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);

	for(i = 0; i < nmapb; i++) {			/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			panic("Whoops... Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto release queue */

		/* Track the high-water mark of total mappings ever allocated */
		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}
}
/*
 *	mapping_fake_zone_info() - report zone-style statistics for the mapping pool,
 *	which is not a real zone.  Sizes are derived from mapCtl: each page holds
 *	MAPPERBLOK mappings plus one header slot (hence PAGE_SIZE/(MAPPERBLOK+1)
 *	bytes per element), and release-list pages count as whole pages.
 */
/* NOTE(review): the return type line (presumably 'void') was elided by extraction. */
void
mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		 vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = mapCtl.mapcinuse;			/* Elements currently in use */
	*cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
	*max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;	/* High-water mark in bytes */
	*elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));	/* Bytes per mapping slot */
	*alloc_size = PAGE_SIZE;				/* We always allocate whole pages */
	/* NOTE(review): assignments to *collectable and *exhaustable are not visible
	   in this chunk of the extraction -- presumably elided; confirm against the
	   full source. */
}
1629 * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
1631 * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with
1632 * the same space. If it finds it, it returns the virtual address.
1634 * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check
1635 * for it and fail it myself...
1638 vm_offset_t
mapping_p2v(pmap_t pmap
, struct phys_entry
*pp
) { /* Finds first virtual mapping of a physical page in a space */
1641 register mapping
*mp
, *mpv
;
1644 if(pmap
->vflags
& pmapAltSeg
) return 0; /* If there are nested pmaps, fail immediately */
1647 if(!hw_lock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
, LockTimeOut
)) { /* Try to get the lock on the physical entry */
1648 splx(s
); /* Restore 'rupts */
1649 panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */
1650 return(0); /* Should die before here */
1653 va
= 0; /* Assume failure */
1655 for(mpv
= hw_cpv(pp
->phys_link
); mpv
; mpv
= hw_cpv(mpv
->next
)) { /* Scan 'em all */
1657 if(!(((mpv
->PTEv
>> 7) & 0x000FFFFF) == pmap
->space
)) continue; /* Skip all the rest if this is not the right space... */
1659 va
= ((((unsigned int)mpv
->PTEhash
& -64) << 6) ^ (pmap
->space
<< 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */
1660 va
= va
| ((mpv
->PTEv
<< 1) & 0xF0000000); /* Move in the segment number */
1661 va
= va
| ((mpv
->PTEv
<< 22) & 0x0FC00000); /* Add in the API for the top of the address */
1662 break; /* We're done now, pass virtual address back */
1665 hw_unlock_bit((unsigned int *)&pp
->phys_link
, PHYS_LOCK
); /* Unlock the physical entry */
1666 splx(s
); /* Restore 'rupts */
1667 return(va
); /* Return the result or 0... */
1673 * Convert a kernel virtual address to a physical address
1675 vm_offset_t
kvtophys(vm_offset_t va
) {
1677 register mapping
*mp
, *mpv
;
1678 register blokmap
*bmp
;
1679 register vm_offset_t pa
;
1682 s
=splhigh(); /* Don't bother from now on */
1683 mp
= hw_lock_phys_vir(PPC_SID_KERNEL
, va
); /* Find mapping and lock the physical entry for this mapping */
1685 if((unsigned int)mp
&1) { /* Did the lock on the phys entry time out? */
1686 splx(s
); /* Restore 'rupts */
1687 panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va
); /* Scream bloody murder! */
1691 if(!mp
) { /* If it was not a normal page */
1692 pa
= hw_cvp_blk(kernel_pmap
, va
); /* Try to convert odd-sized page (returns 0 if not found) */
1693 splx(s
); /* Restore 'rupts */
1694 return pa
; /* Return physical address */
1697 mpv
= hw_cpv(mp
); /* Convert to virtual addressing */
1699 if(!mpv
->physent
) { /* Was there a physical entry? */
1700 pa
= (vm_offset_t
)((mpv
->PTEr
& -PAGE_SIZE
) | ((unsigned int)va
& (PAGE_SIZE
-1))); /* Get physical address from physent */
1703 pa
= (vm_offset_t
)((mpv
->physent
->pte1
& -PAGE_SIZE
) | ((unsigned int)va
& (PAGE_SIZE
-1))); /* Get physical address from physent */
1704 hw_unlock_bit((unsigned int *)&mpv
->physent
->phys_link
, PHYS_LOCK
); /* Unlock the physical entry */
1707 splx(s
); /* Restore 'rupts */
1708 return pa
; /* Return the physical address... */
1714 * Convert a physical address to a kernel virtual address if
1715 * there is a mapping, otherwise return NULL
1718 vm_offset_t
phystokv(vm_offset_t pa
) {
1720 struct phys_entry
*pp
;
1723 pp
= pmap_find_physentry(pa
); /* Find the physical entry */
1724 if (PHYS_NULL
== pp
) {
1725 return (vm_offset_t
)NULL
; /* If none, return null */
1727 if(!(va
=mapping_p2v(kernel_pmap
, pp
))) {
1728 return 0; /* Can't find it, return 0... */
1730 return (va
| (pa
& (PAGE_SIZE
-1))); /* Build and return VADDR... */
1735 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1736 * page 0 access for the current thread.
1738 * If parameter is TRUE, faults are ignored
1739 * If parameter is FALSE, faults are honored
1743 void ignore_zero_fault(boolean_t type
) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1745 if(type
) current_act()->mact
.specFlags
|= ignoreZeroFault
; /* Ignore faults on page 0 */
1746 else current_act()->mact
.specFlags
&= ~ignoreZeroFault
; /* Honor faults on page 0 */
1748 return; /* Return the result or 0... */
1753 * Allocates a range of virtual addresses in a map as optimally as
1754 * possible for block mapping. The start address is aligned such
1755 * that a minimum number of power-of-two sized/aligned blocks is
1756 * required to cover the entire range.
1758 * We also use a mask of valid block sizes to determine optimality.
1760 * Note that the passed in pa is not actually mapped to the selected va,
1761 * rather, it is used to figure the optimal boundary. The actual
1762 * V to R mapping is done externally.
1764 * This function will return KERN_INVALID_ADDRESS if an optimal address
1765 * can not be found. It is not necessarily a fatal error, the caller may still be
1766 * still be able to do a non-optimal assignment.
1769 kern_return_t
vm_map_block(vm_map_t map
, vm_offset_t
*va
, vm_offset_t
*bnd
, vm_offset_t pa
,
1770 vm_size_t size
, vm_prot_t prot
) {
1772 vm_map_entry_t entry
, next
, tmp_entry
, new_entry
;
1773 vm_offset_t start
, end
, algnpa
, endadr
, strtadr
, curradr
;
1774 vm_offset_t boundary
;
1776 unsigned int maxsize
, minsize
, leading
, trailing
;
1778 assert(page_aligned(pa
));
1779 assert(page_aligned(size
));
1781 if (map
== VM_MAP_NULL
) return(KERN_INVALID_ARGUMENT
); /* Dude, like we need a target map */
1783 minsize
= blokValid
^ (blokValid
& (blokValid
- 1)); /* Set minimum subblock size */
1784 maxsize
= 0x80000000 >> cntlzw(blokValid
); /* Set maximum subblock size */
1786 boundary
= 0x80000000 >> cntlzw(size
); /* Get optimal boundary */
1787 if(boundary
> maxsize
) boundary
= maxsize
; /* Pin this at maximum supported hardware size */
1789 vm_map_lock(map
); /* No touchee no mapee */
1791 for(; boundary
> minsize
; boundary
>>= 1) { /* Try all optimizations until we find one */
1792 if(!(boundary
& blokValid
)) continue; /* Skip unavailable block sizes */
1793 algnpa
= (pa
+ boundary
- 1) & -boundary
; /* Round physical up */
1794 leading
= algnpa
- pa
; /* Get leading size */
1796 curradr
= 0; /* Start low */
1798 while(1) { /* Try all possible values for this opt level */
1800 curradr
= curradr
+ boundary
; /* Get the next optimal address */
1801 strtadr
= curradr
- leading
; /* Calculate start of optimal range */
1802 endadr
= strtadr
+ size
; /* And now the end */
1804 if((curradr
< boundary
) || /* Did address wrap here? */
1805 (strtadr
> curradr
) || /* How about this way? */
1806 (endadr
< strtadr
)) break; /* We wrapped, try next lower optimization... */
1808 if(strtadr
< map
->min_offset
) continue; /* Jump to the next higher slot... */
1809 if(endadr
> map
->max_offset
) break; /* No room right now... */
1811 if(vm_map_lookup_entry(map
, strtadr
, &entry
)) continue; /* Find slot, continue if allocated... */
1813 next
= entry
->vme_next
; /* Get the next entry */
1814 if((next
== vm_map_to_entry(map
)) || /* Are we the last entry? */
1815 (next
->vme_start
>= endadr
)) { /* or do we end before the next entry? */
1817 new_entry
= vm_map_entry_insert(map
, entry
, strtadr
, endadr
, /* Yes, carve out our entry */
1819 0, /* Offset into object of 0 */
1820 FALSE
, /* No copy needed */
1821 FALSE
, /* Not shared */
1822 FALSE
, /* Not in transition */
1823 prot
, /* Set the protection to requested */
1824 prot
, /* We can't change protection */
1825 VM_BEHAVIOR_DEFAULT
, /* Use default behavior, but makes no never mind,
1826 'cause we don't page in this area */
1827 VM_INHERIT_DEFAULT
, /* Default inheritance */
1828 0); /* Nothing is wired */
1830 vm_map_unlock(map
); /* Let the world see it all */
1831 *va
= strtadr
; /* Tell everyone */
1832 *bnd
= boundary
; /* Say what boundary we are aligned to */
1833 return(KERN_SUCCESS
); /* Leave, all is right with the world... */
1838 vm_map_unlock(map
); /* Couldn't find a slot */
1839 return(KERN_INVALID_ADDRESS
);
1843 * Copies data from a physical page to a virtual page. This is used to
1844 * move data from the kernel to user state.
1846 * Note that it is invalid to have a source that spans a page boundry.
1848 * We don't check protection either.
1849 * And we don't handle a block mapped sink address either.
1853 kern_return_t
copyp2v(vm_offset_t source
, vm_offset_t sink
, unsigned int size
) {
1857 unsigned int spaceid
;
1860 register mapping
*mpv
, *mp
;
1863 if((size
== 0) || ((source
^ (source
+ size
- 1)) & -PAGE_SIZE
)) return KERN_FAILURE
; /* We don't allow a source page crosser */
1864 map
= current_act()->map
; /* Get the current map */
1867 s
=splhigh(); /* Don't bother me */
1869 spaceid
= map
->pmap
->pmapSegs
[(unsigned int)sink
>> 28]; /* Get space ID. Don't bother to clean top bits */
1871 mp
= hw_lock_phys_vir(spaceid
, sink
); /* Lock the physical entry for the sink */
1872 if(!mp
) { /* Was it there? */
1873 splx(s
); /* Restore the interrupt level */
1874 ret
= vm_fault(map
, trunc_page(sink
), VM_PROT_READ
| VM_PROT_WRITE
, FALSE
, NULL
, 0); /* Didn't find it, try to fault it in... */
1875 if (ret
== KERN_SUCCESS
) continue; /* We got it in, try again to find it... */
1877 return KERN_FAILURE
; /* Didn't find any, return no good... */
1879 if((unsigned int)mp
&1) { /* Did we timeout? */
1880 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink
); /* Yeah, scream about it! */
1881 splx(s
); /* Restore the interrupt level */
1882 return KERN_FAILURE
; /* Bad hair day, return FALSE... */
1885 mpv
= hw_cpv(mp
); /* Convert mapping block to virtual */
1887 if(mpv
->PTEr
& 1) { /* Are we write protected? yes, could indicate COW */
1888 hw_unlock_bit((unsigned int *)&mpv
->physent
->phys_link
, PHYS_LOCK
); /* Unlock the sink */
1889 splx(s
); /* Restore the interrupt level */
1890 ret
= vm_fault(map
, trunc_page(sink
), VM_PROT_READ
| VM_PROT_WRITE
, FALSE
, NULL
, 0); /* check for a COW area */
1891 if (ret
== KERN_SUCCESS
) continue; /* We got it in, try again to find it... */
1892 return KERN_FAILURE
; /* Didn't find any, return no good... */
1894 left
= PAGE_SIZE
- (sink
& PAGE_MASK
); /* Get amount left on sink page */
1896 csize
= size
< left
? size
: left
; /* Set amount to copy this pass */
1898 pa
= (vm_offset_t
)((mpv
->physent
->pte1
& ~PAGE_MASK
) | ((unsigned int)sink
& PAGE_MASK
)); /* Get physical address of sink */
1900 bcopy_physvir((char *)source
, (char *)pa
, csize
); /* Do a physical copy, virtually */
1902 hw_set_mod(mpv
->physent
); /* Go set the change of the sink */
1904 hw_unlock_bit((unsigned int *)&mpv
->physent
->phys_link
, PHYS_LOCK
); /* Unlock the sink */
1905 splx(s
); /* Open up for interrupts */
1907 sink
+= csize
; /* Move up to start of next page */
1908 source
+= csize
; /* Move up source */
1909 size
-= csize
; /* Set amount for next pass */
1911 return KERN_SUCCESS
;
1916 * copy 'size' bytes from physical to physical address
1917 * the caller must validate the physical ranges
1919 * if flush_action == 0, no cache flush necessary
1920 * if flush_action == 1, flush the source
1921 * if flush_action == 2, flush the dest
1922 * if flush_action == 3, flush both source and dest
1925 kern_return_t
copyp2p(vm_offset_t source
, vm_offset_t dest
, unsigned int size
, unsigned int flush_action
) {
1927 switch(flush_action
) {
1929 flush_dcache(source
, size
, 1);
1932 flush_dcache(dest
, size
, 1);
1935 flush_dcache(source
, size
, 1);
1936 flush_dcache(dest
, size
, 1);
1940 bcopy_phys((char *)source
, (char *)dest
, size
); /* Do a physical copy */
1942 switch(flush_action
) {
1944 flush_dcache(source
, size
, 1);
1947 flush_dcache(dest
, size
, 1);
1950 flush_dcache(source
, size
, 1);
1951 flush_dcache(dest
, size
, 1);
1961 * Dumps out the mapping stuff associated with a virtual address
1963 void dumpaddr(space_t space
, vm_offset_t va
) {
1969 s
=splhigh(); /* Don't bother me */
1971 mp
= hw_lock_phys_vir(space
, va
); /* Lock the physical entry for this mapping */
1972 if(!mp
) { /* Did we find one? */
1973 splx(s
); /* Restore the interrupt level */
1974 printf("dumpaddr: virtual address (%08X) not mapped\n", va
);
1975 return; /* Didn't find any, return FALSE... */
1977 if((unsigned int)mp
&1) { /* Did we timeout? */
1978 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va
); /* Yeah, scream about it! */
1979 splx(s
); /* Restore the interrupt level */
1980 return; /* Bad hair day, return FALSE... */
1982 printf("dumpaddr: space=%08X; vaddr=%08X\n", space
, va
); /* Say what address were dumping */
1983 mpv
= hw_cpv(mp
); /* Get virtual address of mapping */
1987 hw_unlock_bit((unsigned int *)&mpv
->physent
->phys_link
, PHYS_LOCK
); /* Unlock physical entry associated with mapping */
1989 splx(s
); /* Was there something you needed? */
1990 return; /* Tell them we did it */
1996 * Prints out a mapping control block
2000 void dumpmapping(struct mapping
*mp
) { /* Dump out a mapping */
2002 printf("Dump of mapping block: %08X\n", mp
); /* Header */
2003 printf(" next: %08X\n", mp
->next
);
2004 printf(" hashnext: %08X\n", mp
->hashnext
);
2005 printf(" PTEhash: %08X\n", mp
->PTEhash
);
2006 printf(" PTEent: %08X\n", mp
->PTEent
);
2007 printf(" physent: %08X\n", mp
->physent
);
2008 printf(" PTEv: %08X\n", mp
->PTEv
);
2009 printf(" PTEr: %08X\n", mp
->PTEr
);
2010 printf(" pmap: %08X\n", mp
->pmap
);
2012 if(mp
->physent
) { /* Print physent if it exists */
2013 printf("Associated physical entry: %08X %08X\n", mp
->physent
->phys_link
, mp
->physent
->pte1
);
2016 printf("Associated physical entry: none\n");
2019 dumppca(mp
); /* Dump out the PCA information */
2025 * Prints out a PTEG control area
2029 void dumppca(struct mapping
*mp
) { /* PCA */
2034 pca
= (PCA
*)((unsigned int)mp
->PTEhash
&-64); /* Back up to the start of the PCA */
2035 pteg
=(unsigned int *)((unsigned int)pca
-(((hash_table_base
&0x0000FFFF)+1)<<16));
2036 printf(" Dump of PCA: %08X\n", pca
); /* Header */
2037 printf(" PCAlock: %08X\n", pca
->PCAlock
);
2038 printf(" PCAallo: %08X\n", pca
->flgs
.PCAallo
);
2039 printf(" PCAhash: %08X %08X %08X %08X\n", pca
->PCAhash
[0], pca
->PCAhash
[1], pca
->PCAhash
[2], pca
->PCAhash
[3]);
2040 printf(" %08X %08X %08X %08X\n", pca
->PCAhash
[4], pca
->PCAhash
[5], pca
->PCAhash
[6], pca
->PCAhash
[7]);
2041 printf("Dump of PTEG: %08X\n", pteg
); /* Header */
2042 printf(" %08X %08X %08X %08X\n", pteg
[0], pteg
[1], pteg
[2], pteg
[3]);
2043 printf(" %08X %08X %08X %08X\n", pteg
[4], pteg
[5], pteg
[6], pteg
[7]);
2044 printf(" %08X %08X %08X %08X\n", pteg
[8], pteg
[9], pteg
[10], pteg
[11]);
2045 printf(" %08X %08X %08X %08X\n", pteg
[12], pteg
[13], pteg
[14], pteg
[15]);
2050 * Dumps starting with a physical entry
2053 void dumpphys(struct phys_entry
*pp
) { /* Dump from physent */
2059 printf("Dump from physical entry %08X: %08X %08X\n", pp
, pp
->phys_link
, pp
->pte1
);
2060 mp
= hw_cpv(pp
->phys_link
);
2064 mp
= hw_cpv(mp
->next
);
2073 kern_return_t
bmapvideo(vm_offset_t
*info
);
2074 kern_return_t
bmapvideo(vm_offset_t
*info
) {
2076 extern struct vc_info vinfo
;
2078 (void)copyout((char *)&vinfo
, (char *)info
, sizeof(struct vc_info
)); /* Copy out the video info */
2079 return KERN_SUCCESS
;
2082 kern_return_t
bmapmap(vm_offset_t va
, vm_offset_t pa
, vm_size_t size
, vm_prot_t prot
, int attr
);
2083 kern_return_t
bmapmap(vm_offset_t va
, vm_offset_t pa
, vm_size_t size
, vm_prot_t prot
, int attr
) {
2085 pmap_map_block(current_act()->task
->map
->pmap
, va
, pa
, size
, prot
, attr
, 0); /* Map it in */
2086 return KERN_SUCCESS
;
2089 kern_return_t
bmapmapr(vm_offset_t va
);
2090 kern_return_t
bmapmapr(vm_offset_t va
) {
2092 mapping_remove(current_act()->task
->map
->pmap
, va
); /* Remove map */
2093 return KERN_SUCCESS
;