/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	This file is used to maintain the virtual to real mappings for a PowerPC machine.
 *	The code herein is primarily used to bridge between the pmap layer and the hardware layer.
 *	Currently, some of the function of this module is contained within pmap.c.  We may want to move
 *	all of this into it (or most anyway) for the sake of performance.  We shall see as we write it.
 *
 *	We also depend upon the structure of the phys_entry control block.  We do put some processor
 *	specific stuff in there.
 */
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <ppc/pmap_internals.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ddb/db_output.h>

#include <ppc/POWERMAC/video_console.h>		/* (TEST/DEBUG) */
#if	PERFTIMES && DEBUG
#define debugLog2(a, b, c) dbgLog2(a, b, c)
#else
#define debugLog2(a, b, c)
#endif
vm_map_t		mapping_map = VM_MAP_NULL;
#define			MAPPING_MAP_SIZE	33554432	/* 32MB address space */

unsigned int	incrVSID = 0;					/* VSID increment value */
unsigned int	mappingdeb0 = 0;
unsigned int	mappingdeb1 = 0;
extern unsigned int	hash_table_size;
extern vm_offset_t	mem_size;
/*
 *	ppc_prot translates from the mach representation of protections to the PPC version.
 *	We also allow for a direct setting of the protection bits.  This extends the mach
 *	concepts to allow the greater control we need for Virtual Machines (VMM).
 *	Calculating it like this saves a memory reference - and maybe a couple of microseconds.
 *	It eliminates the use of this table:
 *	unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
 */

#define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
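/*
 *	Worked example (added note, not from the original source): each table entry above
 *	occupies two bits of the packed constant, with entry p at bits (2p+1):(2p).  Spot checks:
 *
 *		ppc_prot(0) == (0xE4E4AFAC >>  0) & 3 == 0
 *		ppc_prot(1) == (0xE4E4AFAC >>  2) & 3 == 3
 *		ppc_prot(3) == (0xE4E4AFAC >>  6) & 3 == 2		(VM_PROT_READ | VM_PROT_WRITE)
 *		ppc_prot(9) == (0xE4E4AFAC >> 18) & 3 == 1
 *
 *	matching ppc_prot[0], [1], [3], and [9] in the table, i.e. the PP bits for the
 *	corresponding Mach protection values.
 */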
/*
 *	About PPC VSID generation:
 *
 *	This function is called to generate an address space ID.  This space ID must be unique within
 *	the system.  For the PowerPC, it is used to build the VSID.  We build a VSID in the following
 *	way:  space ID << 4 | segment.  Since a VSID is 24 bits, and out of that we reserve the last
 *	4, we can have 2^20 (1M) unique IDs.  Each pmap has a unique space ID, so we should be able
 *	to have 1M pmaps at a time, though in practice we couldn't; we'd run out of memory way before then.  The
 *	problem is that only a certain number of pmaps are kept in a free list and if that is full,
 *	they are released.  This causes us to lose track of which space IDs are free to be reused.
 *	We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
 *	when the space ID wraps, or 4) scan the list of pmaps and find a free one.
 *
 *	Yet another consideration is the hardware use of the VSID.  It is used as part of the hash
 *	calculation for virtual address lookup.  An improperly chosen value could potentially cause
 *	too many hashes to hit the same bucket, causing PTEG overflows.  The actual hash function
 *	is (page index XOR vsid) mod number of ptegs.  For a 32MB machine, using the suggested
 *	hash table size, there are 2^13 (8192) PTEGs.  Remember, though, that the bottom 4 bits
 *	are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
 *	before we start hashing to the same buckets with the same vaddrs.  Also, within a space ID,
 *	every 8192 pages (32MB) within a segment will hash to the same bucket.  That's 8 collisions
 *	per segment.  So, a scan of every page for 256MB would fill 32 PTEGs completely, but
 *	with no overflow.  I don't think that this is a problem.
 *
 *	There may be a problem with the space ID, though.  A new space ID is generated (mainly)
 *	whenever there is a fork.  There shouldn't really be any problem because (for a 32MB
 *	machine) we can have 512 pmaps and still not have hash collisions for the same address.
 *	The potential problem, though, is if we get long-term pmaps that have space IDs that are
 *	the same modulo 512.  We can reduce this problem by having the segment number be bits
 *	0-3 of the space ID rather than 20-23.  Doing this means that, in effect, corresponding
 *	vaddrs in different segments hash to the same PTEG.  While this is somewhat of a problem,
 *	I don't think that it is as significant as the other, so I'll make the space ID
 *	with segment first.
 *
 *	The final, and biggest, problem is the wrap, which will happen every 2^20 space IDs.
 *	While this is a problem that should only happen in periods counted in weeks, it can and
 *	will happen.  This is assuming a monotonically increasing space ID.  If we were to search
 *	for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
 *	That's pretty unlikely to happen.  There couldn't be enough storage to support a million tasks.
 *
 *	So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
 *	locked by free_pmap_lock) that is sorted in VSID sequence order.
 *
 *	Whenever we need a VSID, we walk the list looking for the next in the sequence from
 *	the last that was freed.  Then we allocate that.
 *
 *	NOTE: We must be called with interruptions off and free_pmap_lock held.
 */
/*
 *	Do anything that needs to be done before the mapping system can be used.
 *	Hash table must be initialized before we call this.
 *
 *	Calculate the SID increment.  Currently we use size^(1/2) + size^(1/4) + 1;
 */

void mapping_init(void) {

	unsigned int tmp;

	__asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size));	/* Get number of leading 0s */

	incrVSID = 1 << ((32 - tmp + 1) >> 1);			/* Get ceiling of sqrt of table size */
	incrVSID |= 1 << ((32 - tmp + 1) >> 2);			/* Get ceiling of quadroot of table size */
	incrVSID |= 1;									/* Set the low bit so the increment is odd */
}
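/*
 *	Worked example (added note; assumes a hypothetical 2MB hash table, hash_table_size = 0x00200000):
 *
 *		cntlzw(0x00200000) = 10, so tmp = 10 and (32 - tmp + 1) = 23
 *		incrVSID  = 1 << (23 >> 1) = 1 << 11 = 0x0800
 *		incrVSID |= 1 << (23 >> 2) = 1 <<  5 = 0x0020
 *		incrVSID |= 1
 *
 *	giving incrVSID = 0x0821.  Because the final OR makes the increment odd, it is
 *	relatively prime to the power-of-two space ID range, so the sequence eventually
 *	visits every space ID before repeating.
 */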
/*
 *	mapping_remove(pmap_t pmap, vm_offset_t va);
 *		Given a pmap and virtual address, this routine finds the mapping and removes it from
 *		both its PTEG hash list and the physical entry list.  The mapping block will be added to
 *		the free list.  If the free list threshold is reached, garbage collection will happen.
 *		We also kick back a return code to say whether or not we had one to remove.
 *
 *		We have a strict ordering here:  the mapping must be removed from the PTEG hash list before
 *		it can be removed from the physical entry list.  This allows us to get by with only the PTEG
 *		hash lock at page fault time.  The physical entry lock must be held while we remove the mapping
 *		from both lists.  The PTEG lock is one of the lowest level locks.  No PTE fault, interruptions,
 *		losing control, getting other locks, etc., are allowed when you hold it.  You do, and you die.
 *		It's just that simple!
 *
 *		When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
 *		However, a mapping's order on the PTEG hash chain is not.  The interrupt handler uses the PTEG
 *		lock to control the hash chain and may move the position of the mapping for MRU calculations.
 *
 *		Note that mappings do not need to point to a physical entry.  When they don't, it indicates
 *		the mapping is outside of physical memory and usually refers to a memory-mapped device of
 *		some sort.  Naturally, we can't lock what we don't have, so the phys entry lock and unlock
 *		routines return normally, but don't do anything.
 */
boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) {	/* Remove a single mapping for this VADDR
														   Returns TRUE if a mapping was found to remove */

	register blokmap	*blm;
	register mapping	*mp, *mpv, *mp1, *mpv1;
	unsigned int		*useadd, *useaddr, uindx;
	int					i;
	struct phys_entry	*pp;
	spl_t				s;

	debugLog2(1, va, pmap->space);					/* start mapping_remove */

	s = splhigh();									/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, va);			/* Lock the physical entry for this mapping */

	if(!mp) {										/* Did we find one? */
		splx(s);									/* Allow 'rupts now */

		if(mp = (mapping *)hw_rem_blk(pmap, va, va)) {	/* No normal pages, try to remove an odd-sized one */

			if((unsigned int)mp & 1) {				/* Make sure we don't unmap a permanent one */
				blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC));	/* Get virtual address */
				panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
					pmap, va, blm);
			}
			while ((unsigned int)mp & 2)
				mp = (mapping *)hw_rem_blk(pmap, va, va);

			blm = (blokmap *)hw_cpv(mp);			/* (TEST/DEBUG) */
			kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
				blm, blm->start, blm->end, blm->PTEr);

			mapping_free(hw_cpv(mp));				/* Release it */
			debugLog2(2, 1, 0);						/* End mapping_remove */
			return TRUE;							/* Tell them we did it */
		}
		debugLog2(2, 0, 0);							/* end mapping_remove */
		return FALSE;								/* Didn't find any, return FALSE... */
	}

	if((unsigned int)mp & 1) {						/* Did we timeout? */
		panic("mapping_remove: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);									/* Restore the interrupt level */
		return FALSE;								/* Bad hair day, return FALSE... */
	}

	mpv = hw_cpv(mp);								/* Get virtual address of mapping */
#if DEBUG
	if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
	(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

	for(i = 0; i < (pmapUsageMask + 1); i++) {		/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {		/* (TEST/DEBUG) */
			panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}

	hw_rem_map(mp);									/* Remove the corresponding mapping */

	if ((mpv->physent) && (pmap->vflags & pmapVMhost)) {
		pp = mpv->physent;

		while(mp1 = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {	/* Keep going so long as there's another */

			mpv1 = hw_cpv(mp1);						/* Get the virtual address */
#if DEBUG
			if(hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
			(void)hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

			uindx = ((mpv1->PTEv >> 24) & 0x78) | ((mpv1->PTEv >> 3) & 7);	/* Join segment number and top 3 bits of the API */
			useadd = (unsigned int *)&mpv1->pmap->pmapUsage[uindx];	/* Point to slot to bump */
			useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
			(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

			for(i = 0; i < (pmapUsageMask + 1); i++) {	/* (TEST/DEBUG) */
				if((mpv1->pmap->pmapUsage[i]) > 8192) {	/* (TEST/DEBUG) */
					panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
						i * pmapUsageSize, mpv1->pmap->pmapUsage[i], mpv1->pmap);
				}
			}

			hw_rem_map(mp1);						/* Remove the mapping */
			mapping_free(mpv1);						/* Add mapping to the free list */
		}
	}

	if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock physical entry associated with mapping */

	splx(s);										/* Was there something you needed? */

	mapping_free(mpv);								/* Add mapping to the free list */
	debugLog2(2, 1, 0);								/* end mapping_remove */
	return TRUE;									/* Tell them we did it */
}
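/*
 *	Note added for clarity: pmapUsage evidently holds 16-bit counters, while the
 *	hw_atomic_add/sub primitives operate on aligned 32-bit words.  The useadd/useaddr
 *	pairs above bridge that gap:
 *
 *		useadd  = (unsigned int *)&pmap->pmapUsage[slot];		the halfword we want
 *		useaddr = (unsigned int *)((unsigned int)useadd & -4);	the word containing it
 *
 *	If useaddr == useadd the target is the high-order (big-endian first) halfword, so the
 *	delta is 0x00010000; otherwise it is the low-order halfword and the delta is 1.
 *	Example: slot 5 sits at byte offset 10, inside the word at offset 8, so
 *	useaddr != useadd and the count is adjusted with a delta of 1.
 */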
/*
 *	mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
 *
 *	This guy releases any mappings that exist for a physical page on a specified map.
 *	We get the lock on the phys_entry, and hold it throughout this whole routine.
 *	That way, no one can change the queue out from underneath us.  We keep fetching
 *	the physent's mapping anchor until it is null, then we're done.
 *
 *	For each mapping, we call the remove routine to remove it from the PTEG hash list and
 *	decrement the pmap's residency count.  Then we release the mapping back to the free list.
 */
void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) {	/* Remove all mappings from specified pmap for this physent */

	mapping			*mp, *mp_next, *mpv;
	spl_t			s;
	unsigned int	*useadd, *useaddr, uindx;

	s = splhigh();									/* Don't bother me */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
	}

	mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);

	while(mp) {										/* Keep going so long as there's another */

		mpv = hw_cpv(mp);							/* Get the virtual address */
		if(mpv->pmap != pmap) {
			mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
			continue;
		}
#if DEBUG
		if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
		(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

		uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);	/* Join seg # and top 3 bits of the API */
		useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];	/* Point to slot to bump */
		useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
		(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

		mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
		hw_rem_map(mp);								/* Remove the mapping */
		mapping_free(mpv);							/* Add mapping to the free list */
		mp = mp_next;
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	splx(s);
}
/*
 *	mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
 *
 *	This guy releases any mappings that exist for a physical page.
 *	We get the lock on the phys_entry, and hold it throughout this whole routine.
 *	That way, no one can change the queue out from underneath us.  We keep fetching
 *	the physent's mapping anchor until it is null, then we're done.
 *
 *	For each mapping, we call the remove routine to remove it from the PTEG hash list and
 *	decrement the pmap's residency count.  Then we release the mapping back to the free list.
 */
void mapping_purge(struct phys_entry *pp) {			/* Remove all mappings for this physent */

	mapping			*mp, *mpv;
	spl_t			s;
	int				i;
	unsigned int	*useadd, *useaddr, uindx;

	s = splhigh();									/* Don't bother me */
	debugLog2(3, pp->pte1, 0);						/* start mapping_purge */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
	}

	while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {	/* Keep going so long as there's another */

		mpv = hw_cpv(mp);							/* Get the virtual address */
#if DEBUG
		if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
		(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

		uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);	/* Join segment number and top 3 bits of the API */
		useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];	/* Point to slot to bump */
		useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
		(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

		for(i = 0; i < (pmapUsageMask + 1); i++) {	/* (TEST/DEBUG) */
			if((mpv->pmap->pmapUsage[i]) > 8192) {	/* (TEST/DEBUG) */
				panic("mapping_purge: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
					i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
			}
		}

		hw_rem_map(mp);								/* Remove the mapping */
		mapping_free(mpv);							/* Add mapping to the free list */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	debugLog2(4, pp->pte1, 0);						/* end mapping_purge */
	splx(s);										/* Was there something you needed? */
	return;											/* Tell them we did it */
}
/*
 *	mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
 *
 *	This routine takes the given parameters, builds a mapping block, and queues it into the
 *	correct lists.
 *
 *	The pp parameter can be null.  This allows us to make a mapping that is not
 *	associated with any physical page.  We may need this for certain I/O areas.
 *
 *	If the phys_entry address is null, we neither lock nor chain into it.
 *	If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
 */
mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) {	/* Make an address mapping */

	register mapping	*mp, *mpv;
	unsigned int		*useadd, *useaddr;
	int					i;
	spl_t				s;

	debugLog2(5, va, pa);							/* start mapping_make */

	mpv = mapping_alloc();							/* Get a spare mapping block */

	mpv->pmap = pmap;								/* Initialize the pmap pointer */
	mpv->physent = pp;								/* Initialize the pointer to the physical entry */
	mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the PTE */
	mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F);	/* Build the VSID */

	s = splhigh();									/* Don't bother from now on */

	mp = hw_cvp(mpv);								/* Get the physical address of this */

	if(pp && !locked) {								/* Is there a physical entry? Or do we already hold the lock? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);		/* Complain about timeout */
		}
	}

	if(pp) {										/* See if there is a physical entry */
		mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);	/* Move the old anchor to the new mapping's forward pointer */
		pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS);	/* Point the anchor at us.  Now we're on the list (keep the flags) */
	}

	hw_add_map(mp, pmap->space, va);				/* Stick it on the PTEG hash list */

	(void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1);	/* Increment the resident page count */
	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Increment the even or odd slot */

	for(i = 0; i < (pmapUsageMask + 1); i++) {		/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {		/* (TEST/DEBUG) */
			panic("mapping_make: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}

	if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* If we have one and we didn't hold on entry, unlock the physical entry */

	splx(s);										/* Ok for interruptions now */
	debugLog2(6, pmap->space, prot);				/* end mapping_make */
	return mpv;										/* Leave... */
}
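/*
 *	Worked example (added note; the values are illustrative, and the PTE valid bit is
 *	left to the hash-insertion code).  For va = 0x5FC3A000, pa = 0x01234000,
 *	space = 0x00123, attr = 2, and a protection where ppc_prot(prot) = 2:
 *
 *		PTEv = ((va >> 1) & 0x78000000)			= 0x28000000	segment number (va bits 31:28)
 *		     | (space << 7)						= 0x00009180	20-bit space ID
 *		     | ((va >> 22) & 0x3F)				= 0x0000003F	6-bit API
 *		     = 0x280091BF
 *
 *		PTEr = (pa & ~(PAGE_SIZE - 1)) | attr << 3 | 2 = 0x01234012
 *
 *	so the 24-bit VSID is effectively (segment << 20) | space, with the segment in the
 *	high four VSID bits as discussed in the VSID comment at the top of this file.
 */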
/*
 *	Enters optimal translations for odd-sized V=F blocks.
 *
 *	Builds a block map for each power-of-two hunk o' address
 *	that exists.  This is specific to the processor type.
 *	PPC uses BAT register size stuff.  Future PPC might have
 *	something else.
 *
 *	The supplied va is expected to be maxoptimal vs the supplied boundary.  We're too
 *	stupid to know otherwise so we only look at the va anyhow, so there...
 */
void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) {	/* Maps optimal autogenned blocks */

	register blokmap	*blm, *oblm;
	unsigned int		maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
	int					i, maxshft, nummax, minshft;

	kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, bnd, size, prot, attr);

	minsize = blokValid ^ (blokValid & (blokValid - 1));	/* Set minimum subblock size */
	maxsize = 0x80000000 >> cntlzw(blokValid);		/* Set maximum subblock size */

	minshft = 31 - cntlzw(minsize);					/* Shift to position minimum size */
	maxshft = 31 - cntlzw(blokValid);				/* Shift to position maximum size */

	leading = ((va + bnd - 1) & -bnd) - va;			/* Get size of leading area */
	trailing = size - leading;						/* Get size of trailing area */
	tomin = ((va + minsize - 1) & -minsize) - va;	/* Get size needed to round up to the minimum block size */

	kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin);	/* (TEST/DEBUG) */

	if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0);	/* Map up to minimum block size */

	va = va + tomin;								/* Adjust virtual start */
	pa = pa + tomin;								/* Adjust physical start */
	leading = leading - tomin;						/* Adjust leading size */

/*
 *	Some of this code is very classic PPC.  We need to fix this up.
 */

	leading = leading >> minshft;					/* Position for bit testing */
	cbsize = minsize;								/* Set the minimum size */

	for(i = 0; i < (maxshft - minshft + 1); i++) {	/* Cycle through all block sizes, small to large */

		if(leading & 1) {
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;						/* Bump up physical address */
			va = va + cbsize;						/* Bump up virtual address */
		}

		leading = leading >> 1;						/* Shift up to next size */
		cbsize = cbsize << 1;						/* Here too */
	}

	nummax = trailing >> maxshft;					/* Get number of max size blocks left */
	for(i = 0; i < nummax - 1; i++) {				/* Account for all max size blocks left but 1 */
		pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0);	/* Map up to next boundary */

		pa = pa + maxsize;							/* Bump up physical address */
		va = va + maxsize;							/* Bump up virtual address */
		trailing -= maxsize;						/* Back off what we just did */
	}

	cbsize = maxsize;								/* Start at maximum size */

	for(i = 0; i < (maxshft - minshft + 1); i++) {	/* Cycle through all block sizes, high to low */

		if(trailing & cbsize) {
			trailing &= ~cbsize;					/* Remove the block we are allocating */
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;						/* Bump up physical address */
			va = va + cbsize;						/* Bump up virtual address */
		}

		cbsize = cbsize >> 1;						/* Next size down */
	}

	if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0);	/* Map up to end */
}
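/*
 *	Worked example (added note; blokValid's actual value is defined elsewhere, so the
 *	number below is only illustrative).  Suppose blokValid = 0x00060000, i.e. the
 *	hardware supports 128KB and 256KB blocks:
 *
 *		minsize = blokValid ^ (blokValid & (blokValid - 1)) = 0x00020000	(isolates lowest set bit, 128KB)
 *		maxsize = 0x80000000 >> cntlzw(blokValid)           = 0x00040000	(isolates highest set bit, 256KB)
 *		minshft = 31 - cntlzw(minsize) = 17,   maxshft = 31 - cntlzw(blokValid) = 18
 *
 *	leading/trailing then split the request at the first address aligned to bnd, and the
 *	two loops above walk the power-of-two sizes between minshft and maxshft.
 */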
/*
 *	Enters translations for odd-sized V=F blocks.
 *
 *	Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	The higher level VM map should be locked to insure that we don't have a
 *	double diddle here.
 *
 *	We panic if we get a block that overlaps with another.  We do not merge adjacent
 *	blocks because removing any address within a block removes the entire block, and it
 *	would really mess things up if we trashed too much.
 *
 *	Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
 *	not be changed.  The block must be unmapped and then remapped with the new stuff.
 *	We also do not keep track of reference or change flags.
 *
 *	Blocks are kept in MRU order anchored from the pmap.  The chain is traversed only
 *	with interruptions and translation disabled and under the control of the lock located
 *	in the first block map.  MRU is used because it is expected that the same entry
 *	will be accessed repeatedly while PTEs are being generated to cover those addresses.
 */
void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

	register blokmap	*blm, *oblm, *oblm_virt;
	unsigned int		pg;

	kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);

	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
			kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n",	/* (TEST/DEBUG) */
				va + pg, pa + pg);
		}
		return;										/* All done */
	}

	blm = (blokmap *)mapping_alloc();				/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;		/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */
	blm->space = pmap->space;						/* Set the space (only needed for remove) */
	blm->blkFlags = flags;							/* Set the block's flags */

	kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);

	blm = (blokmap *)hw_cvp((mapping *)blm);		/* Get the physical address of this */

	kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);

	do {
		oblm = hw_add_blk(pmap, blm);
		if ((unsigned int)oblm & 2) {
			oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
			mapping_remove(pmap, oblm_virt->start);
		}
	} while ((unsigned int)oblm & 2);

	if (oblm) {
		oblm = (blokmap *)hw_cpv((mapping *) oblm);	/* Get the old block virtual address */
		blm = (blokmap *)hw_cpv((mapping *)blm);	/* Back to the virtual address of this */
		if((oblm->start != blm->start) ||			/* If we have a match, then this is a fault race and */
		   (oblm->end != blm->end) ||				/* is acceptable */
		   (oblm->PTEr != blm->PTEr))
			panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);	/* Otherwise, Squeak loudly and carry a big stick */
		mapping_free((struct mapping *)blm);
	}

	kprintf("pmap_map_block: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		pmap->bmaps);
}
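/*
 *	Worked example (added note): block bounds are page-rounded outward.  For a request
 *	with va = 0x10003400 and size = 0x00005000 (4KB pages):
 *
 *		blm->start = 0x10003400 & -PAGE_SIZE            = 0x10003000
 *		blm->end   = (0x10003000 + 0x5000 - 1) | 0xFFF  = 0x10007FFF
 *
 *	so the block covers five full pages, and removing any address in that range removes
 *	the whole block, as described in the header comment above.
 */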
/*
 *	Optimally enters translations for odd-sized V=F blocks.
 *
 *	Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	This one is different than pmap_map_block in that it will allocate its own virtual
 *	target address.  Rather than allocating a single block, it may also allocate multiple
 *	blocks that are power-of-two aligned/sized.  This allows hardware-level mapping that
 *	takes advantage of BAT maps or large page sizes.
 *
 *	Most considerations for pmap_map_block apply.
 */
kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
		vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an optimal autogenned block */

	register blokmap	*blm, *oblm;
	unsigned int		pg;
	kern_return_t		err;
	vm_offset_t			bnd;

	kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		map, pa, size, prot, attr);

	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {
			kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we died */
			return(err);							/* Pass back the error */
		}

		kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va);	/* (TEST/DEBUG) */

		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return(KERN_SUCCESS);						/* All done */
	}

	err = vm_map_block(map, va, &bnd, pa, size, prot);	/* Go get an optimal allocation */

	if(err == KERN_INVALID_ADDRESS) {				/* Can we try a brute force block mapping? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {
			kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err);	/* Say we died */
			return(err);							/* Pass back the error */
		}

		kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va);	/* (TEST/DEBUG) */

		pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0);	/* Set up a block mapped area */
		return KERN_SUCCESS;						/* All done now */
	}

	if(err != KERN_SUCCESS) {						/* We couldn't get any address range to map this... */
		kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we couldn't do it */
		return(err);
	}

	kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd);	/* (TEST/DEBUG) */

	mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr);	/* Go build the maps */
	return(KERN_SUCCESS);							/* All done */
}
/*
 *	Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
 *	blocks.
 *
 *	Once blocks are merged, they act like one block, i.e., if you remove it,
 *	it all goes...
 *
 *	This can only be used during boot.  Ain't no way we can handle SMP
 *	or preemption easily, so we restrict it.  We don't check either.  We
 *	assume only skilled professional programmers will attempt using this
 *	function.  We assume no responsibility, either real or imagined, for
 *	injury or death resulting from unauthorized use of this function.
 *
 *	No user serviceable parts inside.  Notice to be removed by end-user only,
 *	under penalty of applicable federal and state laws.
 *
 *	See descriptions of pmap_map_block.  Ignore the part where we say we panic for
 *	overlapping areas.  Note that we do panic if we can't merge.
 */
void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an autogenned block */

	register blokmap	*blm, *oblm;
	unsigned int		pg;
	spl_t				s;

	kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);

	s = splhigh();									/* Don't bother from now on */
	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return;										/* All done */
	}

	blm = (blokmap *)mapping_alloc();				/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;		/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */

	kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);

	blm = (blokmap *)hw_cvp((mapping *)blm);		/* Get the physical address of this */

	kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);

	if(oblm = hw_add_blk(pmap, blm)) {				/* Add to list and make sure we don't overlap anything */
		panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);	/* Squeak loudly and carry a big stick */
	}

	kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		pmap->bmaps);

	splx(s);										/* Ok for interruptions now */
}
/*
 *	void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 *
 *	Phys_entry is unlocked.
 */
void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {	/* Change protection of all mappings to page */

	spl_t spl;

	debugLog2(9, pp->pte1, prot);					/* start mapping_protect_phys */
	spl = splhigh();								/* No interruptions during this */
	if(!locked) {									/* Do we need to lock the physent? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);		/* Complain about timeout */
		}
	}

	hw_prot(pp, ppc_prot(prot));					/* Go set the protection on this physical page */

	if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	splx(spl);										/* Restore interrupt state */
	debugLog2(10, pp->pte1, 0);						/* end mapping_protect_phys */

	return;											/* Leave... */
}
/*
 *	void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
 *
 *	This routine takes a pmap and virtual address and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 */
void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) {	/* Change protection of a virtual page */

	register mapping	*mp, *mpv;
	spl_t				s;

	debugLog2(9, vaddr, pmap);						/* start mapping_protect */
	s = splhigh();									/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, vaddr);		/* Lock the physical entry for this mapping */

	if(!mp) {										/* Did we find one? */
		splx(s);									/* Restore the interrupt level */
		debugLog2(10, 0, 0);						/* end mapping_protect */
		return;										/* Didn't find any... */
	}

	if((unsigned int)mp & 1) {						/* Did we timeout? */
		panic("mapping_protect: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);									/* Restore the interrupt level */
		return;										/* Bad hair day... */
	}

	hw_prot_virt(mp, ppc_prot(prot));				/* Go set the protection on this virtual mapping */

	mpv = hw_cpv(mp);								/* Get virtual address of mapping */
	if(mpv->physent) {								/* If there is a physical page, */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}

	splx(s);										/* Restore interrupt state */
	debugLog2(10, mpv->PTEr, 0);					/* end mapping_protect */

	return;											/* Leave... */
}
/*
 *	mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
 *
 *	This routine takes a physical entry and sets the physical attributes.  There can be no mappings
 *	associated with this page when we do it.
 */

void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {	/* Sets the default physical page attributes */

	debugLog2(11, pp->pte1, prot);					/* start mapping_phys_attr */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
	}

	hw_phys_attr(pp, ppc_prot(prot), wimg);			/* Go set the default WIMG and protection */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	debugLog2(12, pp->pte1, wimg);					/* end mapping_phys_attr */

	return;											/* Leave... */
}
/*
 *	void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and invalidates
 *	any PTEs associated with them.
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_invall(struct phys_entry *pp) {		/* Clear all PTEs pointing to a physical page */

	hw_inv_all(pp);									/* Go invalidate all PTEs pointing to this physical page */

	return;											/* Leave... */
}
/*
 *	void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_clr_mod(struct phys_entry *pp) {		/* Clears the change bit of a physical page */

	hw_clr_mod(pp);									/* Go clear the change bit of a physical page */
	return;											/* Leave... */
}
/*
 *	void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_mod(struct phys_entry *pp) {		/* Sets the change bit of a physical page */

	hw_set_mod(pp);									/* Go set the change bit of a physical page */
	return;											/* Leave... */
}
/*
 *	void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled at entry.
 */

void mapping_clr_ref(struct phys_entry *pp) {		/* Clears the reference bit of a physical page */

	debugLog2(13, pp->pte1, 0);						/* start mapping_clr_ref */
	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry for this mapping */
		panic("Lock timeout getting lock on physical entry\n");	/* Just die... */
	}
	hw_clr_ref(pp);									/* Go clear the reference bit of a physical page */
	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock physical entry */
	debugLog2(14, pp->pte1, 0);						/* end mapping_clr_ref */
	return;											/* Leave... */
}
/*
 *	void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_ref(struct phys_entry *pp) {		/* Sets the reference bit of a physical page */

	hw_set_ref(pp);									/* Go set the reference bit of a physical page */
	return;											/* Leave... */
}
/*
 *	boolean_t mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the changed bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the changed bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_mod(struct phys_entry *pp) {	/* Tests the change bit of a physical page */

	return(hw_tst_mod(pp));							/* Go test the change bit of a physical page */
}
/*
 *	boolean_t mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_ref(struct phys_entry *pp) {	/* Tests the reference bit of a physical page */

	return(hw_tst_ref(pp));							/* Go test the reference bit of a physical page */
}
/*
 *	void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
 *
 *	Currently, this sets the default word 1 of the PTE.  The only bits set are the WIMG bits.
 */

void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) {	/* Initializes hw specific storage attributes */

	pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078);	/* Set the WIMG and phys addr in the default PTE1 */

	return;											/* Leave... */
}
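/*
 *	Worked example (added note; the wimg value is purely illustrative):  for
 *	pa = 0x01234567 and wimg = 0x5 (caching-inhibited and guarded):
 *
 *		pp->pte1 = (0x01234567 & -PAGE_SIZE) | ((0x5 << 3) & 0x00000078)
 *		         =  0x01234000               |  0x00000028
 *		         =  0x01234028
 *
 *	i.e. the physical page address with WIMG in the PTE1 WIMG field and the PP bits
 *	still zero until a protection is assigned.
 */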
/*
 *	mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
 *
 *	This routine frees any mapping blocks queued to mapCtl.mapcrel.  It also checks
 *	the number of free mappings remaining, and if below a threshold, replenishes them.
 *	The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
 *	a new one is allocated.
 *
 *	This routine allocates and/or releases memory and must be called from a safe place.
 *	Currently, vm_pageout_scan is the safest place.  We insure that the
 *	replenishment is not entered recursively (see mapCtl.mapcrecurse).
 */

thread_call_t				mapping_adjust_call;
static thread_call_data_t	mapping_adjust_call_data;
void mapping_adjust(void) {							/* Adjust free mappings */

	kern_return_t	retr = KERN_SUCCESS;
	mappingblok		*mb, *mbn;
	spl_t			s;
	int				allocsize;
	extern int		vm_page_free_count;

	if(mapCtl.mapcmin <= MAPPERBLOK) {
		mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;

		kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
		kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
			mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
	}

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_adjust - timeout getting control lock (1)\n");	/* Tell all and die */
	}

	if (mapping_adjust_call == NULL) {
		thread_call_setup(&mapping_adjust_call_data,
				(thread_call_func_t)mapping_adjust,
				(thread_call_param_t)NULL);
		mapping_adjust_call = &mapping_adjust_call_data;
	}

	while(1) {										/* Keep going until we've got enough */

		allocsize = mapCtl.mapcmin - mapCtl.mapcfree;	/* Figure out how much we need */
		if(allocsize < 1) break;					/* Leave if we have all we need */

		if((unsigned int)(mbn = mapCtl.mapcrel)) {	/* Can we rescue a free one? */
			mapCtl.mapcrel = mbn->nextblok;			/* Dequeue it */
			mapCtl.mapcreln--;						/* Back off the count */
			allocsize = MAPPERBLOK;					/* Show we allocated one block */
		}
		else {										/* No free ones, try to get it */

			allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get the number of pages we need */

			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
			splx(s);								/* Restore 'rupts */

			for(; allocsize > 0; allocsize >>= 1) {	/* Try allocating in descending halves */
				retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);	/* Find a virtual address to use */
				if((retr != KERN_SUCCESS) && (allocsize == 1)) {	/* Did we find any memory at all? */
				}
				if(retr == KERN_SUCCESS) break;		/* We got some memory, bail out... */
			}

			allocsize = allocsize * MAPPERBLOK;		/* Convert pages to number of maps allocated */
			s = splhigh();							/* Don't bother from now on */
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_adjust - timeout getting control lock (2)\n");	/* Tell all and die */
			}
		}

		if (retr != KERN_SUCCESS)
			break;									/* Failed to allocate, bail out... */

		for(; allocsize > 0; allocsize -= MAPPERBLOK) {	/* Release one block at a time */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
			mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);	/* Point to the next slot */
		}

		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	if(mapCtl.mapcholdoff) {						/* Should we hold off this release? */
		mapCtl.mapcrecurse = 0;						/* We are done now */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);									/* Restore 'rupts */
		return;										/* Return... */
	}

	mbn = mapCtl.mapcrel;							/* Get first pending release block */
	mapCtl.mapcrel = 0;								/* Dequeue them */
	mapCtl.mapcreln = 0;							/* Set count to 0 */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	while((unsigned int)mbn) {						/* Toss 'em all */
		mb = mbn->nextblok;							/* Get the next */
		kmem_free(mapping_map, (vm_offset_t)mbn, PAGE_SIZE);	/* Release this mapping block */
		mbn = mb;									/* Chain to the next */
	}

	__asm__ volatile("sync");						/* Make sure all is well */
	mapCtl.mapcrecurse = 0;							/* We are done now */
}
/*
 *	mapping_free(mapping *mp) - release a mapping to the free list
 *
 *	This routine takes a mapping and adds it to the free list.
 *	If this mapping makes the block non-empty, we queue it to the free block list.
 *	NOTE: we might want to queue it to the end to quell the pathological
 *	case when we get a mapping and free it repeatedly causing the block to chain and unchain.
 *	If this release fills a block and we are above the threshold, we release the block.
 */
void mapping_free(struct mapping *mp) {				/* Release a mapping */

	mappingblok		*mb, *mbn;
	spl_t			s;
	unsigned int	full, mindx;

	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;	/* Get index to mapping */
	mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);	/* Point to the mapping block */

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_free - timeout getting control lock\n");	/* Tell all and die */
	}

	full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);	/* See if full now */
	mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));	/* Flip on the free bit */

	if(full) {										/* If it was full before this: */
		mb->nextblok = mapCtl.mapcnext;				/* Move head of list to us */
		mapCtl.mapcnext = mb;						/* Chain us to the head of the list */
		if(!((unsigned int)mapCtl.mapclast))
			mapCtl.mapclast = mb;
	}

	mapCtl.mapcfree++;								/* Bump free count */
	mapCtl.mapcinuse--;								/* Decrement in use count */

	mapCtl.mapcfreec++;								/* Count total calls */

	if(mapCtl.mapcfree > mapCtl.mapcmin) {			/* Should we consider releasing this? */
		if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
			== 0xFFFFFFFF) {						/* See if empty now */

			if(mapCtl.mapcnext == mb) {				/* Are we first on the list? */
				mapCtl.mapcnext = mb->nextblok;		/* Unchain us */
				if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* If last, remove last */
			}
			else {									/* We're not first */
				for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {	/* Search for our block */
					if(mbn->nextblok == mb) break;	/* Is the next one ours? */
				}
				if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
				mbn->nextblok = mb->nextblok;		/* Dequeue us */
				if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;	/* If last, make our predecessor last */
			}

			if(mb->mapblokflags & mbPerm) {			/* Is this permanently assigned? */
				mb->nextblok = mapCtl.mapcnext;		/* Move chain head to us */
				mapCtl.mapcnext = mb;				/* Chain us to the head */
				if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb;	/* If last, make us so */
			}
			else {
				mapCtl.mapcfree -= MAPPERBLOK;		/* Remove the block from the free count */
				mapCtl.mapcreln++;					/* Count on release list */
				mb->nextblok = mapCtl.mapcrel;		/* Move pointer */
				mapCtl.mapcrel = mb;				/* Chain us in front */
			}
		}
	}

	if(mapCtl.mapcreln > MAPFRTHRSH) {				/* Do we have way too many releasable mappings? */
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go toss some */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	return;											/* Bye, dude... */
}
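/*
 *	Note added for clarity: each mapping is 32 bytes, so a 4KB block holds 128 slots;
 *	slot 0 is the block header and slots 1-127 are mappings.  mapblokfree[] is a
 *	4-word bitmap with one bit per slot, a set bit meaning "free".
 *
 *	Example: a mapping at page offset 0x0A60 gives mindx = 0x0A60 >> 5 = 83, so its
 *	free bit is in mapblokfree[83 >> 5], i.e. word 2, under mask 0x80000000 >> (83 & 31)
 *	= 0x00001000.  The "empty" test above ORs 0x80000000 into word 0 so that the
 *	permanently-used header slot doesn't stop a block from looking empty, and the
 *	initializer in mapping_free_init below sets word 0 to 0x7FFFFFFF for the same reason.
 */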
/*
 *	mapping_alloc(void) - obtain a mapping from the free list
 *
 *	This routine takes a mapping off of the free list and returns its address.
 *
 *	We do this by finding a free entry in the first block and allocating it.
 *	If this allocation empties the block, we remove it from the free list.
 *	If this allocation drops the total number of free entries below a threshold,
 *	we allocate a new block.
 */
mapping *mapping_alloc(void) {						/* Obtain a mapping */

	register mapping	*mp;
	mappingblok			*mb, *mbn;
	spl_t				s;
	int					mindx;

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_alloc - timeout getting control lock\n");	/* Tell all and die */
	}

	if(!(mb = mapCtl.mapcnext)) {					/* Get the first block entry */

		unsigned int			i;
		struct mappingflush		mappingflush;
		PCA						*pca_min, *pca_max;
		PCA						*pca_base;

		pca_min = (PCA *)(hash_table_base + hash_table_size);
		pca_max = (PCA *)(hash_table_base + hash_table_size + hash_table_size);

		while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
			mapCtl.mapcflush.mappingcnt = 0;
			pca_base = mapCtl.mapcflush.pcaptr;
			do {
				hw_select_mappings(&mapCtl.mapcflush);
				mapCtl.mapcflush.pcaptr++;
				if (mapCtl.mapcflush.pcaptr >= pca_max)
					mapCtl.mapcflush.pcaptr = pca_min;
			} while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));

			if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
				hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
				panic("mapping_alloc - all mappings are wired\n");
			}
			mappingflush = mapCtl.mapcflush;
			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);

			for (i = 0; i < mappingflush.mappingcnt; i++)
				mapping_remove(mappingflush.mapping[i].pmap,
						mappingflush.mapping[i].offset);

			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
				panic("mapping_alloc - timeout getting control lock\n");
			}
		}
		mb = mapCtl.mapcnext;
	}

	if(!(mindx = mapalc(mb))) {						/* Allocate a slot */
		panic("mapping_alloc - empty mapping block detected at %08X\n", mb);	/* Not allowed to find none */
	}

	if(mindx < 0) {									/* Did we just take the last one? */
		mindx = -mindx;								/* Make positive */
		mapCtl.mapcnext = mb->nextblok;				/* Remove us from the list */
		if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* Removed the last one */
	}

	mapCtl.mapcfree--;								/* Decrement free count */
	mapCtl.mapcinuse++;								/* Bump in use count */

	mapCtl.mapcallocc++;							/* Count total calls */

/*
 *	Note: in the following code, we will attempt to rescue blocks only one at a time.
 *	Eventually, after a few more mapping_alloc calls, we will catch up.  If there are none
 *	rescuable, we will kick the misc scan which will allocate some for us.  We only do this
 *	if we haven't already done it.
 *	For early boot, we are set up to only rescue one block at a time.  This is because we prime
 *	the release list with as much as we need until threads start.
 */
	if(mapCtl.mapcfree < mapCtl.mapcmin) {			/* See if we need to replenish */
		if(mbn = mapCtl.mapcrel) {					/* Try to rescue a block from impending doom */
			mapCtl.mapcrel = mbn->nextblok;			/* Pop the queue */
			mapCtl.mapcreln--;						/* Back off the count */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
		}
		else {										/* We need to replenish */
			if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
				if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
					thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
				}
			}
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	mp = &((mapping *)mb)[mindx];					/* Point to the allocated mapping */
	__asm__ volatile("dcbz 0,%0" : : "r" (mp));		/* Clean it up */
	return mp;										/* Send it back... */
}
void consider_mapping_adjust(void)
{
	spl_t s;

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("consider_mapping_adjust - timeout getting control lock\n");	/* Tell all and die */
	}

	if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go allocate some more */
		}
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */
}
/*
 *	void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
 *
 *	The mapping block is a page size area on a page boundary.  It contains 1 header and 127
 *	mappings.  This call adds and initializes a block for use.
 *
 *	The header contains a chain link, bit maps, a virtual to real translation mask, and
 *	some statistics.  Bit maps map each slot on the page (bit 0 is not used because it
 *	corresponds to the header).  The translation mask is the XOR of the virtual and real
 *	addresses (needless to say, the block must be wired).
 *
 *	We handle these mappings the same way as saveareas: the block is only on the chain so
 *	long as there are free entries in it.
 *
 *	Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
 *	mappings.  Blocks marked PERM won't ever be released.
 *
 *	If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
 *	list.  We do this only at start up time.  This is done because we only allocate blocks
 *	in the pageout scan and it doesn't start up until after we run out of the initial mappings.
 *	Therefore, we need to preallocate a bunch, but we don't want them to be permanent.  If we put
 *	them on the release queue, the allocate routine will rescue them.  Then when the
 *	pageout scan starts, all extra ones will be released.
 */
void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
						/* Sets up a block of mappings.
						   perm indicates whether the block can be released
						   or goes straight to the release queue.
						   locked indicates whether the control lock is already held */

	mappingblok		*mb;
	spl_t			s;
	unsigned int	raddr;

	mb = (mappingblok *)mbl;						/* Start of area */

	if(perm >= 0) {									/* See if we need to initialize the block */
		if(perm) {
			raddr = (unsigned int)mbl;				/* Perm means V=R */
			mb->mapblokflags = mbPerm;				/* Set perm */
		}
		else {										/* Not permanent */
			raddr = kvtophys(mbl);					/* Get real address */
			mb->mapblokflags = 0;					/* Set not perm */
		}

		mb->mapblokvrswap = raddr ^ (unsigned int)mbl;	/* Form translation mask */

		mb->mapblokfree[0] = 0x7FFFFFFF;			/* Set first 32 (minus 1) free */
		mb->mapblokfree[1] = 0xFFFFFFFF;			/* Set next 32 free */
		mb->mapblokfree[2] = 0xFFFFFFFF;			/* Set next 32 free */
		mb->mapblokfree[3] = 0xFFFFFFFF;			/* Set next 32 free */
	}

	s = splhigh();									/* Don't bother from now on */
	if(!locked) {									/* Do we need the lock? */
		if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {		/* Lock the control header */
			panic("mapping_free_init - timeout getting control lock\n");	/* Tell all and die */
		}
	}

	if(perm < 0) {									/* Direct to release queue? */
		mb->nextblok = mapCtl.mapcrel;				/* Move forward pointer */
		mapCtl.mapcrel = mb;						/* Queue us on in */
		mapCtl.mapcreln++;							/* Count the free block */
	}
	else {											/* Add to the free list */

		mb->nextblok = 0;							/* We always add to the end */
		mapCtl.mapcfree += MAPPERBLOK;				/* Bump count */

		if(!((unsigned int)mapCtl.mapcnext)) {		/* First entry on list? */
			mapCtl.mapcnext = mapCtl.mapclast = mb;	/* Chain to us */
		}
		else {										/* We are not the first */
			mapCtl.mapclast->nextblok = mb;			/* Point the last to us */
			mapCtl.mapclast = mb;					/* We are now last */
		}
	}

	if(!locked) {									/* Do we need to unlock? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	}
	splx(s);										/* Restore 'rupts */
	return;											/* All done, leave... */
}
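
/*
 *	The block header set up above records everything needed to locate and translate a slot:
 *	the free maps and the virtual-to-real XOR mask.  The helpers below are an illustrative
 *	sketch of that arithmetic (the names are not part of this file), assuming a 4K page,
 *	so each slot is PAGE_SIZE / (MAPPERBLOK + 1) = 32 bytes.
 */
#if 0	/* Illustrative sketch only -- mapping_alloc manipulates mapblokfree[] directly. */
static mapping *ex_slot_virt(mappingblok *mb, unsigned int mindx) {		/* Virtual address of slot 1..MAPPERBLOK */
	return &((mapping *)mb)[mindx];										/* Slot 0 is the block header itself */
}

static unsigned int ex_slot_real(mappingblok *mb, unsigned int mindx) {	/* Real address of the same slot */
	return ((unsigned int)ex_slot_virt(mb, mindx)) ^ mb->mapblokvrswap;	/* vrswap is (virtual XOR real) of the block */
}

static unsigned int ex_slot_is_free(mappingblok *mb, unsigned int mindx) {	/* Non-zero if the slot is free */
	return mb->mapblokfree[mindx >> 5] & (0x80000000 >> (mindx & 31));		/* PPC bit order: bit 0 is the MSB */
}
#endif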
/*
 *	void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
 *
 *	No locks can be held, because we allocate memory here.
 *	This routine needs a corresponding mapping_relpre call to remove the
 *	hold-off flag so that the adjust routine will free the extra mapping
 *	blocks on the release list.  I don't like this, but I don't know
 *	how else to do this for now...
 *
 */
void mapping_prealloc(unsigned int size) {			/* Preallocates mappings for a large request */

	int				nmapb, i;
	kern_return_t	retr;
	mappingblok		*mbn;
	spl_t			s;

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {		/* Lock the control header */
		panic("mapping_prealloc - timeout getting control lock\n");	/* Tell all and die */
	}

	nmapb = (size >> 12) + mapCtl.mapcmin;			/* Get number of entries needed for this and the minimum */

	mapCtl.mapcholdoff++;							/* Bump the hold-off count */

	if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {	/* Do we already have enough? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);									/* Restore 'rupts */
		return;
	}
	if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);									/* Restore 'rupts */
		return;
	}
	nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get number of blocks to get */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	for(i = 0; i < nmapb; i++) {					/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {					/* Did we get some memory? */
			break;
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto the release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));

	mapCtl.mapcrecurse = 0;							/* We are done now */
}
/*
 *	void mapping_relpre(void) - Releases preallocation release hold-off
 *
 *	This routine removes the hold-off flag so that the adjust routine will free the
 *	extra mapping blocks on the release list.  I don't like this, but I don't know
 *	how else to do this for now...
 *
 */
void mapping_relpre(void) {							/* Releases release hold-off */

	spl_t	s;

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {		/* Lock the control header */
		panic("mapping_relpre - timeout getting control lock\n");	/* Tell all and die */
	}
	if(--mapCtl.mapcholdoff < 0) {					/* Back down the hold-off count */
		panic("mapping_relpre: hold-off count went negative\n");
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */
}
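
/*
 *	mapping_prealloc and mapping_relpre are meant to bracket a large request: the first bumps
 *	mapcholdoff and stocks the release list, the second drops the hold-off so mapping_adjust
 *	may trim the extra blocks again.  The sketch below is an illustrative usage pattern only;
 *	the function name is a placeholder for any path that will consume many mappings at once.
 */
#if 0	/* Illustrative usage only. */
static void ex_map_large_range(unsigned int size) {
	mapping_prealloc(size);			/* Make sure enough mapping blocks are on hand for 'size' bytes */

	/* ... enter the individual mappings for the large range here ... */

	mapping_relpre();				/* Drop the hold-off so mapping_adjust may release the extras again */
}
#endif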
/*
 *	void mapping_free_prime(void) - Primes the mapping block release list
 *
 *	See mapping_free_init.
 *	No locks can be held, because we allocate memory here.
 *	One processor running only.
 *
 */
void mapping_free_prime(void) {						/* Primes the mapping block release list */

	int				nmapb, i;
	kern_return_t	retr;
	mappingblok		*mbn;
	vm_offset_t		mapping_min;

	retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
						 FALSE, TRUE, &mapping_map);

	if (retr != KERN_SUCCESS)
		panic("mapping_free_prime: kmem_suballoc failed");

	nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get permanent allocation */
	nmapb = nmapb * 4;								/* Get 4 times our initial allocation */

	kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
		mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);

	for(i = 0; i < nmapb; i++) {					/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {					/* Did we get some memory? */
			panic("Whoops...  Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
}
void
mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		       vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = mapCtl.mapcinuse;
	*cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
	*max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
	*elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));
	*alloc_size = PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
}
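
/*
 *	The sizes reported above fall straight out of the block layout: one header plus MAPPERBLOK
 *	mappings per page.  The sketch below works the same arithmetic with assumed values
 *	(PAGE_SIZE of 4096, MAPPERBLOK of 127); it is illustrative only.
 */
#if 0	/* Illustrative arithmetic only. */
static void ex_zone_sizes(void) {
	vm_size_t ex_elem = PAGE_SIZE / (MAPPERBLOK + 1);			/* 4096 / 128 = 32 bytes per mapping slot */
	vm_size_t ex_cur  = (ex_elem * (mapCtl.mapcinuse + mapCtl.mapcfree))
	                  + (PAGE_SIZE * mapCtl.mapcreln);			/* Blocks parked on the release list count as whole pages */
	vm_size_t ex_max  = ex_elem * mapCtl.mapcmaxalloc;			/* High-water mark maintained by the allocators above */

	(void)ex_cur; (void)ex_max;									/* Quiet unused-variable warnings */
}
#endif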
/*
 *	vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
 *
 *	Gets a lock on the physical entry.  Then it searches the list of attached mappings for one with
 *	the same space.  If it finds it, it returns the virtual address.
 *
 *	Note that this will fail if the pmap has nested pmaps in it.  Fact is, I'll check
 *	for it and fail it myself...
 */
vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) {	/* Finds first virtual mapping of a physical page in a space */

	register mapping	*mp, *mpv;
	vm_offset_t			va;
	spl_t				s;

	if(pmap->vflags & pmapAltSeg) return 0;			/* If there are nested pmaps, fail immediately */

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
		splx(s);									/* Restore 'rupts */
		panic("mapping_p2v: timeout getting lock on physent\n");	/* Arrrgghhhh! */
		return(0);									/* Should die before here */
	}

	va = 0;											/* Assume failure */

	for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) {	/* Scan 'em all */

		if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue;	/* Skip all the rest if this is not the right space... */

		va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		va = va | ((mpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		va = va | ((mpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;										/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	splx(s);										/* Restore 'rupts */
	return(va);										/* Return the result or 0... */
}
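
/*
 *	The loop above rebuilds a virtual address from the PTE alone: un-hashing the PTE hash-group
 *	address with the space ID recovers the wrapped page index, the VSID supplies the segment
 *	number, and the API supplies the next address bits.  The sketch below repeats the same
 *	arithmetic with the intermediate values named; it is illustrative only.
 */
#if 0	/* Illustrative sketch only -- identical arithmetic to the loop above. */
static vm_offset_t ex_pte_to_va(mapping *mpv, pmap_t pmap) {
	unsigned int grp, hashed, segno, api;

	grp    = ((unsigned int)mpv->PTEhash & -64) << 6;		/* Hash group address folded into page-index position */
	hashed = (grp ^ (pmap->space << 12)) & 0x003FF000;		/* Undo the VSID hash to recover the wrapped page index */
	segno  = (mpv->PTEv << 1)  & 0xF0000000;				/* Segment number, from the low bits of the VSID */
	api    = (mpv->PTEv << 22) & 0x0FC00000;				/* Abbreviated page index, from the PTE */

	return segno | api | hashed;							/* Reassembled virtual address */
}
#endif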
/*
 *	Convert a kernel virtual address to a physical address
 */
vm_offset_t kvtophys(vm_offset_t va) {

	register mapping		*mp, *mpv;
	register blokmap		*bmp;
	register vm_offset_t	pa;
	spl_t					s;

	s = splhigh();									/* Don't bother from now on */
	mp = hw_lock_phys_vir(PPC_SID_KERNEL, va);		/* Find mapping and lock the physical entry for this mapping */

	if((unsigned int)mp & 1) {						/* Did the lock on the phys entry time out? */
		splx(s);									/* Restore 'rupts */
		panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va);	/* Scream bloody murder! */
	}

	if(!mp) {										/* If it was not a normal page */
		pa = hw_cvp_blk(kernel_pmap, va);			/* Try to convert odd-sized page (returns 0 if not found) */
		splx(s);									/* Restore 'rupts */
		return pa;									/* Return physical address */
	}

	mpv = hw_cpv(mp);								/* Convert to virtual addressing */

	if(!mpv->physent) {								/* Was there a physical entry? */
		pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* No physent; get physical address from the PTE image */
	}
	else {
		pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* Get physical address from physent */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}

	splx(s);										/* Restore 'rupts */
	return pa;										/* Return the physical address... */
}
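
/*
 *	The physical address above is assembled by masking the real page number with -PAGE_SIZE
 *	(all ones above the page offset, since PAGE_SIZE is a power of two) and ORing the byte
 *	offset back in.  The sketch below works one example with assumed values; it is
 *	illustrative only.
 */
#if 0	/* Illustrative arithmetic only, assuming PAGE_SIZE == 4096 (so -PAGE_SIZE == 0xFFFFF000). */
static vm_offset_t ex_compose_pa(void) {
	unsigned int ex_pte1 = 0x12345C07;				/* Hypothetical pte1: real page number plus low-order status bits */
	vm_offset_t  ex_va   = 0x0ABCD123;				/* Hypothetical kernel virtual address */

	return (ex_pte1 & -PAGE_SIZE)					/* 0x12345000 -- strip the status bits */
	     | (ex_va   & (PAGE_SIZE - 1));				/* 0x00000123 -- keep the byte offset; result 0x12345123 */
}
#endif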
/*
 *	Convert a physical address to a kernel virtual address if
 *	there is a mapping; otherwise return NULL
 */
vm_offset_t phystokv(vm_offset_t pa) {

	struct phys_entry	*pp;
	vm_offset_t			va;

	pp = pmap_find_physentry(pa);					/* Find the physical entry */
	if (PHYS_NULL == pp) {
		return (vm_offset_t)NULL;					/* If none, return null */
	}
	if(!(va = mapping_p2v(kernel_pmap, pp))) {
		return 0;									/* Can't find it, return 0... */
	}
	return (va | (pa & (PAGE_SIZE - 1)));			/* Build and return VADDR... */
}
/*
 *	void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
 *	page 0 access for the current thread.
 *
 *	If the parameter is TRUE, faults are ignored.
 *	If the parameter is FALSE, faults are honored.
 */
void ignore_zero_fault(boolean_t type) {			/* Sets up to ignore or honor any fault on page 0 access for the current thread */

	if(type) current_act()->mact.specFlags |= ignoreZeroFault;	/* Ignore faults on page 0 */
	else     current_act()->mact.specFlags &= ~ignoreZeroFault;	/* Honor faults on page 0 */

	return;											/* All done, leave... */
}
/*
 *	Allocates a range of virtual addresses in a map as optimally as
 *	possible for block mapping.  The start address is aligned such
 *	that a minimum number of power-of-two sized/aligned blocks is
 *	required to cover the entire range.
 *
 *	We also use a mask of valid block sizes to determine optimality.
 *
 *	Note that the passed-in pa is not actually mapped to the selected va;
 *	rather, it is used to figure the optimal boundary.  The actual
 *	V to R mapping is done externally.
 *
 *	This function will return KERN_INVALID_ADDRESS if an optimal address
 *	cannot be found.  It is not necessarily a fatal error; the caller may
 *	still be able to do a non-optimal assignment.
 */
kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
	vm_size_t size, vm_prot_t prot) {

	vm_map_entry_t	entry, next, tmp_entry, new_entry;
	vm_offset_t		start, end, algnpa, endadr, strtadr, curradr;
	vm_offset_t		boundary;

	unsigned int	maxsize, minsize, leading, trailing;

	assert(page_aligned(pa));
	assert(page_aligned(size));

	if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT);	/* Dude, like we need a target map */

	minsize = blokValid ^ (blokValid & (blokValid - 1));	/* Set minimum subblock size */
	maxsize = 0x80000000 >> cntlzw(blokValid);				/* Set maximum subblock size */

	boundary = 0x80000000 >> cntlzw(size);			/* Get optimal boundary */
	if(boundary > maxsize) boundary = maxsize;		/* Pin this at maximum supported hardware size */

	vm_map_lock(map);								/* No touchee no mapee */

	for(; boundary > minsize; boundary >>= 1) {		/* Try all optimizations until we find one */
		if(!(boundary & blokValid)) continue;		/* Skip unavailable block sizes */
		algnpa = (pa + boundary - 1) & -boundary;	/* Round physical up */
		leading = algnpa - pa;						/* Get leading size */

		curradr = 0;								/* Start low */

		while(1) {									/* Try all possible values for this opt level */

			curradr = curradr + boundary;			/* Get the next optimal address */
			strtadr = curradr - leading;			/* Calculate start of optimal range */
			endadr = strtadr + size;				/* And now the end */

			if((curradr < boundary) ||				/* Did address wrap here? */
			   (strtadr > curradr) ||				/* How about this way? */
			   (endadr < strtadr)) break;			/* We wrapped, try next lower optimization... */

			if(strtadr < map->min_offset) continue;	/* Jump to the next higher slot... */
			if(endadr > map->max_offset) break;		/* No room right now... */

			if(vm_map_lookup_entry(map, strtadr, &entry)) continue;	/* Find slot, continue if allocated... */

			next = entry->vme_next;					/* Get the next entry */
			if((next == vm_map_to_entry(map)) ||	/* Are we the last entry? */
			   (next->vme_start >= endadr)) {		/* or do we end before the next entry? */

				new_entry = vm_map_entry_insert(map, entry, strtadr, endadr,	/* Yes, carve out our entry */
					VM_OBJECT_NULL,					/* No object backs this range */
					0,								/* Offset into object of 0 */
					FALSE,							/* No copy needed */
					FALSE,							/* Not shared */
					FALSE,							/* Not in transition */
					prot,							/* Set the protection to requested */
					prot,							/* We can't change protection */
					VM_BEHAVIOR_DEFAULT,			/* Use default behavior, but makes no never mind,
													   'cause we don't page in this area */
					VM_INHERIT_DEFAULT,				/* Default inheritance */
					0);								/* Nothing is wired */

				vm_map_unlock(map);					/* Let the world see it all */
				*va = strtadr;						/* Tell everyone */
				*bnd = boundary;					/* Say what boundary we are aligned to */
				return(KERN_SUCCESS);				/* Leave, all is right with the world... */
			}
		}
	}

	vm_map_unlock(map);								/* Couldn't find a slot */
	return(KERN_INVALID_ADDRESS);
}
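
/*
 *	The candidate boundaries above come from two bit tricks: x ^ (x & (x - 1)) isolates the
 *	lowest set bit of blokValid (the smallest supported block), while 0x80000000 >> cntlzw(x)
 *	isolates the highest set bit (the largest supported block, and the first boundary tried
 *	for the request size).  The sketch below works one example with an assumed blokValid
 *	value; it is illustrative only.
 */
#if 0	/* Illustrative arithmetic only. */
static void ex_boundary_math(void) {
	unsigned int ex_valid = 0x02020000;							/* Hypothetical mask: 32MB and 128KB block sizes supported */
	unsigned int ex_min   = ex_valid ^ (ex_valid & (ex_valid - 1));	/* 0x00020000 (128KB): lowest set bit of the mask */
	unsigned int ex_max   = 0x80000000 >> cntlzw(ex_valid);			/* 0x02000000 (32MB):  highest set bit of the mask */

	unsigned int ex_size  = 0x00500000;							/* A 5MB request... */
	unsigned int ex_bnd   = 0x80000000 >> cntlzw(ex_size);		/* ...starts with a 4MB boundary (0x00400000) */
	if(ex_bnd > ex_max) ex_bnd = ex_max;						/* Pinned to the largest supported block size */

	(void)ex_min; (void)ex_bnd;
}
#endif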
/*
 *	Copies data from a physical page to a virtual page.  This is used to
 *	move data from the kernel to user state.
 *
 *	Note that it is invalid to have a source that spans a page boundary.
 *	We don't check protection either.
 *	And we don't handle a block-mapped sink address either.
 *
 */
kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {

	vm_map_t			map;
	kern_return_t		ret;
	unsigned int		spaceid;
	unsigned int		left, csize;
	vm_offset_t			pa;
	register mapping	*mpv, *mp;
	spl_t				s;

	if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE;	/* We don't allow a source page crosser */
	map = current_act()->map;						/* Get the current map */

	while(size) {									/* Keep going until it is all copied */
		s = splhigh();								/* Don't bother me */

		spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28];	/* Get space ID. Don't bother to clean top bits */

		mp = hw_lock_phys_vir(spaceid, sink);		/* Lock the physical entry for the sink */
		if(!mp) {									/* Was it there? */
			splx(s);								/* Restore the interrupt level */
			ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in... */
			if (ret == KERN_SUCCESS) continue;		/* We got it in, try again to find it... */

			return KERN_FAILURE;					/* Didn't find any, return no good... */
		}
		if((unsigned int)mp & 1) {					/* Did we timeout? */
			panic("copyp2v: timeout locking physical entry for virtual address (%08X)\n", sink);	/* Yeah, scream about it! */
			splx(s);								/* Restore the interrupt level */
			return KERN_FAILURE;					/* Bad hair day, return FALSE... */
		}

		mpv = hw_cpv(mp);							/* Convert mapping block to virtual */

		if(mpv->PTEr & 1) {							/* Are we write protected? yes, could indicate COW */
			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the sink */
			splx(s);								/* Restore the interrupt level */
			ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Check for a COW area */
			if (ret == KERN_SUCCESS) continue;		/* We got it in, try again to find it... */
			return KERN_FAILURE;					/* Didn't find any, return no good... */
		}
		left = PAGE_SIZE - (sink & PAGE_MASK);		/* Get amount left on sink page */

		csize = size < left ? size : left;			/* Set amount to copy this pass */

		pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK));	/* Get physical address of sink */

		bcopy_physvir((char *)source, (char *)pa, csize);	/* Do a physical copy, virtually */

		hw_set_mod(mpv->physent);					/* Go set the change of the sink */

		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the sink */
		splx(s);									/* Open up for interrupts */

		sink   += csize;							/* Move up to start of next page */
		source += csize;							/* Move up source */
		size   -= csize;							/* Set amount for next pass */
	}
	return KERN_SUCCESS;
}
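
/*
 *	The first test above rejects any source that crosses a page: XORing the first and last
 *	byte addresses leaves bits above the page offset exactly when the two addresses sit on
 *	different pages.  The sketch below works both cases with assumed values; it is
 *	illustrative only.
 */
#if 0	/* Illustrative arithmetic only, assuming PAGE_SIZE == 4096. */
static void ex_page_crosser(void) {
	vm_offset_t ex_src = 0x00001F00;

	unsigned int ex_ok  = (ex_src ^ (ex_src + 0x100 - 1)) & -PAGE_SIZE;	/* 0x1F00..0x1FFF: one page,  result 0      -> allowed  */
	unsigned int ex_bad = (ex_src ^ (ex_src + 0x200 - 1)) & -PAGE_SIZE;	/* 0x1F00..0x20FF: crosses,   result 0x3000 -> rejected */

	(void)ex_ok; (void)ex_bad;
}
#endif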
/*
 *	Copy 'size' bytes from physical to physical address;
 *	the caller must validate the physical ranges.
 *
 *	if flush_action == 0, no cache flush necessary
 *	if flush_action == 1, flush the source
 *	if flush_action == 2, flush the dest
 *	if flush_action == 3, flush both source and dest
 */
kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {

	switch(flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}
	bcopy_phys((char *)source, (char *)dest, size);	/* Do a physical copy */

	switch(flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	return KERN_SUCCESS;
}
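
/*
 *	An illustrative call follows.  Flushing both sides (flush_action == 3) is the conservative
 *	choice when neither range is known to be clean in the data cache; the addresses below are
 *	placeholders only.
 */
#if 0	/* Illustrative usage only. */
static void ex_copy_page(void) {
	vm_offset_t ex_from = 0x00100000;				/* Hypothetical source physical page */
	vm_offset_t ex_to   = 0x00200000;				/* Hypothetical destination physical page */

	(void)copyp2p(ex_from, ex_to, PAGE_SIZE, 3);	/* Copy one page, flushing both source and dest */
}
#endif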
/*
 *	Dumps out the mapping stuff associated with a virtual address
 */
void dumpaddr(space_t space, vm_offset_t va) {

	register mapping	*mp, *mpv;
	spl_t				s;

	s = splhigh();									/* Don't bother me */

	mp = hw_lock_phys_vir(space, va);				/* Lock the physical entry for this mapping */
	if(!mp) {										/* Did we find one? */
		splx(s);									/* Restore the interrupt level */
		printf("dumpaddr: virtual address (%08X) not mapped\n", va);
		return;										/* Didn't find any, return FALSE... */
	}
	if((unsigned int)mp & 1) {						/* Did we timeout? */
		panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		splx(s);									/* Restore the interrupt level */
		return;										/* Bad hair day, return FALSE... */
	}
	printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va);	/* Say what address we're dumping */
	mpv = hw_cpv(mp);								/* Get virtual address of mapping */

	dumpmapping(mpv);								/* Dump out the mapping */
	if(mpv->physent) dumpphys(mpv->physent);		/* Dump the physical chain if there is one */

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock physical entry associated with mapping */

	splx(s);										/* Was there something you needed? */
	return;											/* Tell them we did it */
}
/*
 *	Prints out a mapping control block
 */
void dumpmapping(struct mapping *mp) {				/* Dump out a mapping */

	printf("Dump of mapping block: %08X\n", mp);	/* Header */
	printf("                 next: %08X\n", mp->next);
	printf("             hashnext: %08X\n", mp->hashnext);
	printf("              PTEhash: %08X\n", mp->PTEhash);
	printf("               PTEent: %08X\n", mp->PTEent);
	printf("              physent: %08X\n", mp->physent);
	printf("                 PTEv: %08X\n", mp->PTEv);
	printf("                 PTEr: %08X\n", mp->PTEr);
	printf("                 pmap: %08X\n", mp->pmap);

	if(mp->physent) {								/* Print physent if it exists */
		printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
	}
	else {											/* Nope, no physical entry */
		printf("Associated physical entry: none\n");
	}

	dumppca(mp);									/* Dump out the PCA information */

	return;
}
/*
 *	Prints out a PTEG control area
 */
void dumppca(struct mapping *mp) {					/* PCA */

	PCA				*pca;
	unsigned int	*pteg;

	pca  = (PCA *)((unsigned int)mp->PTEhash & -64);	/* Back up to the start of the PCA */
	pteg = (unsigned int *)((unsigned int)pca - (((hash_table_base & 0x0000FFFF) + 1) << 16));
	printf(" Dump of PCA: %08X\n", pca);			/* Header */
	printf("     PCAlock: %08X\n", pca->PCAlock);
	printf("     PCAallo: %08X\n", pca->flgs.PCAallo);
	printf("     PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
	printf("              %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
	printf("Dump of PTEG: %08X\n", pteg);			/* Header */
	printf("              %08X %08X %08X %08X\n", pteg[0],  pteg[1],  pteg[2],  pteg[3]);
	printf("              %08X %08X %08X %08X\n", pteg[4],  pteg[5],  pteg[6],  pteg[7]);
	printf("              %08X %08X %08X %08X\n", pteg[8],  pteg[9],  pteg[10], pteg[11]);
	printf("              %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);
	return;
}
/*
 *	Dumps starting with a physical entry
 */
void dumpphys(struct phys_entry *pp) {				/* Dump from physent */

	register mapping	*mp;

	printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
	mp = hw_cpv(pp->phys_link);						/* Get the first mapping on the chain */
	while(mp) {										/* Walk the whole chain */
		dumpmapping(mp);
		dumppca(mp);
		mp = hw_cpv(mp->next);
	}

	return;
}
kern_return_t bmapvideo(vm_offset_t *info);
kern_return_t bmapvideo(vm_offset_t *info) {

	extern struct vc_info vinfo;

	(void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));	/* Copy out the video info */
	return KERN_SUCCESS;
}
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {

	pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0);	/* Map it in */
	return KERN_SUCCESS;
}
kern_return_t bmapmapr(vm_offset_t va);
kern_return_t bmapmapr(vm_offset_t va) {

	mapping_remove(current_act()->task->map->pmap, va);	/* Remove map */
	return KERN_SUCCESS;
}