1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
24 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
25 * Currently, some of the functionality of this module is contained within pmap.c. We may want to move
26 * all of this into it (or most of it, anyway) for the sake of performance. We shall see as we write it.
27 *
28 * We also depend upon the structure of the phys_entry control block. We do put some processor
29 * specific stuff in there.
30 *
31 */
32
33#include <cpus.h>
34#include <debug.h>
35#include <mach_kgdb.h>
36#include <mach_vm_debug.h>
37#include <db_machine_commands.h>
38
39#include <kern/thread.h>
40#include <kern/thread_act.h>
41#include <mach/vm_attributes.h>
42#include <mach/vm_param.h>
43#include <vm/vm_kern.h>
44#include <vm/vm_map.h>
45#include <vm/vm_page.h>
46#include <kern/spl.h>
47
48#include <kern/misc_protos.h>
49#include <ppc/misc_protos.h>
50#include <ppc/proc_reg.h>
51
52#include <vm/pmap.h>
53#include <ppc/pmap.h>
54#include <ppc/pmap_internals.h>
55#include <ppc/mem.h>
56
57#include <ppc/new_screen.h>
58#include <ppc/Firmware.h>
59#include <ppc/mappings.h>
60#include <ddb/db_output.h>
61
62#include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
63
64#define PERFTIMES 0
65
66#if PERFTIMES && DEBUG
67#define debugLog2(a, b, c) dbgLog2(a, b, c)
68#else
69#define debugLog2(a, b, c)
70#endif
71
72vm_map_t mapping_map = VM_MAP_NULL;
73
74unsigned int incrVSID = 0; /* VSID increment value */
75unsigned int mappingdeb0 = 0;
76unsigned int mappingdeb1 = 0;
77extern unsigned int hash_table_size;
78extern vm_offset_t mem_size;
79/*
80 * ppc_prot translates from the mach representation of protections to the PPC version.
81 * We also allow for a direct setting of the protection bits. This extends the mach
82 * concepts to allow the greater control we need for Virtual Machines (VMM).
83 * Calculation of it like this saves a memory reference - and maybe a couple of microseconds.
84 * It eliminates the use of this table.
85 * unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
86 */
87
88#define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
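/*
 *	(Editor's illustration, not in the original source) The 16 two-bit table entries are
 *	packed into the constant 0xE4E4AFAC, with entry p occupying bits (2p+1):(2p). Using the
 *	standard Mach values VM_PROT_READ = 1 and VM_PROT_WRITE = 2:
 *
 *		ppc_prot(VM_PROT_READ)                 = (0xE4E4AFAC >> 2) & 3 = 3	(PP = 11, read-only)
 *		ppc_prot(VM_PROT_READ | VM_PROT_WRITE) = (0xE4E4AFAC >> 6) & 3 = 2	(PP = 10, read/write)
 *
 *	which matches entries 1 and 3 of the table shown above.
 */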
89
90/*
91 * About PPC VSID generation:
92 *
93 * This function is called to generate an address space ID. This space ID must be unique within
94 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
95 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
96 * 4, so we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
97 * to have 1M pmaps at a time; in practice we couldn't, since we'd run out of memory long before then. The
98 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
99 * they are released. This causes us to lose track of which space IDs are free to be reused.
100 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
101 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
102 *
103 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
104 * calculation for virtual address lookup. An improperly chosen value could potentially cause
105 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
106 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
107 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
108 * are reserved for the segment number, which means that we really have 2^(13-4) (512) space IDs
109 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
110 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
111 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
112 * with no overflow. I don't think that this is a problem.
113 *
114 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
115 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
116 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
117 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
118 * the same modulo 512. We can reduce this problem by having the segment number be bits
119 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
120 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
121 * I don't think that it is as significant as the other, so I'll make the space ID
122 * with segment first.
123 *
124 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
125 * While this is a problem that should only happen in periods counted in weeks, it can and
126 * will happen. This is assuming a monotonically increasing space ID. If we were to search
127 * for an inactive space ID, there could not be a wrap until there was 2^20 concurrent space IDs.
128 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
129 *
130 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
131 * locked by free_pmap_lock) that is sorted in VSID sequence order.
132 *
133 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
134 * the last that was freed. Then we allocate that.
135 *
136 * NOTE: We must be called with interruptions off and free_pmap_lock held.
137 *
138 */
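/*
 *	(Editor's illustration) With the segment number in the high four bits of the 24-bit VSID,
 *	a pmap with space ID 0x00321 uses VSIDs 0x000321, 0x100321, ... 0xF00321 for segments 0
 *	through 15; this is the layout that mapping_make() builds into the PTE below.
 */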
139
140/*
141 * mapping_init();
142 * Do anything that needs to be done before the mapping system can be used.
143 * Hash table must be initialized before we call this.
144 *
145 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
146 */
147
148void mapping_init(void) {
149
150 unsigned int tmp;
151
152 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
153
154 incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
155 incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
156 incrVSID |= 1; /* Set bit and add 1 */
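	/*
	 *	(Editor's illustration) For example, with a 512KB hash table (hash_table_size = 0x00080000):
	 *	cntlzw gives 12, so (32 - 12 + 1) = 21; 21 >> 1 = 10 and 21 >> 2 = 5, giving
	 *	incrVSID = 0x400 | 0x20 | 1 = 0x421.
	 */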
157 return;
158
159}
160
161
162/*
163 * mapping_remove(pmap_t pmap, vm_offset_t va);
164 * Given a pmap and virtual address, this routine finds the mapping and removes it from
165 * both its PTEG hash list and the physical entry list. The mapping block will be added to
166 * the free list. If the free list threshold is reached, garbage collection will happen.
167 * We also kick back a return code to say whether or not we had one to remove.
168 *
169 * We have a strict ordering here: the mapping must be removed from the PTEG hash list before
170 * it can be removed from the physical entry list. This allows us to get by with only the PTEG
171 * hash lock at page fault time. The physical entry lock must be held while we remove the mapping
172 * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions,
173 * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
174 * It's just that simple!
175 *
176 * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
177 * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG
178 * lock to control the hash chain and may move the position of the mapping for MRU calculations.
179 *
180 * Note that mappings do not need to point to a physical entry. When they don't, it indicates
181 * the mapping is outside of physical memory and usually refers to a memory mapped device of
182 * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock
183 * routines return normally, but don't do anything.
184 */
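/*
 *	(Editor's sketch, not part of the original source) A typical caller removes a range one
 *	page at a time; 'sva' and 'eva' here are illustrative names only, the real callers
 *	live in pmap.c:
 *
 *		for(addr = sva; addr < eva; addr += PAGE_SIZE)
 *			(void)mapping_remove(pmap, addr);
 */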
185
186boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR
187 Returns TRUE if a mapping was found to remove */
188
189 mapping *mp, *mpv;
190 register blokmap *blm;
191 spl_t s;
192 unsigned int *useadd, *useaddr;
193 int i;
194
195 debugLog2(1, va, pmap->space); /* start mapping_remove */
196
197 s=splhigh(); /* Don't bother me */
198
199 mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */
200
201 if(!mp) { /* Did we find one? */
202 if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */
203 splx(s); /* Allow 'rupts now */
204
205 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
206 blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE)); /* Get virtual address */
207 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
208 pmap, va, blm);
209 }
210#if 0
211 blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */
212 kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
213 blm, blm->start, blm->end, blm->PTEr);
214#endif
215 mapping_free(hw_cpv(mp)); /* Release it */
216 debugLog2(2, 1, 0); /* End mapping_remove */
217 return TRUE; /* Tell them we did it */
218 }
219 splx(s); /* Restore the interrupt level */
220 debugLog2(2, 0, 0); /* end mapping_remove */
221 return FALSE; /* Didn't find any, return FALSE... */
222 }
223 if((unsigned int)mp&1) { /* Did we timeout? */
224 panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */
225 splx(s); /* Restore the interrupt level */
226 return FALSE; /* Bad hair day, return FALSE... */
227 }
228
229 mpv = hw_cpv(mp); /* Get virtual address of mapping */
230#if DEBUG
231 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
232#else
233 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
234#endif
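	/*
	 *	(Editor's note) pmapUsage[] entries are 16-bit counters packed two per 32-bit word.
	 *	The lines below locate the containing word and, because hw_atomic_sub works on whole
	 *	words, subtract 0x00010000 to hit the high (word-aligned) halfword or 1 to hit the
	 *	low halfword.
	 */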
235 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
236 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
237 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
238
239#if 0
240 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
241 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
242 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
243 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
244 }
245 }
246#endif
247
248 hw_rem_map(mp); /* Remove the corresponding mapping */
249
250 if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
251
252 splx(s); /* Was there something you needed? */
253
254 mapping_free(mpv); /* Add mapping to the free list */
255 debugLog2(2, 1, 0); /* end mapping_remove */
256 return TRUE; /* Tell them we did it */
257}
258
259/*
260 * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
261 *
262 * This guy releases any mappings that exist for a physical page on a specified map.
263 * We get the lock on the phys_entry, and hold it throughout this whole routine.
264 * That way, no one can change the queue out from underneath us. We keep fetching
265 * the physent's mapping anchor until it is null, then we're done.
266 *
267 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
268 * decrement the pmap's residency count. Then we release the mapping back to the free list.
269 *
270 */
271
272
273void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) { /* Remove all mappings from specified pmap for this physent */
274
275 mapping *mp, *mp_next, *mpv;
276 spl_t s;
277 unsigned int *useadd, *useaddr, uindx;
278 int i;
279
280 s=splhigh(); /* Don't bother me */
281
282 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
283 panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
284 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
285 }
286
287 mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
288
289 while(mp) { /* Keep going so long as there's another */
290
291 mpv = hw_cpv(mp); /* Get the virtual address */
292 if(mpv->pmap != pmap) {
293 mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
294 continue;
295 }
296#if DEBUG
297 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
298#else
299 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
300#endif
301
302 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join seg # and top 3 bits of API */
303 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
304 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
305 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
306
307
308
309 mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
310 hw_rem_map(mp); /* Remove the mapping */
311 mapping_free(mpv); /* Add mapping to the free list */
312 mp = mp_next;
313 }
314
315 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
316 splx(s);
317 return;
318}
319/*
320 * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
321 *
322 * This guy releases any mappings that exist for a physical page.
323 * We get the lock on the phys_entry, and hold it throughout this whole routine.
324 * That way, no one can change the queue out from underneath us. We keep fetching
325 * the physent's mapping anchor until it is null, then we're done.
326 *
327 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
328 * decrement the pmap's residency count. Then we release the mapping back to the free list.
329 *
330 */
331
332void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */
333
334 mapping *mp, *mpv;
335 spl_t s;
336 unsigned int *useadd, *useaddr, uindx;
337 int i;
338
339 s=splhigh(); /* Don't bother me */
340 debugLog2(3, pp->pte1, 0); /* start mapping_purge */
341
342 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
343 panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
344 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
345 }
346
347 while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
348
349 mpv = hw_cpv(mp); /* Get the virtual address */
350#if DEBUG
351 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
352#else
353 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
354#endif
355
356 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 3 bits of the API */
357 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
358 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
359 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
360
361#if 0
362 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
363 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
364 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
365 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
366 }
367 }
368#endif
369
370
371 hw_rem_map(mp); /* Remove the mapping */
372 mapping_free(mpv); /* Add mapping to the free list */
373 }
374
375 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
376
377 debugLog2(4, pp->pte1, 0); /* end mapping_purge */
378 splx(s); /* Was there something you needed? */
379 return; /* Tell them we did it */
380}
381
382
383/*
384 * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
385 *
386 * This routine takes the given parameters, builds a mapping block, and queues it into the
387 * correct lists.
388 *
389 * The pp parameter can be null. This allows us to make a mapping that is not
390 * associated with any physical page. We may need this for certain I/O areas.
391 *
392 * If the phys_entry address is null, we neither lock nor chain into it.
393 * If locked is 1, we already hold the lock on the phys_entry and will neither acquire nor release it.
394 */
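/*
 *	(Editor's sketch) A mapping with no backing phys_entry, e.g. a memory-mapped device page,
 *	simply passes a zero pp, exactly as pmap_map_block() below does for each page it maps:
 *
 *		(void)mapping_make(pmap, 0, va, pa, prot, attr, 0);
 */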
395
396mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */
397
398 register mapping *mp, *mpv;
399 unsigned int *useadd, *useaddr;
400 spl_t s;
401 int i;
402
403 debugLog2(5, va, pa); /* start mapping_make */
404 mpv = mapping_alloc(); /* Get a spare mapping block */
405
406 mpv->pmap = pmap; /* Initialize the pmap pointer */
407 mpv->physent = pp; /* Initialize the pointer to the physical entry */
408 mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */
409 mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the VSID */
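	/*
	 *	(Editor's note) In PTEv above: (va >> 1) & 0x78000000 places the segment number (top 4
	 *	bits of va) into the high 4 bits of the 24-bit VSID field, pmap->space << 7 supplies
	 *	the low 20 VSID bits, and (va >> 22) & 0x3F is the 6-bit API (abbreviated page index).
	 */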
410
411 s=splhigh(); /* Don't bother from now on */
412
413 mp = hw_cvp(mpv); /* Get the physical address of this */
414
415 if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? */
416 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
417 panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
418 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
419 }
420 }
421
422 if(pp) { /* See if there is a physical entry */
423 mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mapping's forward pointer */
424 pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */
425 }
426
427 hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */
428
429 (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */
430 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
431 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
432 (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */
433#if 0
434 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
435 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
436 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
437 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
438 }
439 }
440#endif
441
442 if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */
443
444 splx(s); /* Ok for interruptions now */
445 debugLog2(6, pmap->space, prot); /* end mapping_make */
446 return mpv; /* Leave... */
447}
448
449
450/*
451 * Enters optimal translations for odd-sized V=F blocks.
452 *
453 * Builds a block map for each power-of-two hunk o' address
454 * that exists. This is specific to the processor type.
455 * PPC uses BAT register size stuff. Future PPC might have
456 * something else.
457 *
458 * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
459 * stupid to know otherwise so we only look at the va anyhow, so there...
460 *
461 */
462
463void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */
464
465 register blokmap *blm, *oblm;
466 unsigned int pg;
467 unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
468 int i, maxshft, nummax, minshft;
469
470#if 1
471 kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
472 pmap, va, pa, bnd, size, prot, attr);
473#endif
474
475 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
476 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
477
478 minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */
479 maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */
480
481 leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */
482 trailing = size - leading; /* Get size of trailing area */
483 tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */
484
485#if 1
486 kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */
487#endif
488
489 if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
490
491 va = va + tomin; /* Adjust virtual start */
492 pa = pa + tomin; /* Adjust physical start */
493 leading = leading - tomin; /* Adjust leading size */
494
495/*
496 * Some of this code is very classic PPC. We need to fix this up.
497 */
498
499 leading = leading >> minshft; /* Position for bit testing */
500 cbsize = minsize; /* Set the minimum size */
501
502 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
503
504 if(leading & 1) {
505 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
506 pa = pa + cbsize; /* Bump up physical address */
507 va = va + cbsize; /* Bump up virtual address */
508 }
509
510 leading = leading >> 1; /* Shift up to next size */
511 cbsize = cbsize << 1; /* Here too */
512
513 }
514
515 nummax = trailing >> maxshft; /* Get number of max size blocks left */
516 for(i=0; i < nummax - 1; i++) { /* Account for all max size blocks left but 1 */
517 pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
518
519 pa = pa + maxsize; /* Bump up physical address */
520 va = va + maxsize; /* Bump up virtual address */
521 trailing -= maxsize; /* Back off what we just did */
522 }
523
524 cbsize = maxsize; /* Start at maximum size */
525
526 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
527
528 if(trailing & cbsize) {
529 trailing &= ~cbsize; /* Remove the block we are allocating */
530 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
531 pa = pa + cbsize; /* Bump up physical address */
532 va = va + cbsize; /* Bump up virtual address */
533 }
534 cbsize = cbsize >> 1; /* Next size down */
535 }
536
537 if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
538
539 return; /* Return */
540}
541
542
543/*
544 * Enters translations for odd-sized V=F blocks.
545 *
546 * Checks to ensure that the request is at least ODDBLKMIN in size. If smaller, the request
547 * will be split into normal-sized page mappings.
548 *
549 * The higher level VM map should be locked to ensure that we don't have a
550 * double diddle here.
551 *
552 * We panic if we get a block that overlaps with another. We do not merge adjacent
553 * blocks because removing any address within a block removes the entire block, and it
554 * would really mess things up if we trashed too much.
555 *
556 * Once a block is mapped, it is immutable, that is, protection, catch mode, etc. cannot
557 * be changed. The block must be unmapped and then remapped with the new stuff.
558 * We also do not keep track of reference or change flags.
559 *
560 * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
561 * with interruptions and translation disabled and under the control of the lock located
562 * in the first block map. MRU is used because it is expected that the same entry
563 * will be accessed repeatedly while PTEs are being generated to cover those addresses.
564 *
565 */
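/*
 *	(Editor's sketch, hypothetical values) Mapping an 8MB V=F region as one block instead of
 *	2048 separate page mappings; 'vaddr', 'paddr' and 'attr' are placeholders, with attr
 *	carrying the WIMG bits just as in mapping_make():
 *
 *		pmap_map_block(kernel_pmap, vaddr, paddr, 8 * 1024 * 1024, VM_PROT_READ | VM_PROT_WRITE, attr, 0);
 */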
566
567void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
568
569 register blokmap *blm, *oblm;
570 unsigned int pg;
571
572#if 0
573 kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
574 pmap, va, pa, size, prot, attr);
575#endif
576
577 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
578 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
579 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
580#if 0
581 kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
582 va + pg, pa + pg);
583#endif
584 }
585 return; /* All done */
586 }
587
588 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
589
590 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
591 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
592 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
593 blm->space = pmap->space; /* Set the space (only needed for remove) */
594 blm->blkFlags = flags; /* Set the block's flags */
595
596#if 0
597 kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
598 blm, blm->start, blm->end, blm->PTEr);
599#endif
600
601 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
602
603#if 0
604 kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
605 blm, pmap->bmaps);
606#endif
607
608 if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
609 panic("pmap_map_block: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
610 }
611
612#if 0
613 kprintf("pmap_map_block: blm=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
614 blm, pmap->bmaps);
615#endif
616
617 return; /* Return */
618}
619
620
621/*
622 * Optimally enters translations for odd-sized V=F blocks.
623 *
624 * Checks to ensure that the request is at least ODDBLKMIN in size. If smaller, the request
625 * will be split into normal-sized page mappings.
626 *
627 * This one is different than pmap_map_block in that it will allocate its own virtual
628 * target address. Rather than allocating a single block,
629 * it may also allocate multiple blocks that are power-of-two aligned/sized. This allows
630 * hardware-level mapping that takes advantage of BAT maps or large page sizes.
631 *
632 * Most considerations for pmap_map_block apply.
633 *
634 *
635 */
636
637kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
638 vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */
639
640 register blokmap *blm, *oblm;
641 unsigned int pg;
642 kern_return_t err;
643 unsigned int bnd;
644
645#if 1
646 kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
647 map, pa, size, prot, attr);
648#endif
649
650 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
651 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
652 if(err) {
653#if DEBUG
654 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */
655#endif
656 return(err); /* Pass back the error */
657 }
658#if 1
659 kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */
660#endif
661
662 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
663 mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
664 }
665 return(KERN_SUCCESS); /* All done */
666 }
667
668 err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */
669
670 if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */
671 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
672 if(err) {
673#if DEBUG
674 kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */
675#endif
676 return(err); /* Pass back the error */
677 }
678#if 1
679 kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */
680#endif
681 pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */
682 return KERN_SUCCESS; /* All done now */
683 }
684
685 if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... */
686#if DEBUG
687 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we couldn't do it */
688#endif
689 return(err);
690 }
691
692#if 1
693 kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */
694#endif
695 mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */
696 return(KERN_SUCCESS); /* All done */
697}
698
699
700#if 0
701
702/*
703 * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
704 * areas.
705 *
706 * Once blocks are merged, they act like one block, i.e., if you remove it,
707 * it all goes...
708 *
709 * This can only be used during boot. Ain't no way we can handle SMP
710 * or preemption easily, so we restrict it. We don't check either. We
711 * assume only skilled professional programmers will attempt using this
712 * function. We assume no responsibility, either real or imagined, for
713 * injury or death resulting from unauthorized use of this function.
714 *
715 * No user serviceable parts inside. Notice to be removed by end-user only,
716 * under penalty of applicable federal and state laws.
717 *
718 * See descriptions of pmap_map_block. Ignore the part where we say we panic for
719 * overlapping areas. Note that we do panic if we can't merge.
720 *
721 */
722
723void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */
724
725 register blokmap *blm, *oblm;
726 unsigned int pg;
727 spl_t s;
728
729#if 1
730 kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
731 pmap, va, pa, size, prot, attr);
732#endif
733
734 s=splhigh(); /* Don't bother from now on */
735 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
736 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
737 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
738 }
739 splx(s); return; /* Restore 'rupts; all done */
740 }
741
742 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
743
744 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
745 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
746 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
747
748#if 1
749 kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
750 blm, blm->start, blm->end, blm->PTEr);
751#endif
752
753 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
754
755#if 1
756 kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
757 blm, pmap->bmaps);
758#endif
759
760 if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
761 panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
762 }
763
764#if 1
765 kprintf("pmap_map_block_merge: blm=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
766 blm, pmap->bmaps);
767#endif
768 splx(s); /* Ok for interruptions now */
769
770 return; /* Return */
771}
772#endif
773
774/*
775 * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot, boolean_t locked) - change the protection of a physical page
776 *
777 * This routine takes a physical entry and runs through all mappings attached to it and changes
778 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
779 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
780 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
781 * higher to lower, lower to higher.
782 *
783 * The phys_entry lock is acquired and released here unless 'locked' indicates the caller already holds it.
784 */
785
786void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */
787
788 spl_t spl;
789
790 debugLog2(9, pp->pte1, prot); /* start mapping_protect_phys */
791 spl=splhigh(); /* No interruptions during this */
792 if(!locked) { /* Do we need to lock the physent? */
793 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
794 panic("\nmapping_protect_phys: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
795 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
796 }
797 }
798
799 hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */
800
801 if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
802 splx(spl); /* Restore interrupt state */
803 debugLog2(10, pp->pte1, 0); /* end mapping_protect_phys */
804
805 return; /* Leave... */
806}
807
808/*
809 * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
810 *
811 * This routine takes a pmap and virtual address and changes
812 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
813 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
814 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
815 * higher to lower, lower to higher.
816 *
817 */
818
819void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
820
821 mapping *mp, *mpv;
822 spl_t s;
823
824 debugLog2(9, vaddr, pmap); /* start mapping_protect */
825 s = splhigh(); /* Don't bother me */
826
827 mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */
828
829 if(!mp) { /* Did we find one? */
830 splx(s); /* Restore the interrupt level */
831 debugLog2(10, 0, 0); /* end mapping_protect */
832 return; /* Didn't find any... */
833 }
834 if((unsigned int)mp & 1) { /* Did we timeout? */
835 panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */
836 splx(s); /* Restore the interrupt level */
837 return; /* Bad hair day... */
838 }
839
840 hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */
841
842 mpv = hw_cpv(mp); /* Get virtual address of mapping */
843 if(mpv->physent) { /* If there is a physical page, */
844 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
845 }
846 splx(s); /* Restore interrupt state */
847 debugLog2(10, mpv->PTEr, 0); /* end mapping_protect */
848
849 return; /* Leave... */
850}
851
852/*
853 * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
854 *
855 * This routine takes a physical entry and sets the physical attributes. There can be no mappings
856 * associated with this page when we do it.
857 */
858
859void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */
860
861 debugLog2(11, pp->pte1, prot); /* start mapping_phys_attr */
862
863 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
864 panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
865 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
866 }
867
868 hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */
869
870 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
871 debugLog2(12, pp->pte1, wimg); /* end mapping_phys_attr */
872
873 return; /* Leave... */
874}
875
876/*
877 * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
878 *
879 * This routine takes a physical entry and runs through all mappings attached to it and invalidates
880 * any PTEs it finds.
881 *
882 * Interruptions must be disabled and the physical entry locked at entry.
883 */
884
885void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */
886
887 hw_inv_all(pp); /* Go invalidate all PTEs pointing to this physical page */
888
889 return; /* Leave... */
890}
891
892
893/*
894 * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
895 *
896 * This routine takes a physical entry and runs through all mappings attached to it and turns
897 * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before
898 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
899 * either (I don't think, maybe I'll change my mind later).
900 *
901 * Interruptions must be disabled and the physical entry locked at entry.
902 */
903
904void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */
905
906 hw_clr_mod(pp); /* Go clear the change bit of a physical page */
907 return; /* Leave... */
908}
909
910
911/*
912 * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
913 *
914 * This routine takes a physical entry and runs through all mappings attached to it and turns
915 * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before
916 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
917 * either (I don't think, maybe I'll change my mind later).
918 *
919 * Interruptions must be disabled and the physical entry locked at entry.
920 */
921
922void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */
923
924 hw_set_mod(pp); /* Go set the change bit of a physical page */
925 return; /* Leave... */
926}
927
928
929/*
930 * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
931 *
932 * This routine takes a physical entry and runs through all mappings attached to it and turns
933 * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
934 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
935 * either (I don't think, maybe I'll change my mind later).
936 *
937 * Interruptions must be disabled at entry.
938 */
939
940void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */
941
942 mapping *mp;
943
944 debugLog2(13, pp->pte1, 0); /* start mapping_clr_ref */
945 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */
946 panic("Lock timeout getting lock on physical entry\n"); /* Just die... */
947 }
948 hw_clr_ref(pp); /* Go clear the reference bit of a physical page */
949 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */
950 debugLog2(14, pp->pte1, 0); /* end mapping_clr_ref */
951 return; /* Leave... */
952}
953
954
955/*
956 * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
957 *
958 * This routine takes a physical entry and runs through all mappings attached to it and turns
959 * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
960 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
961 * either (I don't think, maybe I'll change my mind later).
962 *
963 * Interruptions must be disabled and the physical entry locked at entry.
964 */
965
966void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */
967
968 hw_set_ref(pp); /* Go set the reference bit of a physical page */
969 return; /* Leave... */
970}
971
972
973/*
974 * boolean_t mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
975 *
976 * This routine takes a physical entry and runs through all mappings attached to it and tests
977 * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before
978 * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
979 * either (I don't think, maybe I'll change my mind later).
980 *
981 * Interruptions must be disabled and the physical entry locked at entry.
982 */
983
984boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */
985
986 return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */
987}
988
989
990/*
991 * boolean_t mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
992 *
993 * This routine takes a physical entry and runs through all mappings attached to it and tests
994 * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
995 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
996 * either (I don't think, maybe I'll change my mind later).
997 *
998 * Interruptions must be disabled and the physical entry locked at entry.
999 */
1000
1001boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */
1002
1003 return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */
1004}
1005
1006
1007/*
1008 * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
1009 *
1010 * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
1011 */
1012
1013void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */
1014
1015 pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
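	/*
	 *	(Editor's note) In the low PTE word built above, the physical page number occupies the
	 *	high bits and (wimg << 3) & 0x78 lands in the WIMG field (bits 3-6 counting from the
	 *	least significant bit); the R, C and PP bits are left zero here.
	 */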
1016
1017 return; /* Leave... */
1018}
1019
1020
1021/*
1022 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
1023 *
1024 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
1025 * the number of free mappings remaining, and if below a threshold, replenishes them.
1026 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
1027 * a new one is allocated.
1028 *
1029 * This routine allocates and/or releases memory and must be called from a safe place.
1030 * Currently, vm_pageout_scan is the safest place. We use mapCtl.mapcrecurse to keep from recursing on ourselves.
1031 */
1032
1033thread_call_t mapping_adjust_call;
1034static thread_call_data_t mapping_adjust_call_data;
1035
1036void mapping_adjust(void) { /* Adjust free mappings */
1037
1038 kern_return_t retr;
1039 mappingblok *mb, *mbn;
1040 spl_t s;
1041 int allocsize, i;
1042 extern int vm_page_free_count;
1043
1044 if(mapCtl.mapcmin <= MAPPERBLOK) {
1045 mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
1046
1047#if DEBUG
1048 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
1049 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
1050 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
1051#endif
1052 }
1053
1054 s = splhigh(); /* Don't bother from now on */
1055 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1056 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
1057 }
1058
1059 if (mapping_adjust_call == NULL) {
1060 thread_call_setup(&mapping_adjust_call_data, mapping_adjust, NULL);
1061 mapping_adjust_call = &mapping_adjust_call_data;
1062 }
1063
1064 while(1) { /* Keep going until we've got enough */
1065
1066 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
1067 if(allocsize < 1) break; /* Leave if we have all we need */
1068
1069 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
1070 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
1071 mapCtl.mapcreln--; /* Back off the count */
1072 allocsize = MAPPERBLOK; /* Show we allocated one block */
1073 }
1074 else { /* No free ones, try to get it */
1075
1076 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
1077 if(allocsize > (mapCtl.mapcfree / 2)) allocsize = (mapCtl.mapcfree / 2); /* Don't try for anything that we can't comfortably map */
1078
1079 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1080 splx(s); /* Restore 'rupts */
1081
1082 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
1083 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
1084 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
1085 panic("Whoops... Not a bit of wired memory left for anyone\n");
1086 }
1087 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
1088 }
1089
1090 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
1091 s = splhigh(); /* Don't bother from now on */
1092 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1093 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
1094 }
1095 }
1096 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
1097 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1098 mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1099 }
1100 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1101 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1102 }
1103
1104 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
1105 mapCtl.mapcrecurse = 0; /* We are done now */
1106 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1107 splx(s); /* Restore 'rupts */
1108 return; /* Return... */
1109 }
1110
1111 mbn = mapCtl.mapcrel; /* Get first pending release block */
1112 mapCtl.mapcrel = 0; /* Dequeue them */
1113 mapCtl.mapcreln = 0; /* Set count to 0 */
1114
1115 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1116 splx(s); /* Restore 'rupts */
1117
1118 while((unsigned int)mbn) { /* Toss 'em all */
1119 mb = mbn->nextblok; /* Get the next */
1120 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
1121 mbn = mb; /* Chain to the next */
1122 }
1123
1124 __asm__ volatile("sync"); /* Make sure all is well */
1125 mapCtl.mapcrecurse = 0; /* We are done now */
1126 return;
1127}
1128
1129/*
1130 * mapping_free(mapping *mp) - release a mapping to the free list
1131 *
1132 * This routine takes a mapping and adds it to the free list.
1133 * If this mapping makes the block non-empty, we queue it to the free block list.
1134 * NOTE: we might want to queue it to the end to quell the pathological
1135 * case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
1136 * If this release fills a block and we are above the threshold, we release the block.
1137 */
1138
1139void mapping_free(struct mapping *mp) { /* Release a mapping */
1140
1141 mappingblok *mb, *mbn;
1142 spl_t s;
1143 unsigned int full, mindx;
1144
1145 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */
1146 mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
1147
1148 s = splhigh(); /* Don't bother from now on */
1149 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1150 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
1151 }
1152
1153 full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */
1154 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
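	/*
	 *	(Editor's illustration) Mappings are 32 bytes, so a mapping at page offset 0xA60 gives
	 *	mindx = 0xA60 >> 5 = 83: word 83 >> 5 = 2 of mapblokfree, bit 83 & 31 = 19, i.e. the
	 *	mask 0x80000000 >> 19 = 0x00001000.
	 */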
1155
1156 if(full) { /* If it was full before this: */
1157 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
1158 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
1159 }
1160
1161 mapCtl.mapcfree++; /* Bump free count */
1162 mapCtl.mapcinuse--; /* Decrement in use count */
1163
1164 mapCtl.mapcfreec++; /* Count total calls */
1165
1166 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
1167 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
1168 == 0xFFFFFFFF) { /* See if empty now */
1169
1170 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
1171 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
1172 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
1173 }
1174 else { /* We're not first */
1175 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
1176 if(mbn->nextblok == mb) break; /* Is the next one our's? */
1177 }
1178 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mb);
1179 mbn->nextblok = mb->nextblok; /* Dequeue us */
1180 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
1181 }
1182
1183 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
1184 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
1185 mapCtl.mapcnext = mb; /* Chain us to the head */
1186 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
1187 }
1188 else {
1189 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
1190 mapCtl.mapcreln++; /* Count on release list */
1191 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
1192 mapCtl.mapcrel = mb; /* Chain us in front */
1193 }
1194 }
1195 }
1196
1197 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
1198 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1199 thread_call_enter(mapping_adjust_call); /* Go toss some */
1200 }
1201 }
1202 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1203 splx(s); /* Restore 'rupts */
1204
1205 return; /* Bye, dude... */
1206}
1207
1208
1209/*
1210 * mapping_alloc(void) - obtain a mapping from the free list
1211 *
1212 * This routine takes a mapping off the free list and returns its address.
1213 *
1214 * We do this by finding a free entry in the first block and allocating it.
1215 * If this allocation empties the block, we remove it from the free list.
1216 * If this allocation drops the total number of free entries below a threshold,
1217 * we allocate a new block.
1218 *
1219 */
1220
1221mapping *mapping_alloc(void) { /* Obtain a mapping */
1222
1223 register mapping *mp;
1224 mappingblok *mb, *mbn;
1225 spl_t s;
1226 int mindx;
1227 kern_return_t retr;
1228
1229 s = splhigh(); /* Don't bother from now on */
1230 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1231 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1232 }
1233
1234 if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */
1235 panic("mapping_alloc - free mappings exhausted\n"); /* Whine and moan */
1236 }
1237
1238 if(!(mindx = mapalc(mb))) { /* Allocate a slot */
1239 panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */
1240 }
1241
1242 if(mindx < 0) { /* Did we just take the last one */
1243 mindx = -mindx; /* Make positive */
1244 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1245 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1246 }
1247
1248 mapCtl.mapcfree--; /* Decrement free count */
1249 mapCtl.mapcinuse++; /* Bump in use count */
1250
1251 mapCtl.mapcallocc++; /* Count total calls */
1252
1253/*
1254 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1255 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1256 * rescuable, we will kick the misc scan, which will allocate some for us. We only do this
1257 * if we haven't already done it.
1258 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1259 * the release list with as much as we need until threads start.
1260 */
1261 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1262 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
1263 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1264 mapCtl.mapcreln--; /* Back off the count */
1265 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1266 }
1267 else { /* We need to replenish */
1268 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1269 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1270 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1271 }
1272 }
1273 }
1274 }
1275
1276 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1277 splx(s); /* Restore 'rupts */
1278
1279 mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */
1280 __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Clean it up */
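	/*
	 *	(Editor's note) dcbz zeroes an entire data cache block (32 bytes on these processors);
	 *	since a mapping is exactly 32 bytes and 32-byte aligned, this clears the new entry
	 *	without first fetching it from memory.
	 */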
1281 return mp; /* Send it back... */
1282}
1283
1284
1285void
1286consider_mapping_adjust()
1287{
1288 spl_t s;
1289
1290 s = splhigh(); /* Don't bother from now on */
1291 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1292 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1293 }
1294
1295 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1296 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1297 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1298 }
1299 }
1300
1301 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1302 splx(s); /* Restore 'rupts */
1303
1304}
1305
1306
1307
1308/*
1309 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1310 *
1311 * The mapping block is a page size area on a page boundary. It contains 1 header and 127
1312 * mappings. This call adds and initializes a block for use.
1313 *
1314 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1315 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1316 * corresponds to the header). The translation mask is the XOR of the virtual and real
1317 * addresses (needless to say, the block must be wired).
1318 *
1319 * We handle these mappings the same way as saveareas: the block is only on the chain so
1320 * long as there are free entries in it.
1321 *
1322 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1323 * mappings. Blocks marked PERM won't ever be released.
1324 *
1325  * If perm is negative, the block is initialized, but immediately queued to the mapCtl.mapcrel
1326  * list. We do this only at startup time. This is done because we only allocate blocks
1327  * in the pageout scan, and it doesn't start up until after we run out of the initial mappings.
1328 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1329 * them on the release queue, the allocate routine will rescue them. Then when the
1330 * pageout scan starts, all extra ones will be released.
1331 *
1332 */
1333
1334
1335void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1336 										/* Sets up a block of mappings.
1337 										   perm indicates whether the block is permanent,
1338 										   releasable, or (if negative) queued straight to the release queue;
1339 										   locked indicates if the lock is held already */
1340
1341 mappingblok *mb;
1342 spl_t s;
1343 int i;
1344 unsigned int raddr;
1345
1346 mb = (mappingblok *)mbl; /* Start of area */
1347
1348
1349 if(perm >= 0) { /* See if we need to initialize the block */
1350 if(perm) {
1351 raddr = (unsigned int)mbl; /* Perm means V=R */
1352 mb->mapblokflags = mbPerm; /* Set perm */
1353 }
1354 else {
1355 raddr = kvtophys(mbl); /* Get real address */
1356 mb->mapblokflags = 0; /* Set not perm */
1357 }
1358
1359 mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */
1360
1361 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1362 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1363 mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */
1364 mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */
1365 }
1366
1367 s = splhigh(); /* Don't bother from now on */
1368 if(!locked) { /* Do we need the lock? */
1369 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1370 panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */
1371 }
1372 }
1373
1374 if(perm < 0) { /* Direct to release queue? */
1375 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1376 mapCtl.mapcrel = mb; /* Queue us on in */
1377 mapCtl.mapcreln++; /* Count the free block */
1378 }
1379 else { /* Add to the free list */
1380
1381 mb->nextblok = 0; /* We always add to the end */
1382 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1383
1384 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1385 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1386 }
1387 else { /* We are not the first */
1388 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1389 mapCtl.mapclast = mb; /* We are now last */
1390 }
1391 }
1392
1393 if(!locked) { /* Do we need to unlock? */
1394 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1395 }
1396 splx(s); /* Restore 'rupts */
1397 return; /* All done, leave... */
1398}
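
/*
 *	Illustrative sketch (not part of the original code): with the layout described above,
 *	slot 0 of a block is the header and slots 1 through 127 hold mappings; each slot owns one
 *	bit in mapblokfree (bit 0 of word 0, the header, is never set), and the XOR mask converts
 *	a mapping's address between its virtual and real forms. The two helpers below are
 *	hypothetical and exist only to show the arithmetic.
 */

static int mapping_slot_is_free(mappingblok *mb, int slot) {		/* (TEST/DEBUG) hypothetical: is slot 1-127 free? */

	if((slot < 1) || (slot > MAPPERBLOK)) return 0;					/* Slot 0 is the header; only 1-127 hold mappings */
	return (mb->mapblokfree[slot >> 5] >> (31 - (slot & 31))) & 1;	/* Bit 0 is the high-order bit of word 0 */
}

static unsigned int mapping_block_xlate(mappingblok *mb, unsigned int addr) {	/* (TEST/DEBUG) hypothetical: virtual<->real within the block */

	return addr ^ mb->mapblokvrswap;								/* The same XOR mask works in either direction */
}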
1399
1400
1401/*
1402  * void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
1403 *
1404 * No locks can be held, because we allocate memory here.
1405  * This routine needs a corresponding mapping_relpre call to back off the
1406  * hold off count so that the adjust routine will free the extra mapping
1407 * blocks on the release list. I don't like this, but I don't know
1408 * how else to do this for now...
1409 *
1410 */
1411
1412 void mapping_prealloc(unsigned int size) {			/* Preallocates mappings for a large request */
1413
1414 int nmapb, i;
1415 kern_return_t retr;
1416 mappingblok *mbn;
1417 spl_t s;
1418
1419 s = splhigh(); /* Don't bother from now on */
1420 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1421 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1422 }
1423
1424 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1425
1426 mapCtl.mapcholdoff++; /* Bump the hold off count */
1427
1428 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1429 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1430 splx(s); /* Restore 'rupts */
1431 return;
1432 }
1433 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1434 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1435 splx(s); /* Restore 'rupts */
1436 return;
1437 }
1438 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1439
1440 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1441 splx(s); /* Restore 'rupts */
1442
1443 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1444 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1445 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1446 panic("Whoops... Not a bit of wired memory left for anyone\n");
1447 }
1448 		mapping_free_init((vm_offset_t)mbn, -1, 0);		/* Initialize onto the release queue */
1449 }
1450 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1451 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1452
1453 mapCtl.mapcrecurse = 0; /* We are done now */
1454}
1455
1456/*
1457 * void mapping_relpre(void) - Releases preallocation release hold off
1458 *
1459  * This routine backs off the
1460  * hold off count so that the adjust routine will free the extra mapping
1461 * blocks on the release list. I don't like this, but I don't know
1462 * how else to do this for now...
1463 *
1464 */
1465
1466void mapping_relpre(void) { /* Releases release hold off */
1467
1468 spl_t s;
1469
1470 s = splhigh(); /* Don't bother from now on */
1471 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1472 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1473 }
1474 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1475 panic("mapping_relpre: hold-off count went negative\n");
1476 }
1477
1478 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1479 splx(s); /* Restore 'rupts */
1480}
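
/*
 *	Illustrative usage sketch (hypothetical, not part of the original code): a caller that
 *	knows it is about to enter a large number of mappings brackets the work with the two
 *	routines above so the hold off count ends up balanced again.
 */

static void example_prealloc_usage(unsigned int size) {			/* (TEST/DEBUG) hypothetical caller */

	mapping_prealloc(size);										/* Get enough mapping blocks on hand; bumps the hold off count */

	/* ... enter the mappings for the large request here ... */

	mapping_relpre();											/* Back off the hold off so the adjust routine may release extras */
}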
1481
1482/*
1483 * void mapping_free_prime(void) - Primes the mapping block release list
1484 *
1485 * See mapping_free_init.
1486 * No locks can be held, because we allocate memory here.
1487 * One processor running only.
1488 *
1489 */
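
/*
 *	Worked example (illustrative numbers only): if the boot-time tables already account for,
 *	say, 300 mappings (mapcfree + mapcinuse), the permanent allocation below rounds up to
 *	(300 + 127 - 1) / 127 = 3 blocks, so we prime 4 * 3 = 12 pages onto the release queue.
 */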
1490
1491void mapping_free_prime(void) { /* Primes the mapping block release list */
1492
1493 int nmapb, i;
1494 kern_return_t retr;
1495 mappingblok *mbn;
1496 vm_offset_t mapping_min;
1497
1498 retr = kmem_suballoc(kernel_map, &mapping_min, mem_size / 16,
1499 FALSE, TRUE, &mapping_map);
1500
1501 if (retr != KERN_SUCCESS)
1502 panic("mapping_free_prime: kmem_suballoc failed");
1503
1504
1505 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1506 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1507
1508#if DEBUG
1509 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1510 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1511#endif
1512
1513 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1514 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1515 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1516 panic("Whoops... Not a bit of wired memory left for anyone\n");
1517 }
1518 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1519 }
1520 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1521 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1522}
1523
1524
1525
1526 void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1527 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1528{
1529 *count = mapCtl.mapcinuse;
1530 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1531 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1532 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1533 *alloc_size = PAGE_SIZE;
1534
1535 *collectable = 1;
1536 *exhaustable = 0;
1537}
1538
1539
1540/*
1541 * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
1542 *
1543 * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with
1544 * the same space. If it finds it, it returns the virtual address.
1545 *
1546 * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check
1547 * for it and fail it myself...
1548 */
1549
1550vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */
1551
1552 spl_t s;
1553 register mapping *mp, *mpv;
1554 vm_offset_t va;
1555
1556 if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */
1557
	s = splhigh();											/* Don't bother from now on; splx() below needs a valid level */
1558 	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Try to get the lock on the physical entry */
1559 splx(s); /* Restore 'rupts */
1560 panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */
1561 return(0); /* Should die before here */
1562 }
1563
1564 va = 0; /* Assume failure */
1565
1566 for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */
1567
1568 if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */
1569
1570 va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */
1571 va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */
1572 va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */
1573 break; /* We're done now, pass virtual address back */
1574 }
1575
1576 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1577 splx(s); /* Restore 'rupts */
1578 return(va); /* Return the result or 0... */
1579}
1580
1581/*
1582 * kvtophys(addr)
1583 *
1584 * Convert a kernel virtual address to a physical address
1585 */
1586vm_offset_t kvtophys(vm_offset_t va) {
1587
1588 register mapping *mp, *mpv;
1589 register blokmap *bmp;
1590 register vm_offset_t pa;
1591 spl_t s;
1592
1593 s=splhigh(); /* Don't bother from now on */
1594 mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */
1595
1596 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1597 splx(s); /* Restore 'rupts */
1598 panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
1599 return 0;
1600 }
1601
1602 if(!mp) { /* If it was not a normal page */
1603 pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1604 splx(s); /* Restore 'rupts */
1605 return pa; /* Return physical address */
1606 }
1607
1608 mpv = hw_cpv(mp); /* Convert to virtual addressing */
1609
1610 if(!mpv->physent) { /* Was there a physical entry? */
1611 		pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* No physent; get physical address from the real PTE image */
1612 }
1613 else {
1614 pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */
1615 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1616 }
1617
1618 splx(s); /* Restore 'rupts */
1619 return pa; /* Return the physical address... */
1620}
1621
1622/*
1623 * phystokv(addr)
1624 *
1625 * Convert a physical address to a kernel virtual address if
1626 * there is a mapping, otherwise return NULL
1627 */
1628
1629vm_offset_t phystokv(vm_offset_t pa) {
1630
1631 struct phys_entry *pp;
1632 vm_offset_t va;
1633
1634 pp = pmap_find_physentry(pa); /* Find the physical entry */
1635 if (PHYS_NULL == pp) {
1636 return (vm_offset_t)NULL; /* If none, return null */
1637 }
1638 if(!(va=mapping_p2v(kernel_pmap, pp))) {
1639 return 0; /* Can't find it, return 0... */
1640 }
1641 return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */
1642
1643}
1644
1645/*
1646 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1647 * page 0 access for the current thread.
1648 *
1649 * If parameter is TRUE, faults are ignored
1650 * If parameter is FALSE, faults are honored
1651 *
1652 */
1653
1654void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1655
1656 if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1657 else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1658
1659 	return;											/* All done, leave... */
1660}
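
/*
 *	Illustrative sketch (hypothetical, not part of the original code): a caller that needs to
 *	probe low memory brackets the access with the routine above so that only its own thread
 *	tolerates page 0 references, then restores normal fault handling.
 */

static void example_touch_page_zero(void) {						/* (TEST/DEBUG) hypothetical */

	volatile unsigned int probe;

	ignore_zero_fault(TRUE);									/* Faults on page 0 are now ignored for this thread */
	probe = *(volatile unsigned int *)0x10;						/* Reference low memory; a page 0 fault will not be honored */
	ignore_zero_fault(FALSE);									/* Page 0 faults are honored again */
	(void)probe;												/* Value is only illustrative */
}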
1661
1662
1663/*
1664 * Allocates a range of virtual addresses in a map as optimally as
1665 * possible for block mapping. The start address is aligned such
1666 * that a minimum number of power-of-two sized/aligned blocks is
1667 * required to cover the entire range.
1668 *
1669 * We also use a mask of valid block sizes to determine optimality.
1670 *
1671 * Note that the passed in pa is not actually mapped to the selected va,
1672 * rather, it is used to figure the optimal boundary. The actual
1673 * V to R mapping is done externally.
1674 *
1675 * This function will return KERN_INVALID_ADDRESS if an optimal address
1676  * cannot be found. It is not necessarily a fatal error; the caller may still be
1677  * able to do a non-optimal assignment.
1678 */
1679
1680kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
1681 vm_size_t size, vm_prot_t prot) {
1682
1683 vm_map_entry_t entry, next, tmp_entry, new_entry;
1684 vm_offset_t start, end, algnpa, endadr, strtadr, curradr;
1685 vm_offset_t boundary;
1686
1687 unsigned int maxsize, minsize, leading, trailing;
1688
1689 assert(page_aligned(pa));
1690 assert(page_aligned(size));
1691
1692 if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */
1693
1694 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
1695 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
1696
1697 boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */
1698 if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */
1699
1700 vm_map_lock(map); /* No touchee no mapee */
1701
1702 for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */
1703 if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */
1704 algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */
1705 leading = algnpa - pa; /* Get leading size */
1706
1707 curradr = 0; /* Start low */
1708
1709 while(1) { /* Try all possible values for this opt level */
1710
1711 curradr = curradr + boundary; /* Get the next optimal address */
1712 strtadr = curradr - leading; /* Calculate start of optimal range */
1713 endadr = strtadr + size; /* And now the end */
1714
1715 if((curradr < boundary) || /* Did address wrap here? */
1716 (strtadr > curradr) || /* How about this way? */
1717 (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */
1718
1719 if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
1720 if(endadr > map->max_offset) break; /* No room right now... */
1721
1722 if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
1723
1724 next = entry->vme_next; /* Get the next entry */
1725 if((next == vm_map_to_entry(map)) || /* Are we the last entry? */
1726 (next->vme_start >= endadr)) { /* or do we end before the next entry? */
1727
1728 new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
1729 VM_OBJECT_NULL,
1730 0, /* Offset into object of 0 */
1731 FALSE, /* No copy needed */
1732 FALSE, /* Not shared */
1733 FALSE, /* Not in transition */
1734 prot, /* Set the protection to requested */
1735 prot, /* We can't change protection */
1736 VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind,
1737 'cause we don't page in this area */
1738 VM_INHERIT_DEFAULT, /* Default inheritance */
1739 0); /* Nothing is wired */
1740
1741 vm_map_unlock(map); /* Let the world see it all */
1742 *va = strtadr; /* Tell everyone */
1743 *bnd = boundary; /* Say what boundary we are aligned to */
1744 return(KERN_SUCCESS); /* Leave, all is right with the world... */
1745 }
1746 }
1747 }
1748
1749 vm_map_unlock(map); /* Couldn't find a slot */
1750 return(KERN_INVALID_ADDRESS);
1751}
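
/*
 *	Illustrative caller sketch (hypothetical, not part of the original code): reserve an
 *	optimally aligned range with vm_map_block as described above, then enter the actual
 *	V to R translation separately (pmap_map_block, used further below, is one way to do it).
 */

static kern_return_t example_reserve_block_range(vm_map_t map, vm_offset_t pa, vm_size_t size, vm_prot_t prot) {	/* (TEST/DEBUG) hypothetical */

	vm_offset_t va, bnd;
	kern_return_t ret;

	ret = vm_map_block(map, &va, &bnd, pa, size, prot);			/* Find a range aligned to the largest usable block size */
	if(ret != KERN_SUCCESS) return ret;							/* Not fatal; the caller may fall back to a non-optimal layout */

	/* ... map pa at va here, carving the range into blocks no larger than bnd ... */

	return KERN_SUCCESS;
}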
1752
1753/*
1754 * Copies data from a physical page to a virtual page. This is used to
1755 * move data from the kernel to user state.
1756 *
1757  * Note that it is invalid to have a source that spans a page boundary.
1758 * This can block.
1759 * We don't check protection either.
1760 * And we don't handle a block mapped sink address either.
1761 *
1762 */
1763
1764kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
1765
1766 vm_map_t map;
1767 kern_return_t ret;
1768 unsigned int spaceid;
1769 int left, csize;
1770 vm_offset_t pa;
1771 register mapping *mpv, *mp;
1772 spl_t s;
1773
1774 if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */
1775 map = current_act()->map; /* Get the current map */
1776
1777 while(size) {
1778 s=splhigh(); /* Don't bother me */
1779
1780 spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */
1781
1782 mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */
1783 if(!mp) { /* Was it there? */
1784 splx(s); /* Restore the interrupt level */
1785 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* Didn't find it, try to fault it in... */
1786 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1787
1788 return KERN_FAILURE; /* Didn't find any, return no good... */
1789 }
1790 if((unsigned int)mp&1) { /* Did we timeout? */
1791 			panic("copyp2v: timeout locking physical entry for sink virtual address (%08X)\n", sink);	/* Yeah, scream about it! */
1792 splx(s); /* Restore the interrupt level */
1793 return KERN_FAILURE; /* Bad hair day, return FALSE... */
1794 }
1795
1796 mpv = hw_cpv(mp); /* Convert mapping block to virtual */
1797
1798 if(mpv->PTEr & 1) { /* Are we write protected? yes, could indicate COW */
1799 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1800 splx(s); /* Restore the interrupt level */
1801 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* check for a COW area */
1802 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1803 return KERN_FAILURE; /* Didn't find any, return no good... */
1804 }
1805 left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */
1806
1807 csize = size < left ? size : left; /* Set amount to copy this pass */
1808
1809 pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */
1810
1811 bcopy_phys((char *)source, (char *)pa, csize); /* Do a physical copy */
1812
1813 hw_set_mod(mpv->physent); /* Go set the change of the sink */
1814
1815 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1816 splx(s); /* Open up for interrupts */
1817
1818 sink += csize; /* Move up to start of next page */
1819 source += csize; /* Move up source */
1820 size -= csize; /* Set amount for next pass */
1821 }
1822 return KERN_SUCCESS;
1823}
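
/*
 *	Illustrative caller sketch (hypothetical, not part of the original code): because the
 *	physical source may not cross a page boundary, a larger copy is chopped into source
 *	page sized pieces before each copyp2v call; the sink is allowed to span pages.
 */

static kern_return_t example_copyp2v_chunked(vm_offset_t source, vm_offset_t sink, unsigned int size) {	/* (TEST/DEBUG) hypothetical */

	unsigned int csize;

	while(size) {												/* Walk the source a page at a time */
		csize = PAGE_SIZE - (source & PAGE_MASK);				/* Amount left on this source page */
		if(csize > size) csize = size;							/* Don't copy more than was asked for */
		if(copyp2v(source, sink, csize) != KERN_SUCCESS) return KERN_FAILURE;	/* Pass one page-bounded piece */
		source += csize;										/* Move both sides up */
		sink += csize;
		size -= csize;
	}

	return KERN_SUCCESS;
}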
1824
1825
1826#if DEBUG
1827/*
1828 * Dumps out the mapping stuff associated with a virtual address
1829 */
1830void dumpaddr(space_t space, vm_offset_t va) {
1831
1832 mapping *mp, *mpv;
1833 vm_offset_t pa;
1834 spl_t s;
1835
1836 s=splhigh(); /* Don't bother me */
1837
1838 mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */
1839 if(!mp) { /* Did we find one? */
1840 splx(s); /* Restore the interrupt level */
1841 printf("dumpaddr: virtual address (%08X) not mapped\n", va);
1842 return; /* Didn't find any, return FALSE... */
1843 }
1844 if((unsigned int)mp&1) { /* Did we timeout? */
1845 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */
1846 splx(s); /* Restore the interrupt level */
1847 return; /* Bad hair day, return FALSE... */
1848 }
1849 	printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va);	/* Say what address we're dumping */
1850 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1851 dumpmapping(mpv);
1852 if(mpv->physent) {
1853 dumppca(mpv);
1854 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
1855 }
1856 splx(s); /* Was there something you needed? */
1857 return; /* Tell them we did it */
1858}
1859
1860
1861
1862/*
1863 * Prints out a mapping control block
1864 *
1865 */
1866
1867void dumpmapping(struct mapping *mp) { /* Dump out a mapping */
1868
1869 printf("Dump of mapping block: %08X\n", mp); /* Header */
1870 printf(" next: %08X\n", mp->next);
1871 printf(" hashnext: %08X\n", mp->hashnext);
1872 printf(" PTEhash: %08X\n", mp->PTEhash);
1873 printf(" PTEent: %08X\n", mp->PTEent);
1874 printf(" physent: %08X\n", mp->physent);
1875 printf(" PTEv: %08X\n", mp->PTEv);
1876 printf(" PTEr: %08X\n", mp->PTEr);
1877 printf(" pmap: %08X\n", mp->pmap);
1878
1879 if(mp->physent) { /* Print physent if it exists */
1880 printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
1881 }
1882 else {
1883 printf("Associated physical entry: none\n");
1884 }
1885
1886 dumppca(mp); /* Dump out the PCA information */
1887
1888 return;
1889}
1890
1891/*
1892 * Prints out a PTEG control area
1893 *
1894 */
1895
1896void dumppca(struct mapping *mp) { /* PCA */
1897
1898 PCA *pca;
1899 unsigned int *pteg;
1900
1901 pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */
1902 pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
1903 printf(" Dump of PCA: %08X\n", pca); /* Header */
1904 printf(" PCAlock: %08X\n", pca->PCAlock);
1905 printf(" PCAallo: %08X\n", pca->flgs.PCAallo);
1906 printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
1907 printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
1908 printf("Dump of PTEG: %08X\n", pteg); /* Header */
1909 printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);
1910 printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);
1911 printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);
1912 printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);
1913 return;
1914}
1915
1916/*
1917 * Dumps starting with a physical entry
1918 */
1919
1920void dumpphys(struct phys_entry *pp) { /* Dump from physent */
1921
1922 mapping *mp;
1923 PCA *pca;
1924 unsigned int *pteg;
1925
1926 printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
1927 mp = hw_cpv(pp->phys_link);
1928 while(mp) {
1929 dumpmapping(mp);
1930 dumppca(mp);
1931 mp = hw_cpv(mp->next);
1932 }
1933
1934 return;
1935}
1936
1937#endif
1938
1939
1940kern_return_t bmapvideo(vm_offset_t *info);
1941kern_return_t bmapvideo(vm_offset_t *info) {
1942
1943 extern struct vc_info vinfo;
1944
1945 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
1946 return KERN_SUCCESS;
1947}
1948
1949kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
1950kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
1951
1952 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */
1953 return KERN_SUCCESS;
1954}
1955
1956kern_return_t bmapmapr(vm_offset_t va);
1957kern_return_t bmapmapr(vm_offset_t va) {
1958
1959 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
1960 return KERN_SUCCESS;
1961}