1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
24 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
25 * Currently, some of the functionality of this module is contained within pmap.c. We may want to move
26 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
27 *
28 * We also depend upon the structure of the phys_entry control block. We do put some processor
29 * specific stuff in there.
30 *
31 */
32
33 #include <cpus.h>
34 #include <debug.h>
35 #include <mach_kgdb.h>
36 #include <mach_vm_debug.h>
37 #include <db_machine_commands.h>
38
39 #include <kern/thread.h>
40 #include <kern/thread_act.h>
41 #include <mach/vm_attributes.h>
42 #include <mach/vm_param.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_page.h>
46 #include <kern/spl.h>
47
48 #include <kern/misc_protos.h>
49 #include <ppc/misc_protos.h>
50 #include <ppc/proc_reg.h>
51
52 #include <vm/pmap.h>
53 #include <ppc/pmap.h>
54 #include <ppc/pmap_internals.h>
55 #include <ppc/mem.h>
56
57 #include <ppc/new_screen.h>
58 #include <ppc/Firmware.h>
59 #include <ppc/mappings.h>
60 #include <ddb/db_output.h>
61
62 #include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
63
64 #define PERFTIMES 0
65
66 #if PERFTIMES && DEBUG
67 #define debugLog2(a, b, c) dbgLog2(a, b, c)
68 #else
69 #define debugLog2(a, b, c)
70 #endif
71
72 vm_map_t mapping_map = VM_MAP_NULL;
73 #define MAPPING_MAP_SIZE 33554432 /* 32MB address space */
74
75 unsigned int incrVSID = 0; /* VSID increment value */
76 unsigned int mappingdeb0 = 0;
77 unsigned int mappingdeb1 = 0;
78 extern unsigned int hash_table_size;
79 extern vm_offset_t mem_size;
80 /*
81 * ppc_prot translates from the mach representation of protections to the PPC version.
82 * We also allow for a direct setting of the protection bits. This extends the mach
83 * concepts to allow the greater control we need for Virtual Machines (VMM).
84 * Calculating it like this saves a memory reference - and maybe a couple of microseconds.
85 * It eliminates the use of this table:
86 * unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
87 */
88
89 #define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
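/*
 * A minimal self-check sketch (illustration only, not part of the build) showing that the
 * packed constant above reproduces the old table exactly: the 2-bit field of 0xE4E4AFAC at
 * position (p << 1) is the table entry for p. The name ppc_prot_check is hypothetical.
 */
#if 0
static int ppc_prot_check(void) {							/* (TEST/DEBUG) */

	static const unsigned char ppc_prot_tbl[16] =				/* The old table form quoted above */
		{ 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned int p;

	for(p = 0; p < 16; p++) {								/* Try all 16 protection encodings */
		if(ppc_prot(p) != ppc_prot_tbl[p]) return 0;			/* Mismatch - shift/mask is wrong */
	}
	return 1;											/* Shift-and-mask matches the table */
}
#endif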
90
91 /*
92 * About PPC VSID generation:
93 *
94 * This function is called to generate an address space ID. This space ID must be unique within
95 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
96 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
97 * 4, so we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
98 * to have 1M pmaps at a time, though we never could; we'd run out of memory way before then. The
99 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
100 * they are released. This causes us to lose track of what space IDs are free to be reused.
101 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
102 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
103 *
104 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
105 * calculation for virtual address lookup. An improperly chosen value could potentially cause
106 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
107 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
108 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
109 * are reserved for the segment number, which means that we really have 2^(13-4), or 512, space IDs
110 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
111 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
112 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
113 * with no overflow. I don't think that this is a problem.
114 *
115 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
116 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
117 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
118 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
119 * the same modulo 512. We can reduce this problem by having the segment number be bits
120 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
121 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
122 * I don't think that it is as significant as the other, so I'll make the space ID
123 * with segment first.
124 *
125 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
126 * While this is a problem that should only happen in periods counted in weeks, it can and
127 * will happen. This is assuming a monotonically increasing space ID. If we were to search
128 * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
129 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
130 *
131 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
132 * locked by free_pmap_lock) that is sorted in VSID sequence order.
133 *
134 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
135 * the last that was freed. Then we allocate that.
136 *
137 * NOTE: We must be called with interruptions off and free_pmap_lock held.
138 *
139 */
140
141 /*
142 * mapping_init();
143 * Do anything that needs to be done before the mapping system can be used.
144 * Hash table must be initialized before we call this.
145 *
146 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
147 */
148
149 void mapping_init(void) {
150
151 unsigned int tmp;
152
153 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
154
155 incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
156 incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
157 incrVSID |= 1; /* Set bit and add 1 */
158 return;
159
160 }
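/*
 * A portable sketch (illustration only) of the incrVSID calculation above, assuming a
 * compiler-supplied count-leading-zeros (__builtin_clz stands in for the cntlzw
 * instruction). The name incr_vsid_calc is hypothetical.
 */
#if 0
static unsigned int incr_vsid_calc(unsigned int tblsize) {		/* (TEST/DEBUG) */

	unsigned int lz, incr;

	lz = __builtin_clz(tblsize);							/* Number of leading 0s, as cntlzw would return */
	incr  = 1 << ((32 - lz + 1) >> 1);						/* Roughly the square root of the table size */
	incr |= 1 << ((32 - lz + 1) >> 2);						/* OR in roughly the fourth root */
	incr |= 1;											/* Force the increment odd */
	return incr;
}
#endif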
161
162
163 /*
164 * mapping_remove(pmap_t pmap, vm_offset_t va);
165 * Given a pmap and virtual address, this routine finds the mapping and removes it from
166 * both its PTEG hash list and the physical entry list. The mapping block will be added to
167 * the free list. If the free list threshold is reached, garbage collection will happen.
168 * We also kick back a return code to say whether or not we had one to remove.
169 *
170 * We have a strict ordering here: the mapping must be removed from the PTEG hash list before
171 * it can be removed from the physical entry list. This allows us to get by with only the PTEG
172 * hash lock at page fault time. The physical entry lock must be held while we remove the mapping
173 * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions,
174 * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
175 * It's just that simple!
176 *
177 * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
178 * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG
179 * lock to control the hash chain and may move the position of the mapping for MRU calculations.
180 *
181 * Note that mappings do not need to point to a physical entry. When they don't, it indicates
182 * the mapping is outside of physical memory and usually refers to a memory mapped device of
183 * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock
184 * routines return normally, but don't do anything.
185 */
186
187 boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR
188 Returns TRUE if a mapping was found to remove */
189
190 mapping *mp, *mpv;
191 register blokmap *blm;
192 spl_t s;
193 unsigned int *useadd, *useaddr;
194 int i;
195
196 debugLog2(1, va, pmap->space); /* start mapping_remove */
197
198 s=splhigh(); /* Don't bother me */
199
200 mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */
201
202 if(!mp) { /* Did we find one? */
203 splx(s); /* Allow 'rupts now */
204 if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */
205
206 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
207 blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
208 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
209 pmap, va, blm);
210 }
211 while ((unsigned int)mp & 2)
212 mp = (mapping *)hw_rem_blk(pmap, va, va);
213 #if 0
214 blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */
215 kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
216 blm, blm->start, blm->end, blm->PTEr);
217 #endif
218 mapping_free(hw_cpv(mp)); /* Release it */
219 debugLog2(2, 1, 0); /* End mapping_remove */
220 return TRUE; /* Tell them we did it */
221 }
222 debugLog2(2, 0, 0); /* end mapping_remove */
223 return FALSE; /* Didn't find any, return FALSE... */
224 }
225 if((unsigned int)mp&1) { /* Did we timeout? */
226 panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */
227 splx(s); /* Restore the interrupt level */
228 return FALSE; /* Bad hair day, return FALSE... */
229 }
230
231 mpv = hw_cpv(mp); /* Get virtual address of mapping */
232 #if DEBUG
233 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
234 #else
235 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
236 #endif
237 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
238 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
239 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
240
241 #if 0
242 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
243 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
244 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
245 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
246 }
247 }
248 #endif
249
250 hw_rem_map(mp); /* Remove the corresponding mapping */
251
252 if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
253
254 splx(s); /* Was there something you needed? */
255
256 mapping_free(mpv); /* Add mapping to the free list */
257 debugLog2(2, 1, 0); /* end mapping_remove */
258 return TRUE; /* Tell them we did it */
259 }
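/*
 * A minimal sketch (illustration only) of the pmapUsage bookkeeping done above and in
 * mapping_make below, assuming each usage slot is a 16-bit count packed two per 32-bit
 * word with the word-aligned slot in the high halfword. One word-wide atomic of either
 * 0x00010000 or 1 then adjusts exactly one slot; the removal paths do the same with
 * hw_atomic_sub. The name usage_bump is hypothetical.
 */
#if 0
static void usage_bump(unsigned short *slot) {				/* (TEST/DEBUG) */

	unsigned int *word;

	word = (unsigned int *)((unsigned int)slot & -4);			/* Round down to the containing word */
	(void)hw_atomic_add(word, ((unsigned int *)slot == word) ? 0x00010000 : 1);	/* High or low halfword */
}
#endif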
260
261 /*
262 * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
263 *
264 * This guy releases any mappings that exist for a physical page on a specified map.
265 * We get the lock on the phys_entry, and hold it throughout this whole routine.
266 * That way, no one can change the queue out from underneath us. We keep fetching
267 * the physent's mapping anchor until it is null, then we're done.
268 *
269 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
270 * decrement the pmap's residency count. Then we release the mapping back to the free list.
271 *
272 */
273
274
275 void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) { /* Remove all mappings from specified pmap for this physent */
276
277 mapping *mp, *mp_next, *mpv;
278 spl_t s;
279 unsigned int *useadd, *useaddr, uindx;
280 int i;
281
282 s=splhigh(); /* Don't bother me */
283
284 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
285 panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
286 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
287 }
288
289 mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
290
291 while(mp) { /* Keep going so long as there's another */
292
293 mpv = hw_cpv(mp); /* Get the virtual address */
294 if(mpv->pmap != pmap) {
295 mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
296 continue;
297 }
298 #if DEBUG
299 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
300 #else
301 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
302 #endif
303
304 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join seg # and top 2 bits of API */
305 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
306 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
307 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
308
309
310
311 mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
312 hw_rem_map(mp); /* Remove the mapping */
313 mapping_free(mpv); /* Add mapping to the free list */
314 mp = mp_next;
315 }
316
317 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
318 splx(s);
319 return;
320 }
321 /*
322 * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
323 *
324 * This guy releases any mappings that exist for a physical page.
325 * We get the lock on the phys_entry, and hold it throughout this whole routine.
326 * That way, no one can change the queue out from underneath us. We keep fetching
327 * the physent's mapping anchor until it is null, then we're done.
328 *
329 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
330 * decrement the pmap's residency count. Then we release the mapping back to the free list.
331 *
332 */
333
334 void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */
335
336 mapping *mp, *mpv;
337 spl_t s;
338 unsigned int *useadd, *useaddr, uindx;
339 int i;
340
341 s=splhigh(); /* Don't bother me */
342 debugLog2(3, pp->pte1, 0); /* start mapping_purge */
343
344 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
345 panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
346 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
347 }
348
349 while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
350
351 mpv = hw_cpv(mp); /* Get the virtual address */
352 #if DEBUG
353 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
354 #else
355 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
356 #endif
357
358 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
359 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
360 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
361 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
362
363 #if 0
364 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
365 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
366 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
367 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
368 }
369 }
370 #endif
371
372
373 hw_rem_map(mp); /* Remove the mapping */
374 mapping_free(mpv); /* Add mapping to the free list */
375 }
376
377 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
378
379 debugLog2(4, pp->pte1, 0); /* end mapping_purge */
380 splx(s); /* Was there something you needed? */
381 return; /* Tell them we did it */
382 }
383
384
385 /*
386 * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
387 *
388 * This routine takes the given parameters, builds a mapping block, and queues it into the
389 * correct lists.
390 *
391 * The pp parameter can be null. This allows us to make a mapping that is not
392 * associated with any physical page. We may need this for certain I/O areas.
393 *
394 * If the phys_entry address is null, we neither lock nor chain into it.
395 * If locked is 1, we already hold the lock on the phys_entry and will neither acquire nor release it.
396 */
397
398 mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */
399
400 register mapping *mp, *mpv;
401 unsigned int *useadd, *useaddr;
402 spl_t s;
403 int i;
404
405 debugLog2(5, va, pa); /* start mapping_make */
406 mpv = mapping_alloc(); /* Get a spare mapping block */
407
408 mpv->pmap = pmap; /* Initialize the pmap pointer */
409 mpv->physent = pp; /* Initialize the pointer to the physical entry */
410 mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */
411 mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the VSID */
412
413 s=splhigh(); /* Don't bother from now on */
414
415 mp = hw_cvp(mpv); /* Get the physical address of this */
416
417 if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? */
418 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
419 panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
420 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
421 }
422 }
423
424 if(pp) { /* See if there is a physical entry */
425 mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mapping's forward pointer */
426 pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */
427 }
428
429 hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */
430
431 (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */
432 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
433 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
434 (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */
435 #if 0
436 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
437 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
438 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
439 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
440 }
441 }
442 #endif
443
444 if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */
445
446 splx(s); /* Ok for interruptions now */
447 debugLog2(6, pmap->space, prot); /* end mapping_make */
448 return mpv; /* Leave... */
449 }
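/*
 * A minimal sketch (illustration only) of how the PTEv word built above decomposes,
 * assuming the usual PowerPC PTE word-0 layout (VSID in bits 1-24, API in bits 26-31,
 * big-endian bit numbering). The name pte_v_build is hypothetical; it just makes the
 * field arithmetic in mapping_make explicit.
 */
#if 0
static unsigned int pte_v_build(unsigned int space, unsigned int va) {	/* (TEST/DEBUG) */

	unsigned int seg, api;

	seg = va >> 28;										/* Segment number: top 4 bits of the vaddr */
	api = (va >> 22) & 0x3F;								/* Abbreviated page index: next 6 bits */
	return (seg << 27) | (space << 7) | api;					/* VSID = (segment << 20) | space, shifted into place */
}
#endif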
450
451
452 /*
453 * Enters optimal translations for odd-sized V=F blocks.
454 *
455 * Builds a block map for each power-of-two hunk o' address
456 * that exists. This is specific to the processor type.
457 * PPC uses BAT register size stuff. Future PPC might have
458 * something else.
459 *
460 * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
461 * stupid to know otherwise so we only look at the va anyhow, so there...
462 *
463 */
464
465 void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */
466
467 register blokmap *blm, *oblm;
468 unsigned int pg;
469 unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
470 int i, maxshft, nummax, minshft;
471
472 #if 1
473 kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
474 pmap, va, pa, bnd, size, prot, attr);
475 #endif
476
477 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
478 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
479
480 minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */
481 maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */
482
483 leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */
484 trailing = size - leading; /* Get size of trailing area */
485 tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */
486
487 #if 1
488 kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */
489 #endif
490
491 if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
492
493 va = va + tomin; /* Adjust virtual start */
494 pa = pa + tomin; /* Adjust physical start */
495 leading = leading - tomin; /* Adjust leading size */
496
497 /*
498 * Some of this code is very classic PPC. We need to fix this up.
499 */
500
501 leading = leading >> minshft; /* Position for bit testing */
502 cbsize = minsize; /* Set the minimum size */
503
504 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
505
506 if(leading & 1) {
507 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
508 pa = pa + cbsize; /* Bump up physical address */
509 va = va + cbsize; /* Bump up virtual address */
510 }
511
512 leading = leading >> 1; /* Shift up to next size */
513 cbsize = cbsize << 1; /* Here too */
514
515 }
516
517 nummax = trailing >> maxshft; /* Get number of max size blocks left */
518 for(i=0; i < nummax - 1; i++) { /* Account for all max size blocks left but 1 */
519 pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
520
521 pa = pa + maxsize; /* Bump up physical address */
522 va = va + maxsize; /* Bump up virtual address */
523 trailing -= maxsize; /* Back off what we just did */
524 }
525
526 cbsize = maxsize; /* Start at maximum size */
527
528 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
529
530 if(trailing & cbsize) {
531 trailing &= ~cbsize; /* Remove the block we are allocating */
532 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
533 pa = pa + cbsize; /* Bump up physical address */
534 va = va + cbsize; /* Bump up virtual address */
535 }
536 cbsize = cbsize >> 1; /* Next size down */
537 }
538
539 if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
540
541 return; /* Return */
542 }
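/*
 * A minimal sketch (illustration only) of the decomposition idea used above: once the
 * start is aligned to the minimum block size, each set bit of the remaining length
 * selects exactly one power-of-two block, so a region needs at most one block per size.
 * The names map_by_bits and enter_block are hypothetical; enter_block stands in for
 * pmap_map_block(pmap, va, pa, sz, prot, attr, 0).
 */
#if 0
static void map_by_bits(unsigned int va, unsigned int pa, unsigned int len,
		int minshft, int maxshft) {						/* (TEST/DEBUG) */

	unsigned int sz;
	int i;

	for(i = minshft; i <= maxshft; i++) {					/* Small to large, like the leading-area loop */
		sz = 1 << i;									/* Current power-of-two block size */
		if(len & sz) {									/* This bit set means one block of this size */
			enter_block(va, pa, sz);						/* Map it (stand-in for pmap_map_block) */
			va += sz; pa += sz; len -= sz;					/* Step past the block just mapped */
		}
	}
}
#endif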
543
544
545 /*
546 * Enters translations for odd-sized V=F blocks.
547 *
548 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
549 * will be split into normal-sized page mappings.
550 *
551 * The higher level VM map should be locked to insure that we don't have a
552 * double diddle here.
553 *
554 * We panic if we get a block that overlaps with another. We do not merge adjacent
555 * blocks because removing any address within a block removes the entire block, and it
556 * would really mess things up if we trashed too much.
557 *
558 * Once a block is mapped, it is immutable, that is, protection, catch mode, etc. cannot
559 * be changed. The block must be unmapped and then remapped with the new stuff.
560 * We also do not keep track of reference or change flags.
561 *
562 * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
563 * with interruptions and translation disabled and under the control of the lock located
564 * in the first block map. MRU is used because it is expected that the same entry
565 * will be accessed repeatedly while PTEs are being generated to cover those addresses.
566 *
567 */
568
569 void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
570
571 register blokmap *blm, *oblm, *oblm_virt;
572 unsigned int pg;
573
574 #if 0
575 kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
576 pmap, va, pa, size, prot, attr);
577 #endif
578
579 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
580 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
581 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
582 #if 0
583 kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
584 va + pg, pa + pg);
585 #endif
586 }
587 return; /* All done */
588 }
589
590 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
591
592 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
593 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
594 blm->current = 0;
595 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
596 blm->space = pmap->space; /* Set the space (only needed for remove) */
597 blm->blkFlags = flags; /* Set the block's flags */
598
599 #if 0
600 kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
601 blm, blm->start, blm->end, blm->PTEr);
602 #endif
603
604 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
605
606 #if 0
607 kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
608 blm, pmap->bmaps);
609 #endif
610
611 do {
612 oblm = hw_add_blk(pmap, blm);
613 if ((unsigned int)oblm & 2) {
614 oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
615 mapping_remove(pmap, oblm_virt->start);
616 };
617 } while ((unsigned int)oblm & 2);
618
619 if (oblm) {
620 oblm = (blokmap *)hw_cpv((mapping *) oblm); /* Get the old block virtual address */
621 blm = (blokmap *)hw_cpv((mapping *)blm); /* Back to the virtual address of this */
622 if((oblm->start != blm->start) || /* If we have a match, then this is a fault race and */
623 (oblm->end != blm->end) || /* is acceptable */
624 (oblm->PTEr != blm->PTEr))
625 panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */
626 mapping_free((struct mapping *)blm);
627 }
628
629 #if 0
630 kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
631 blm, pmap->bmaps);
632 #endif
633
634 return; /* Return */
635 }
636
637
638 /*
639 * Optimally enters translations for odd-sized V=F blocks.
640 *
641 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
642 * will be split into normal-sized page mappings.
643 *
644 * This one is different from pmap_map_block in that it will allocate its own virtual
645 * target address. Rather than allocating a single block,
646 * it will also allocate multiple blocks that are power-of-two aligned/sized. This allows
647 * hardware-level mapping that takes advantage of BAT maps or large page sizes.
648 *
649 * Most considerations for pmap_map_block apply.
650 *
651 *
652 */
653
654 kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
655 vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */
656
657 register blokmap *blm, *oblm;
658 unsigned int pg;
659 kern_return_t err;
660 unsigned int bnd;
661
662 #if 1
663 kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
664 map, pa, size, prot, attr);
665 #endif
666
667 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
668 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
669 if(err) {
670 #if DEBUG
671 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */
672 #endif
673 return(err); /* Pass back the error */
674 }
675 #if 1
676 kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */
677 #endif
678
679 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
680 mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
681 }
682 return(KERN_SUCCESS); /* All done */
683 }
684
685 err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */
686
687 if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */
688 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
689 if(err) {
690 #if DEBUG
691 kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */
692 #endif
693 return(err); /* Pass back the error */
694 }
695 #if 1
696 kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */
697 #endif
698 pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */
699 return KERN_SUCCESS; /* All done now */
700 }
701
702 if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... */
703 #if DEBUG
704 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we couldn' do it */
705 #endif
706 return(err);
707 }
708
709 #if 1
710 kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */
711 #endif
712 mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */
713 return(KERN_SUCCESS); /* All done */
714 }
715
716
717 #if 0
718
719 /*
720 * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
721 * areas.
722 *
723 * Once blocks are merged, they act like one block, i.e., if you remove it,
724 * it all goes...
725 *
726 * This can only be used during boot. Ain't no way we can handle SMP
727 * or preemption easily, so we restrict it. We don't check either. We
728 * assume only skilled professional programmers will attempt using this
729 * function. We assume no responsibility, either real or imagined, for
730 * injury or death resulting from unauthorized use of this function.
731 *
732 * No user serviceable parts inside. Notice to be removed by end-user only,
733 * under penalty of applicable federal and state laws.
734 *
735 * See descriptions of pmap_map_block. Ignore the part where we say we panic for
736 * overlapping areas. Note that we do panic if we can't merge.
737 *
738 */
739
740 void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */
741
742 register blokmap *blm, *oblm;
743 unsigned int pg;
744 spl_t s;
745
746 #if 1
747 kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
748 pmap, va, pa, size, prot, attr);
749 #endif
750
751 s=splhigh(); /* Don't bother from now on */
752 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
753 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
754 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
755 }
756 return; /* All done */
757 }
758
759 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
760
761 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
762 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
763 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
764
765 #if 1
766 kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
767 blm, blm->start, blm->end, blm->PTEr);
768 #endif
769
770 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
771
772 #if 1
773 kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
774 blm, pmap->bmaps);
775 #endif
776
777 if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
778 panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
779 }
780
781 #if 1
782 kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
783 blm, pmap->bmaps);
784 #endif
785 splx(s); /* Ok for interruptions now */
786
787 return; /* Return */
788 }
789 #endif
790
791 /*
792 * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
793 *
794 * This routine takes a physical entry and runs through all mappings attached to it and changes
795 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
796 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
797 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
798 * higher to lower, lower to higher.
799 *
800 * Phys_entry is unlocked.
801 */
802
803 void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */
804
805 spl_t spl;
806
807 debugLog2(9, pp->pte1, prot); /* start mapping_protect_phys */
808 spl=splhigh(); /* No interruptions during this */
809 if(!locked) { /* Do we need to lock the physent? */
810 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
811 panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
812 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
813 }
814 }
815
816 hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */
817
818 if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
819 splx(spl); /* Restore interrupt state */
820 debugLog2(10, pp->pte1, 0); /* end mapping_protect_phys */
821
822 return; /* Leave... */
823 }
824
825 /*
826 * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
827 *
828 * This routine takes a pmap and virtual address and changes
829 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
830 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
831 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
832 * higher to lower, lower to higher.
833 *
834 */
835
836 void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
837
838 mapping *mp, *mpv;
839 spl_t s;
840
841 debugLog2(9, vaddr, pmap); /* start mapping_protect */
842 s = splhigh(); /* Don't bother me */
843
844 mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */
845
846 if(!mp) { /* Did we find one? */
847 splx(s); /* Restore the interrupt level */
848 debugLog2(10, 0, 0); /* end mapping_protect */
849 return; /* Didn't find any... */
850 }
851 if((unsigned int)mp & 1) { /* Did we timeout? */
852 panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */
853 splx(s); /* Restore the interrupt level */
854 return; /* Bad hair day... */
855 }
856
857 hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */
858
859 mpv = hw_cpv(mp); /* Get virtual address of mapping */
860 if(mpv->physent) { /* If there is a physical page, */
861 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
862 }
863 splx(s); /* Restore interrupt state */
864 debugLog2(10, mpv->PTEr, 0); /* end mapping_protect */
865
866 return; /* Leave... */
867 }
868
869 /*
870 * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
871 *
872 * This routine takes a physical entry and sets the physical attributes. There can be no mappings
873 * associated with this page when we do it.
874 */
875
876 void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */
877
878 debugLog2(11, pp->pte1, prot); /* start mapping_phys_attr */
879
880 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
881 panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
882 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
883 }
884
885 hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */
886
887 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
888 debugLog2(12, pp->pte1, wimg); /* end mapping_phys_attr */
889
890 return; /* Leave... */
891 }
892
893 /*
894 * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
895 *
896 * This routine takes a physical entry and runs through all mappings attached to it and invalidates
897 * any PTEs it finds.
898 *
899 * Interruptions must be disabled and the physical entry locked at entry.
900 */
901
902 void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */
903
904 hw_inv_all(pp); /* Go invalidate all PTEs pointing to this physical page */
905
906 return; /* Leave... */
907 }
908
909
910 /*
911 * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
912 *
913 * This routine takes a physical entry and runs through all mappings attached to it and turns
914 * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before
915 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
916 * either (I don't think, maybe I'll change my mind later).
917 *
918 * Interruptions must be disabled and the physical entry locked at entry.
919 */
920
921 void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */
922
923 hw_clr_mod(pp); /* Go clear the change bit of a physical page */
924 return; /* Leave... */
925 }
926
927
928 /*
929 * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
930 *
931 * This routine takes a physical entry and runs through all mappings attached to it and turns
932 * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before
933 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
934 * either (I don't think, maybe I'll change my mind later).
935 *
936 * Interruptions must be disabled and the physical entry locked at entry.
937 */
938
939 void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */
940
941 hw_set_mod(pp); /* Go set the change bit of a physical page */
942 return; /* Leave... */
943 }
944
945
946 /*
947 * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
948 *
949 * This routine takes a physical entry and runs through all mappings attached to it and turns
950 * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
951 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
952 * either (I don't think, maybe I'll change my mind later).
953 *
954 * Interruptions must be disabled at entry.
955 */
956
957 void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */
958
959 mapping *mp;
960
961 debugLog2(13, pp->pte1, 0); /* start mapping_clr_ref */
962 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */
963 panic("Lock timeout getting lock on physical entry\n"); /* Just die... */
964 }
965 hw_clr_ref(pp); /* Go clear the reference bit of a physical page */
966 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */
967 debugLog2(14, pp->pte1, 0); /* end mapping_clr_ref */
968 return; /* Leave... */
969 }
970
971
972 /*
973 * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
974 *
975 * This routine takes a physical entry and runs through all mappings attached to it and turns
976 * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
977 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
978 * either (I don't think, maybe I'll change my mind later).
979 *
980 * Interruptions must be disabled and the physical entry locked at entry.
981 */
982
983 void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */
984
985 hw_set_ref(pp); /* Go set the reference bit of a physical page */
986 return; /* Leave... */
987 }
988
989
990 /*
991 * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
992 *
993 * This routine takes a physical entry and runs through all mappings attached to it and tests
994 * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before
995 * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
996 * either (I don't think, maybe I'll change my mind later).
997 *
998 * Interruptions must be disabled and the physical entry locked at entry.
999 */
1000
1001 boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */
1002
1003 return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */
1004 }
1005
1006
1007 /*
1008 * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
1009 *
1010 * This routine takes a physical entry and runs through all mappings attached to it and tests
1011 * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1012 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1013 * either (I don't think, maybe I'll change my mind later).
1014 *
1015 * Interruptions must be disabled and the physical entry locked at entry.
1016 */
1017
1018 boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */
1019
1020 return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */
1021 }
1022
1023
1024 /*
1025 * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
1026 *
1027 * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
1028 */
1029
1030 void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */
1031
1032 pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
1033
1034 return; /* Leave... */
1035 }
1036
1037
1038 /*
1039 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
1040 *
1041 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
1042 * the number of free mappings remaining, and if below a threshold, replenishes them.
1043 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
1044 * a new one is allocated.
1045 *
1046 * This routine allocates and/or frees memory and must be called from a safe place.
1047 * Currently, vm_pageout_scan is the safest place.
1048 */
1049
1050 thread_call_t mapping_adjust_call;
1051 static thread_call_data_t mapping_adjust_call_data;
1052
1053 void mapping_adjust(void) { /* Adjust free mappings */
1054
1055 kern_return_t retr;
1056 mappingblok *mb, *mbn;
1057 spl_t s;
1058 int allocsize, i;
1059 extern int vm_page_free_count;
1060
1061 if(mapCtl.mapcmin <= MAPPERBLOK) {
1062 mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
1063
1064 #if DEBUG
1065 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
1066 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
1067 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
1068 #endif
1069 }
1070
1071 s = splhigh(); /* Don't bother from now on */
1072 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1073 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
1074 }
1075
1076 if (mapping_adjust_call == NULL) {
1077 thread_call_setup(&mapping_adjust_call_data,
1078 (thread_call_func_t)mapping_adjust,
1079 (thread_call_param_t)NULL);
1080 mapping_adjust_call = &mapping_adjust_call_data;
1081 }
1082
1083 while(1) { /* Keep going until we've got enough */
1084
1085 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
1086 if(allocsize < 1) break; /* Leave if we have all we need */
1087
1088 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
1089 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
1090 mapCtl.mapcreln--; /* Back off the count */
1091 allocsize = MAPPERBLOK; /* Show we allocated one block */
1092 }
1093 else { /* No free ones, try to get it */
1094
1095 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
1096
1097 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1098 splx(s); /* Restore 'rupts */
1099
1100 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
1101 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
1102 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
1103 break;
1104 }
1105 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
1106 }
1107 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
1108 s = splhigh(); /* Don't bother from now on */
1109 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1110 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
1111 }
1112 }
1113 if (retr != KERN_SUCCESS)
1114 break; /* Failed to allocate, bail out... */
1115 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
1116 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1117 mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1118 }
1119 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1120 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1121 }
1122
1123 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
1124 mapCtl.mapcrecurse = 0; /* We are done now */
1125 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1126 splx(s); /* Restore 'rupts */
1127 return; /* Return... */
1128 }
1129
1130 mbn = mapCtl.mapcrel; /* Get first pending release block */
1131 mapCtl.mapcrel = 0; /* Dequeue them */
1132 mapCtl.mapcreln = 0; /* Set count to 0 */
1133
1134 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1135 splx(s); /* Restore 'rupts */
1136
1137 while((unsigned int)mbn) { /* Toss 'em all */
1138 mb = mbn->nextblok; /* Get the next */
1139 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
1140 mbn = mb; /* Chain to the next */
1141 }
1142
1143 __asm__ volatile("sync"); /* Make sure all is well */
1144 mapCtl.mapcrecurse = 0; /* We are done now */
1145 return;
1146 }
1147
1148 /*
1149 * mapping_free(mapping *mp) - release a mapping to the free list
1150 *
1151 * This routine takes a mapping and adds it to the free list.
1152 * If this mapping makes the block non-empty, we queue it to the free block list.
1153 * NOTE: we might want to queue it to the end to quelch the pathological
1154 * case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
1155 * If this release fills a block and we are above the threshold, we release the block.
1156 */
1157
1158 void mapping_free(struct mapping *mp) { /* Release a mapping */
1159
1160 mappingblok *mb, *mbn;
1161 spl_t s;
1162 unsigned int full, mindx;
1163
1164 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */
1165 mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
1166
1167 s = splhigh(); /* Don't bother from now on */
1168 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1169 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
1170 }
1171
1172 full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */
1173 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
1174
1175 if(full) { /* If it was full before this: */
1176 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
1177 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
1178 if(!((unsigned int)mapCtl.mapclast))
1179 mapCtl.mapclast = mb;
1180 }
1181
1182 mapCtl.mapcfree++; /* Bump free count */
1183 mapCtl.mapcinuse--; /* Decrement in use count */
1184
1185 mapCtl.mapcfreec++; /* Count total calls */
1186
1187 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
1188 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
1189 == 0xFFFFFFFF) { /* See if empty now */
1190
1191 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
1192 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
1193 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
1194 }
1195 else { /* We're not first */
1196 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
1197 if(mbn->nextblok == mb) break; /* Is the next one ours? */
1198 }
1199 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
1200 mbn->nextblok = mb->nextblok; /* Dequeue us */
1201 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
1202 }
1203
1204 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
1205 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
1206 mapCtl.mapcnext = mb; /* Chain us to the head */
1207 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
1208 }
1209 else {
1210 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
1211 mapCtl.mapcreln++; /* Count on release list */
1212 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
1213 mapCtl.mapcrel = mb; /* Chain us in front */
1214 }
1215 }
1216 }
1217
1218 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
1219 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1220 thread_call_enter(mapping_adjust_call); /* Go toss some */
1221 }
1222 }
1223 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1224 splx(s); /* Restore 'rupts */
1225
1226 return; /* Bye, dude... */
1227 }
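/*
 * A minimal sketch (illustration only) of the free-bitmap indexing used above, assuming
 * 32-byte mapping slots packed 128 to a page with slot 0 holding the block header. The
 * bit for a slot lives in mapblokfree[slot >> 5], counted from the most significant bit.
 * The name slot_mark_free is hypothetical.
 */
#if 0
static void slot_mark_free(mappingblok *mb, struct mapping *mp) {		/* (TEST/DEBUG) */

	unsigned int slot;

	slot = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;			/* 32-byte slot number within the page */
	mb->mapblokfree[slot >> 5] |= 0x80000000 >> (slot & 31);		/* Set its free bit, MSB first */
}
#endif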
1228
1229
1230 /*
1231 * mapping_alloc(void) - obtain a mapping from the free list
1232 *
1233 * This routine takes a mapping off the free list and returns its address.
1234 *
1235 * We do this by finding a free entry in the first block and allocating it.
1236 * If this allocation empties the block, we remove it from the free list.
1237 * If this allocation drops the total number of free entries below a threshold,
1238 * we allocate a new block.
1239 *
1240 */
1241
1242 mapping *mapping_alloc(void) { /* Obtain a mapping */
1243
1244 register mapping *mp;
1245 mappingblok *mb, *mbn;
1246 spl_t s;
1247 int mindx;
1248 kern_return_t retr;
1249
1250 s = splhigh(); /* Don't bother from now on */
1251 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1252 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1253 }
1254
1255 if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */
1256 unsigned int i;
1257 struct mappingflush mappingflush;
1258 PCA *pca_min, *pca_max;
1259 PCA *pca_base;
1260
1261 pca_min = (PCA *)(hash_table_base+hash_table_size);
1262 pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);
1263
1264 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1265 mapCtl.mapcflush.mappingcnt = 0;
1266 pca_base = mapCtl.mapcflush.pcaptr;
1267 do {
1268 hw_select_mappings(&mapCtl.mapcflush);
1269 mapCtl.mapcflush.pcaptr++;
1270 if (mapCtl.mapcflush.pcaptr >= pca_max)
1271 mapCtl.mapcflush.pcaptr = pca_min;
1272 } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));
1273
1274 if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
1275 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1276 panic("mapping_alloc - all mappings are wired\n");
1277 }
1278 mappingflush = mapCtl.mapcflush;
1279 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1280 splx(s);
1281 for (i=0;i<mappingflush.mappingcnt;i++)
1282 mapping_remove(mappingflush.mapping[i].pmap,
1283 mappingflush.mapping[i].offset);
1284 s = splhigh();
1285 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
1286 panic("mapping_alloc - timeout getting control lock\n");
1287 }
1288 }
1289 mb = mapCtl.mapcnext;
1290 }
1291
1292 if(!(mindx = mapalc(mb))) { /* Allocate a slot */
1293 panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */
1294 }
1295
1296 if(mindx < 0) { /* Did we just take the last one */
1297 mindx = -mindx; /* Make positive */
1298 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1299 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1300 }
1301
1302 mapCtl.mapcfree--; /* Decrement free count */
1303 mapCtl.mapcinuse++; /* Bump in use count */
1304
1305 mapCtl.mapcallocc++; /* Count total calls */
1306
1307 /*
1308 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1309 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1310 * rescuable, we will kick the misc scan, which will allocate some for us. We only do this
1311 * if we haven't already done it.
1312 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1313 * the release list with as much as we need until threads start.
1314 */
1315 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1316 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
1317 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1318 mapCtl.mapcreln--; /* Back off the count */
1319 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1320 }
1321 else { /* We need to replenish */
1322 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1323 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1324 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1325 }
1326 }
1327 }
1328 }
1329
1330 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1331 splx(s); /* Restore 'rupts */
1332
1333 mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */
1334 __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Zero the mapping with a single cache-block zero */
1335 return mp; /* Send it back... */
1336 }
1337
1338
1339 void
1340 consider_mapping_adjust()
1341 {
1342 spl_t s;
1343
1344 s = splhigh(); /* Don't bother from now on */
1345 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1346 panic("consider_mapping_adjust - timeout getting control lock\n"); /* Tell all and die */
1347 }
1348
1349 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1350 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1351 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1352 }
1353 }
1354
1355 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1356 splx(s); /* Restore 'rupts */
1357
1358 }
1359
1360
1361
1362 /*
1363 * void mapping_free_init(mbl, perm, locked) - Adds a block of storage to the free mapping list
1364 *
1365 * The mapping block is a page size area on a page boundary. It contains 1 header and 127
1366 * mappings. This call adds and initializes a block for use.
1367 *
1368 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1369 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1370 * corresponds to the header). The translation mask is the XOR of the virtual and real
1371 * addresses (needless to say, the block must be wired).
1372 *
1373 * We handle these mappings the same way as saveareas: the block is only on the chain so
1374 * long as there are free entries in it.
1375 *
1376 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1377 * mappings. Blocks marked PERM won't ever be released.
1378 *
1379 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1380 * list. We do this only at start up time. This is done because we only allocate blocks
1381 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1382 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1383 * them on the release queue, the allocate routine will rescue them. Then when the
1384 * pageout scan starts, all extra ones will be released.
1385 *
1386 */
1387
1388
1389 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1390 /* Sets up a block of mappings.
1391 perm indicates whether the block is permanent, can be released,
1392 or goes straight to the release queue.
1393 locked indicates if the lock is held already */
1394
1395 mappingblok *mb;
1396 spl_t s;
1397 int i;
1398 unsigned int raddr;
1399
1400 mb = (mappingblok *)mbl; /* Start of area */
1401
1402
1403 if(perm >= 0) { /* See if we need to initialize the block */
1404 if(perm) {
1405 raddr = (unsigned int)mbl; /* Perm means V=R */
1406 mb->mapblokflags = mbPerm; /* Set perm */
1407 }
1408 else {
1409 raddr = kvtophys(mbl); /* Get real address */
1410 mb->mapblokflags = 0; /* Set not perm */
1411 }
1412
1413 mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */
1414
1415 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1416 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1417 mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */
1418 mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */
1419 }
1420
1421 s = splhigh(); /* Don't bother from now on */
1422 if(!locked) { /* Do we need the lock? */
1423 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1424 panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */
1425 }
1426 }
1427
1428 if(perm < 0) { /* Direct to release queue? */
1429 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1430 mapCtl.mapcrel = mb; /* Queue us on in */
1431 mapCtl.mapcreln++; /* Count the free block */
1432 }
1433 else { /* Add to the free list */
1434
1435 mb->nextblok = 0; /* We always add to the end */
1436 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1437
1438 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1439 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1440 }
1441 else { /* We are not the first */
1442 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1443 mapCtl.mapclast = mb; /* We are now last */
1444 }
1445 }
1446
1447 if(!locked) { /* Do we need to unlock? */
1448 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1449 }
1450 splx(s); /* Restore 'rupts */
1451 return; /* All done, leave... */
1452 }
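/*
 *	Illustrative sketch, not part of the original source: given the free bit maps
 *	initialized above (slot 0 is the block header, slots 1-127 are mappings, and the
 *	0x7FFFFFFF initialization implies big-endian bit numbering), a slot index s maps
 *	to bit (0x80000000 >> (s & 31)) of mapblokfree[s >> 5].  A hypothetical test for
 *	"is slot s free" would look like this:
 */
#if 0	/* example only, never compiled */
static int mapping_slot_is_free(mappingblok *mb, int s) {	/* s in 1..127; slot 0 is the header */
	return (mb->mapblokfree[s >> 5] & (0x80000000 >> (s & 31))) != 0;	/* A set bit means the slot is free */
}
#endif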
1453
1454
1455 /*
1456 * void mapping_prealloc(unsigned int) - Preallocates mappings for large request
1457 *
1458 * No locks can be held, because we allocate memory here.
1459 * This routine needs a corresponding mapping_relpre call to remove the
1460 * hold off flag so that the adjust routine will free the extra mapping
1461 * blocks on the release list. I don't like this, but I don't know
1462 * how else to do this for now...
1463 *
1464 */
1465
1466 void mapping_prealloc(unsigned int size) { /* Preallocates mappings for large request */
1467
1468 int nmapb, i;
1469 kern_return_t retr;
1470 mappingblok *mbn;
1471 spl_t s;
1472
1473 s = splhigh(); /* Don't bother from now on */
1474 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1475 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1476 }
1477
1478 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1479
1480 mapCtl.mapcholdoff++; /* Bump the hold off count */
1481
1482 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1483 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1484 splx(s); /* Restore 'rupts */
1485 return;
1486 }
1487 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1488 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1489 splx(s); /* Restore 'rupts */
1490 return;
1491 }
1492 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1493
1494 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1495 splx(s); /* Restore 'rupts */
1496
1497 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1498 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1499 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1500 panic("Whoops... Not a bit of wired memory left for anyone\n");
1501 }
1502 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
1503 }
1504 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1505 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1506
1507 mapCtl.mapcrecurse = 0; /* We are done now */
1508 }
1509
1510 /*
1511 * void mapping_relpre(void) - Releases preallocation release hold off
1512 *
1513 * This routine removes the
1514 * hold off flag so that the adjust routine will free the extra mapping
1515 * blocks on the release list. I don't like this, but I don't know
1516 * how else to do this for now...
1517 *
1518 */
1519
1520 void mapping_relpre(void) { /* Releases release hold off */
1521
1522 spl_t s;
1523
1524 s = splhigh(); /* Don't bother from now on */
1525 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1526 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1527 }
1528 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1529 panic("mapping_relpre: hold-off count went negative\n");
1530 }
1531
1532 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1533 splx(s); /* Restore 'rupts */
1534 }
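/*
 *	Illustrative sketch, not part of the original source: mapping_prealloc and
 *	mapping_relpre are meant to bracket a large request so the adjust routine does
 *	not trim the release list out from under it.  A hypothetical caller might do:
 */
#if 0	/* example only, never compiled */
static void mapping_prealloc_example(unsigned int size) {
	mapping_prealloc(size);			/* Stock the release queue and raise the hold off */
	/* ... enter the mappings for the large range here ... */
	mapping_relpre();			/* Drop the hold off so mapping_adjust can trim again */
}
#endif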
1535
1536 /*
1537 * void mapping_free_prime(void) - Primes the mapping block release list
1538 *
1539 * See mapping_free_init.
1540 * No locks can be held, because we allocate memory here.
1541 * Called while only one processor is running.
1542 *
1543 */
1544
1545 void mapping_free_prime(void) { /* Primes the mapping block release list */
1546
1547 int nmapb, i;
1548 kern_return_t retr;
1549 mappingblok *mbn;
1550 vm_offset_t mapping_min;
1551
1552 retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
1553 FALSE, TRUE, &mapping_map);
1554
1555 if (retr != KERN_SUCCESS)
1556 panic("mapping_free_prime: kmem_suballoc failed");
1557
1558
1559 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1560 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1561
1562 #if DEBUG
1563 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1564 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1565 #endif
1566
1567 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1568 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1569 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1570 panic("Whoops... Not a bit of wired memory left for anyone\n");
1571 }
1572 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1573 }
1574 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1575 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1576 }
1577
1578
1579
1580 void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1581 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1582 {
1583 *count = mapCtl.mapcinuse;
1584 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1585 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1586 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1587 *alloc_size = PAGE_SIZE;
1588
1589 *collectable = 1;
1590 *exhaustable = 0;
1591 }
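/*
 *	Worked example, not part of the original source: assuming the 4K page and the
 *	"1 header and 127 mappings" layout described earlier, elem_size above comes out
 *	to PAGE_SIZE / (MAPPERBLOK + 1) = 4096 / 128 = 32 bytes, i.e. one mapping slot;
 *	release-queued blocks are charged to cur_size at full page granularity.
 */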
1592
1593
1594 /*
1595 * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
1596 *
1597 * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with
1598 * the same space. If it finds it, it returns the virtual address.
1599 *
1600 * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check
1601 * for it and fail it myself...
1602 */
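/*
 *	Background note, not part of the original source: the scan below inverts the
 *	PowerPC hashed page table lookup.  The primary hash is the XOR of the low VSID
 *	bits with the virtual page index, so knowing which PTEG the mapping hashed to
 *	(derived from PTEhash) and which space it belongs to (pmap->space), XORing the
 *	two recovers the page index bits; the segment and API bits are then pulled
 *	straight out of PTEv.
 */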
1603
1604 vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */
1605
1606 spl_t s;
1607 register mapping *mp, *mpv;
1608 vm_offset_t va;
1609
1610 if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */
1611
1612 s = splhigh();
1613 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1614 splx(s); /* Restore 'rupts */
1615 panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */
1616 return(0); /* Should die before here */
1617 }
1618
1619 va = 0; /* Assume failure */
1620
1621 for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */
1622
1623 if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */
1624
1625 va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */
1626 va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */
1627 va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */
1628 break; /* We're done now, pass virtual address back */
1629 }
1630
1631 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1632 splx(s); /* Restore 'rupts */
1633 return(va); /* Return the result or 0... */
1634 }
1635
1636 /*
1637 * kvtophys(addr)
1638 *
1639 * Convert a kernel virtual address to a physical address
1640 */
1641 vm_offset_t kvtophys(vm_offset_t va) {
1642
1643 register mapping *mp, *mpv;
1644 register blokmap *bmp;
1645 register vm_offset_t pa;
1646 spl_t s;
1647
1648 s=splhigh(); /* Don't bother from now on */
1649 mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */
1650
1651 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1652 splx(s); /* Restore 'rupts */
1653 panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
1654 return 0;
1655 }
1656
1657 if(!mp) { /* If it was not a normal page */
1658 pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1659 splx(s); /* Restore 'rupts */
1660 return pa; /* Return physical address */
1661 }
1662
1663 mpv = hw_cpv(mp); /* Convert to virtual addressing */
1664
1665 if(!mpv->physent) { /* Was there a physical entry? */
1666 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* No physent, so take the physical address from the PTE image */
1667 }
1668 else {
1669 pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */
1670 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1671 }
1672
1673 splx(s); /* Restore 'rupts */
1674 return pa; /* Return the physical address... */
1675 }
1676
1677 /*
1678 * phystokv(addr)
1679 *
1680 * Convert a physical address to a kernel virtual address if
1681 * there is a mapping, otherwise return NULL
1682 */
1683
1684 vm_offset_t phystokv(vm_offset_t pa) {
1685
1686 struct phys_entry *pp;
1687 vm_offset_t va;
1688
1689 pp = pmap_find_physentry(pa); /* Find the physical entry */
1690 if (PHYS_NULL == pp) {
1691 return (vm_offset_t)NULL; /* If none, return null */
1692 }
1693 if(!(va=mapping_p2v(kernel_pmap, pp))) {
1694 return 0; /* Can't find it, return 0... */
1695 }
1696 return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */
1697
1698 }
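/*
 *	Illustrative sketch, not part of the original source: for an ordinary wired
 *	kernel page, kvtophys and phystokv act as approximate inverses:
 */
#if 0	/* example only, never compiled */
static void kv_roundtrip_example(vm_offset_t va) {
	vm_offset_t pa  = kvtophys(va);		/* Kernel virtual to physical */
	vm_offset_t kva = phystokv(pa);		/* Back to a kernel virtual alias, or 0 if unmapped */
	(void)kva;
}
#endif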
1699
1700 /*
1701 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1702 * page 0 access for the current thread.
1703 *
1704 * If parameter is TRUE, faults are ignored
1705 * If parameter is FALSE, faults are honored
1706 *
1707 */
1708
1709 void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1710
1711 if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1712 else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1713
1714 return; /* All done, leave... */
1715 }
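/*
 *	Illustrative sketch, not part of the original source: a caller that must probe
 *	page 0 would bracket the access like this:
 */
#if 0	/* example only, never compiled */
static void page_zero_probe_example(void) {
	ignore_zero_fault(TRUE);		/* Don't take a fault for the page 0 touch */
	/* ... access page 0 here ... */
	ignore_zero_fault(FALSE);		/* Back to honoring page 0 faults */
}
#endif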
1716
1717
1718 /*
1719 * Allocates a range of virtual addresses in a map as optimally as
1720 * possible for block mapping. The start address is aligned such
1721 * that a minimum number of power-of-two sized/aligned blocks is
1722 * required to cover the entire range.
1723 *
1724 * We also use a mask of valid block sizes to determine optimality.
1725 *
1726 * Note that the passed in pa is not actually mapped to the selected va,
1727 * rather, it is used to figure the optimal boundary. The actual
1728 * V to R mapping is done externally.
1729 *
1730 * This function will return KERN_INVALID_ADDRESS if an optimal address
1731 * can not be found. It is not necessarily a fatal error; the caller may still be
1732 * able to do a non-optimal assignment.
1733 */
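/*
 *	Worked example, not part of the original source: for a 3MB request (size =
 *	0x00300000), cntlzw(size) = 10, so the starting boundary is 0x80000000 >> 10 =
 *	0x00200000 (2MB); that is then pinned to the largest block size the hardware
 *	supports, and the loop below halves it until it finds a boundary that is both
 *	valid in blokValid and has a free slot in the map.
 */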
1734
1735 kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
1736 vm_size_t size, vm_prot_t prot) {
1737
1738 vm_map_entry_t entry, next, tmp_entry, new_entry;
1739 vm_offset_t start, end, algnpa, endadr, strtadr, curradr;
1740 vm_offset_t boundary;
1741
1742 unsigned int maxsize, minsize, leading, trailing;
1743
1744 assert(page_aligned(pa));
1745 assert(page_aligned(size));
1746
1747 if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */
1748
1749 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
1750 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
1751
1752 boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */
1753 if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */
1754
1755 vm_map_lock(map); /* No touchee no mapee */
1756
1757 for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */
1758 if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */
1759 algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */
1760 leading = algnpa - pa; /* Get leading size */
1761
1762 curradr = 0; /* Start low */
1763
1764 while(1) { /* Try all possible values for this opt level */
1765
1766 curradr = curradr + boundary; /* Get the next optimal address */
1767 strtadr = curradr - leading; /* Calculate start of optimal range */
1768 endadr = strtadr + size; /* And now the end */
1769
1770 if((curradr < boundary) || /* Did address wrap here? */
1771 (strtadr > curradr) || /* How about this way? */
1772 (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */
1773
1774 if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
1775 if(endadr > map->max_offset) break; /* No room right now... */
1776
1777 if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
1778
1779 next = entry->vme_next; /* Get the next entry */
1780 if((next == vm_map_to_entry(map)) || /* Are we the last entry? */
1781 (next->vme_start >= endadr)) { /* or do we end before the next entry? */
1782
1783 new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
1784 VM_OBJECT_NULL,
1785 0, /* Offset into object of 0 */
1786 FALSE, /* No copy needed */
1787 FALSE, /* Not shared */
1788 FALSE, /* Not in transition */
1789 prot, /* Set the protection to requested */
1790 prot, /* We can't change protection */
1791 VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind,
1792 'cause we don't page in this area */
1793 VM_INHERIT_DEFAULT, /* Default inheritance */
1794 0); /* Nothing is wired */
1795
1796 vm_map_unlock(map); /* Let the world see it all */
1797 *va = strtadr; /* Tell everyone */
1798 *bnd = boundary; /* Say what boundary we are aligned to */
1799 return(KERN_SUCCESS); /* Leave, all is right with the world... */
1800 }
1801 }
1802 }
1803
1804 vm_map_unlock(map); /* Couldn't find a slot */
1805 return(KERN_INVALID_ADDRESS);
1806 }
1807
1808 /*
1809 * Copies data from a physical page to a virtual page. This is used to
1810 * move data from the kernel to user state.
1811 *
1812 * Note that it is invalid to have a source that spans a page boundary.
1813 * This can block.
1814 * We don't check protection either.
1815 * And we don't handle a block mapped sink address either.
1816 *
1817 */
1818
1819 kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
1820
1821 vm_map_t map;
1822 kern_return_t ret;
1823 unsigned int spaceid;
1824 int left, csize;
1825 vm_offset_t pa;
1826 register mapping *mpv, *mp;
1827 spl_t s;
1828
1829 if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */
1830 map = current_act()->map; /* Get the current map */
1831
1832 while(size) {
1833 s=splhigh(); /* Don't bother me */
1834
1835 spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */
1836
1837 mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */
1838 if(!mp) { /* Was it there? */
1839 splx(s); /* Restore the interrupt level */
1840 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */
1841 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1842
1843 return KERN_FAILURE; /* Didn't find any, return no good... */
1844 }
1845 if((unsigned int)mp&1) { /* Did we timeout? */
1846 panic("copyp2v: timeout locking physical entry for virtual address (%08X)\n", sink); /* Yeah, scream about it! */
1847 splx(s); /* Restore the interrupt level */
1848 return KERN_FAILURE; /* Bad hair day, return FALSE... */
1849 }
1850
1851 mpv = hw_cpv(mp); /* Convert mapping block to virtual */
1852
1853 if(mpv->PTEr & 1) { /* Are we write protected? If so, it could indicate COW */
1854 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1855 splx(s); /* Restore the interrupt level */
1856 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* check for a COW area */
1857 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1858 return KERN_FAILURE; /* Didn't find any, return no good... */
1859 }
1860 left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */
1861
1862 csize = size < left ? size : left; /* Set amount to copy this pass */
1863
1864 pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */
1865
1866 bcopy_physvir((char *)source, (char *)pa, csize); /* Do a physical copy, virtually */
1867
1868 hw_set_mod(mpv->physent); /* Go set the change of the sink */
1869
1870 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1871 splx(s); /* Open up for interrupts */
1872
1873 sink += csize; /* Move up to start of next page */
1874 source += csize; /* Move up source */
1875 size -= csize; /* Set amount for next pass */
1876 }
1877 return KERN_SUCCESS;
1878 }
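/*
 *	Illustrative sketch, not part of the original source: since the source must not
 *	cross a page boundary, a hypothetical caller copying a larger physical region out
 *	to user space would feed copyp2v one source page (or less) at a time:
 */
#if 0	/* example only, never compiled */
static kern_return_t copyp2v_region_example(vm_offset_t src_pa, vm_offset_t user_va, unsigned int len) {
	while(len) {
		unsigned int chunk = PAGE_SIZE - (src_pa & PAGE_MASK);	/* Stay inside one source page */
		if(chunk > len) chunk = len;				/* Don't run past the request */
		if(copyp2v(src_pa, user_va, chunk) != KERN_SUCCESS) return KERN_FAILURE;
		src_pa += chunk; user_va += chunk; len -= chunk;	/* Advance both sides */
	}
	return KERN_SUCCESS;
}
#endif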
1879
1880
1881 /*
1882 * copy 'size' bytes from physical to physical address
1883 * the caller must validate the physical ranges
1884 *
1885 * if flush_action == 0, no cache flush necessary
1886 * if flush_action == 1, flush the source
1887 * if flush_action == 2, flush the dest
1888 * if flush_action == 3, flush both source and dest
1889 */
1890
1891 kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
1892
1893 switch(flush_action) {
1894 case 1:
1895 flush_dcache(source, size, 1);
1896 break;
1897 case 2:
1898 flush_dcache(dest, size, 1);
1899 break;
1900 case 3:
1901 flush_dcache(source, size, 1);
1902 flush_dcache(dest, size, 1);
1903 break;
1904
1905 }
1906 bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */
1907
1908 switch(flush_action) {
1909 case 1:
1910 flush_dcache(source, size, 1);
1911 break;
1912 case 2:
1913 flush_dcache(dest, size, 1);
1914 break;
1915 case 3:
1916 flush_dcache(source, size, 1);
1917 flush_dcache(dest, size, 1);
1918 break;
1919
1920 }
	return KERN_SUCCESS;			/* The function is declared kern_return_t, so say the copy worked */
1921 }
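/*
 *	Illustrative sketch, not part of the original source: a hypothetical caller moving
 *	one page between two already-validated physical addresses, flushing only the
 *	destination (flush_action == 2), would do:
 */
#if 0	/* example only, never compiled */
static void copyp2p_example(vm_offset_t src_pa, vm_offset_t dst_pa) {
	(void)copyp2p(src_pa, dst_pa, PAGE_SIZE, 2);	/* Physical copy; destination cache flushed before and after */
}
#endif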
1922
1923
1924
1925 #if DEBUG
1926 /*
1927 * Dumps out the mapping stuff associated with a virtual address
1928 */
1929 void dumpaddr(space_t space, vm_offset_t va) {
1930
1931 mapping *mp, *mpv;
1932 vm_offset_t pa;
1933 spl_t s;
1934
1935 s=splhigh(); /* Don't bother me */
1936
1937 mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */
1938 if(!mp) { /* Did we find one? */
1939 splx(s); /* Restore the interrupt level */
1940 printf("dumpaddr: virtual address (%08X) not mapped\n", va);
1941 return; /* Didn't find any, return FALSE... */
1942 }
1943 if((unsigned int)mp&1) { /* Did we timeout? */
1944 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */
1945 splx(s); /* Restore the interrupt level */
1946 return; /* Bad hair day, return FALSE... */
1947 }
1948 printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address we're dumping */
1949 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1950 dumpmapping(mpv);
1951 if(mpv->physent) {
1952 dumppca(mpv);
1953 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
1954 }
1955 splx(s); /* Was there something you needed? */
1956 return; /* Tell them we did it */
1957 }
1958
1959
1960
1961 /*
1962 * Prints out a mapping control block
1963 *
1964 */
1965
1966 void dumpmapping(struct mapping *mp) { /* Dump out a mapping */
1967
1968 printf("Dump of mapping block: %08X\n", mp); /* Header */
1969 printf(" next: %08X\n", mp->next);
1970 printf(" hashnext: %08X\n", mp->hashnext);
1971 printf(" PTEhash: %08X\n", mp->PTEhash);
1972 printf(" PTEent: %08X\n", mp->PTEent);
1973 printf(" physent: %08X\n", mp->physent);
1974 printf(" PTEv: %08X\n", mp->PTEv);
1975 printf(" PTEr: %08X\n", mp->PTEr);
1976 printf(" pmap: %08X\n", mp->pmap);
1977
1978 if(mp->physent) { /* Print physent if it exists */
1979 printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
1980 }
1981 else {
1982 printf("Associated physical entry: none\n");
1983 }
1984
1985 dumppca(mp); /* Dump out the PCA information */
1986
1987 return;
1988 }
1989
1990 /*
1991 * Prints out a PTEG control area
1992 *
1993 */
1994
1995 void dumppca(struct mapping *mp) { /* PCA */
1996
1997 PCA *pca;
1998 unsigned int *pteg;
1999
2000 pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */
2001 pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
2002 printf(" Dump of PCA: %08X\n", pca); /* Header */
2003 printf(" PCAlock: %08X\n", pca->PCAlock);
2004 printf(" PCAallo: %08X\n", pca->flgs.PCAallo);
2005 printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
2006 printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
2007 printf("Dump of PTEG: %08X\n", pteg); /* Header */
2008 printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);
2009 printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);
2010 printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);
2011 printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);
2012 return;
2013 }
2014
2015 /*
2016 * Dumps starting with a physical entry
2017 */
2018
2019 void dumpphys(struct phys_entry *pp) { /* Dump from physent */
2020
2021 mapping *mp;
2022 PCA *pca;
2023 unsigned int *pteg;
2024
2025 printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
2026 mp = hw_cpv(pp->phys_link);
2027 while(mp) {
2028 dumpmapping(mp);
2029 dumppca(mp);
2030 mp = hw_cpv(mp->next);
2031 }
2032
2033 return;
2034 }
2035
2036 #endif
2037
2038
2039 kern_return_t bmapvideo(vm_offset_t *info);
2040 kern_return_t bmapvideo(vm_offset_t *info) {
2041
2042 extern struct vc_info vinfo;
2043
2044 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
2045 return KERN_SUCCESS;
2046 }
2047
2048 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
2049 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
2050
2051 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */
2052 return KERN_SUCCESS;
2053 }
2054
2055 kern_return_t bmapmapr(vm_offset_t va);
2056 kern_return_t bmapmapr(vm_offset_t va) {
2057
2058 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
2059 return KERN_SUCCESS;
2060 }