1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
27 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
28 * Currently, some of the function of this module is contained within pmap.c. We may want to move
29 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
30 *
31 * We also depend upon the structure of the phys_entry control block. We do put some processor
32 * specific stuff in there.
33 *
34 */
35
36 #include <cpus.h>
37 #include <debug.h>
38 #include <mach_kgdb.h>
39 #include <mach_vm_debug.h>
40 #include <db_machine_commands.h>
41
42 #include <kern/thread.h>
43 #include <kern/thread_act.h>
44 #include <mach/vm_attributes.h>
45 #include <mach/vm_param.h>
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49 #include <kern/spl.h>
50
51 #include <kern/misc_protos.h>
52 #include <ppc/misc_protos.h>
53 #include <ppc/proc_reg.h>
54
55 #include <vm/pmap.h>
56 #include <ppc/pmap.h>
57 #include <ppc/pmap_internals.h>
58 #include <ppc/mem.h>
59
60 #include <ppc/new_screen.h>
61 #include <ppc/Firmware.h>
62 #include <ppc/mappings.h>
63 #include <ddb/db_output.h>
64
65 #include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
66
67 #define PERFTIMES 0
68
69 #if PERFTIMES && DEBUG
70 #define debugLog2(a, b, c) dbgLog2(a, b, c)
71 #else
72 #define debugLog2(a, b, c)
73 #endif
74
75 vm_map_t mapping_map = VM_MAP_NULL;
76 #define MAPPING_MAP_SIZE 33554432 /* 32MB address space */
77
78 unsigned int incrVSID = 0; /* VSID increment value */
79 unsigned int mappingdeb0 = 0;
80 unsigned int mappingdeb1 = 0;
81 extern unsigned int hash_table_size;
82 extern vm_offset_t mem_size;
83 /*
84 * ppc_prot translates from the mach representation of protections to the PPC version.
85 * We also allow for a direct setting of the protection bits. This extends the mach
86 * concepts to allow the greater control we need for Virtual Machines (VMM).
87 * Calculating it like this saves a memory reference - and maybe a couple of microseconds.
88 * It eliminates the use of this table:
89 * unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
90 */
91
92 #define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
93
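/*
 * Illustrative sketch (kept under #if 0, not compiled): the constant 0xE4E4AFAC above is just
 * the old 16-entry table packed two bits per entry, least-significant entry first. The check
 * below is only a suggestion of how the packing could be verified; the function name is an
 * assumption and this is not part of the production path.
 */
#if 0
static void ppc_prot_check(void) {						/* (TEST/DEBUG) */

	static const unsigned char tbl[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned int p;

	for(p = 0; p < 16; p++) {					/* Walk every Mach protection code */
		if(ppc_prot(p) != tbl[p])				/* Compare the packed lookup against the table */
			panic("ppc_prot_check: mismatch at %d\n", p);
	}
}
#endif
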
94 /*
95 * About PPC VSID generation:
96 *
97 * This function is called to generate an address space ID. This space ID must be unique within
98 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
99 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
100 4, so we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
101 to have 1M pmaps at a time, which we couldn't anyway; we'd run out of memory way before then. The
102 problem is that only a certain number of pmaps are kept in a free list and if that is full,
103 they are released. This causes us to lose track of what space IDs are free to be reused.
104 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
105 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
106 *
107 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
108 * calculation for virtual address lookup. An improperly chosen value could potentially cause
109 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
110 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
111 hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
112 are reserved for the segment number, which means that we really have 2^(13-4) (512) space IDs
113 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
114 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
115 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
116 * with no overflow. I don't think that this is a problem.
117 *
118 There may be a problem with the space ID, though. A new space ID is generated (mainly)
119 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
120 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
121 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
122 * the same modulo 512. We can reduce this problem by having the segment number be bits
123 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
124 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
125 I don't think that it is as significant as the other, so I'll make the space ID
126 * with segment first.
127 *
128 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
129 * While this is a problem that should only happen in periods counted in weeks, it can and
130 * will happen. This is assuming a monotonically increasing space ID. If we were to search
131 for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
132 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
133 *
134 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
135 * locked by free_pmap_lock) that is sorted in VSID sequence order.
136 *
137 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
138 the last that was freed. Then we allocate that.
139 *
140 * NOTE: We must be called with interruptions off and free_pmap_lock held.
141 *
142 */
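
/*
 * Illustrative sketch (kept under #if 0, not compiled): the VSID layout the discussion above
 * settles on (segment number in the top 4 bits of the 24-bit VSID, 20-bit space ID below it)
 * and the primary PTEG hash it feeds. The function name, parameter names, and the assumption
 * that the PTEG count is a power of two are for illustration only.
 */
#if 0
static unsigned int vsid_hash_example(unsigned int space, unsigned int segment,
		unsigned int pageindex, unsigned int nptegs) {		/* (TEST/DEBUG) */

	unsigned int vsid;

	vsid = ((segment & 0xF) << 20) | (space & 0x000FFFFF);		/* Segment first, then the 20-bit space ID */
	return (pageindex ^ vsid) & (nptegs - 1);			/* (page index XOR vsid) mod number of PTEGs */
}
#endif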
143
144 /*
145 * mapping_init();
146 * Do anything that needs to be done before the mapping system can be used.
147 * Hash table must be initialized before we call this.
148 *
149 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
150 */
151
152 void mapping_init(void) {
153
154 unsigned int tmp;
155
156 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
157
158 incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
159 incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
160 incrVSID |= 1; /* Make sure the increment is odd */
161 return;
162
163 }
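
/*
 * Illustrative sketch (kept under #if 0, not compiled): a portable rendering of the incrVSID
 * calculation above, with the leading-zero count done in C rather than with the cntlzw
 * instruction. The function and variable names are assumptions for illustration only.
 */
#if 0
static unsigned int incrVSID_example(unsigned int table_size) {		/* (TEST/DEBUG) */

	unsigned int clz, incr;

	for(clz = 0; clz < 32; clz++) {					/* Count leading zero bits, high end first */
		if(table_size & (0x80000000 >> clz)) break;		/* Stop at the first one bit */
	}

	incr  = 1 << ((32 - clz + 1) >> 1);				/* Roughly the square root of the table size, as a power of two */
	incr |= 1 << ((32 - clz + 1) >> 2);				/* Plus roughly the fourth root */
	incr |= 1;							/* Make sure the increment is odd */
	return incr;
}
#endif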
164
165
166 /*
167 * mapping_remove(pmap_t pmap, vm_offset_t va);
168 * Given a pmap and virtual address, this routine finds the mapping and removes it from
169 * both its PTEG hash list and the physical entry list. The mapping block will be added to
170 * the free list. If the free list threshold is reached, garbage collection will happen.
171 * We also kick back a return code to say whether or not we had one to remove.
172 *
173 * We have a strict ordering here: the mapping must be removed from the PTEG hash list before
174 * it can be removed from the physical entry list. This allows us to get by with only the PTEG
175 * hash lock at page fault time. The physical entry lock must be held while we remove the mapping
176 * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions,
177 * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
178 * It's just that simple!
179 *
180 * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
181 * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG
182 lock to control the hash chain and may move the position of the mapping for MRU calculations.
183 *
184 * Note that mappings do not need to point to a physical entry. When they don't, it indicates
185 * the mapping is outside of physical memory and usually refers to a memory mapped device of
186 * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock
187 * routines return normally, but don't do anything.
188 */
189
190 boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR
191 Returns TRUE if a mapping was found to remove */
192
193 mapping *mp, *mpv;
194 register blokmap *blm;
195 spl_t s;
196 unsigned int *useadd, *useaddr, uindx;
197 int i;
198 struct phys_entry *pp;
199 mapping *mp1, *mpv1;
200
201 debugLog2(1, va, pmap->space); /* start mapping_remove */
202
203 s=splhigh(); /* Don't bother me */
204
205 mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */
206
207 if(!mp) { /* Did we find one? */
208 splx(s); /* Allow 'rupts now */
209 if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */
210
211 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
212 blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
213 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
214 pmap, va, blm);
215 }
216 while ((unsigned int)mp & 2)
217 mp = (mapping *)hw_rem_blk(pmap, va, va);
218 #if 0
219 blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */
220 kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
221 blm, blm->start, blm->end, blm->PTEr);
222 #endif
223 mapping_free(hw_cpv(mp)); /* Release it */
224 debugLog2(2, 1, 0); /* End mapping_remove */
225 return TRUE; /* Tell them we did it */
226 }
227 debugLog2(2, 0, 0); /* end mapping_remove */
228 return FALSE; /* Didn't find any, return FALSE... */
229 }
230 if((unsigned int)mp&1) { /* Did we timeout? */
231 panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */
232 splx(s); /* Restore the interrupt level */
233 return FALSE; /* Bad hair day, return FALSE... */
234 }
235
236 mpv = hw_cpv(mp); /* Get virtual address of mapping */
237 #if DEBUG
238 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
239 #else
240 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
241 #endif
242 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
243 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
244 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
245
246 #if 0
247 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
248 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
249 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
250 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
251 }
252 }
253 #endif
254
255 hw_rem_map(mp); /* Remove the corresponding mapping */
256
257 pp = mpv->physent;
258
259 if ((mpv->physent) && (pmap->vflags & pmapVMhost)) {
260
261 while(mp1 = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
262
263 mpv1 = hw_cpv(mp1); /* Get the virtual address */
264 #if DEBUG
265 if(hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
266 #else
267 (void)hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1); /* Decrement the resident page count */
268 #endif
269
270 uindx = ((mpv1->PTEv >> 24) & 0x78) | ((mpv1->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
271 useadd = (unsigned int *)&mpv1->pmap->pmapUsage[uindx]; /* Point to slot to bump */
272 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
273 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
274
275 #if 0
276 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
277 if((mpv1->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
278 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
279 i * pmapUsageSize, mpv1->pmap->pmapUsage[i], mpv1->pmap);
280 }
281 }
282 #endif
283
284 hw_rem_map(mp1); /* Remove the mapping */
285 mapping_free(mpv1); /* Add mapping to the free list */
286 }
287 }
288
289 if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
290
291 splx(s); /* Was there something you needed? */
292
293 mapping_free(mpv); /* Add mapping to the free list */
294 debugLog2(2, 1, 0); /* end mapping_remove */
295 return TRUE; /* Tell them we did it */
296 }
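
/*
 * Illustrative sketch (kept under #if 0, not compiled): the even/odd slot arithmetic used on
 * the pmapUsage counters above. The counters appear to be 16 bits wide, packed two per 32-bit
 * word, while the hardware atomic primitives work on whole words, so the code adjusts the
 * high halfword (even slot, big-endian) with 0x00010000 and the low halfword (odd slot) with 1.
 * The function and parameter names below are assumptions for illustration only.
 */
#if 0
static void usage_bump_example(unsigned short *slot, int up) {		/* (TEST/DEBUG) */

	unsigned int *word, delta;

	word  = (unsigned int *)((unsigned int)slot & -4);		/* Round the halfword address down to its word */
	delta = ((unsigned int)slot == (unsigned int)word) ? 0x00010000 : 1;	/* Even slot is the high half, odd slot the low half */

	if(up) (void)hw_atomic_add(word, delta);			/* Bump the counter without disturbing its neighbor */
	else (void)hw_atomic_sub(word, delta);				/* Or drop it */
}
#endif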
297
298 /*
299 * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
300 *
301 * This guy releases any mappings that exist for a physical page on a specified map.
302 We get the lock on the phys_entry, and hold it throughout this whole routine.
303 * That way, no one can change the queue out from underneath us. We keep fetching
304 the physent's mapping anchor until it is null, then we're done.
305 *
306 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
307 decrement the pmap's residency count. Then we release the mapping back to the free list.
308 *
309 */
310
311
312 void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) { /* Remove all mappings from specified pmap for this physent */
313
314 mapping *mp, *mp_next, *mpv;
315 spl_t s;
316 unsigned int *useadd, *useaddr, uindx;
317 int i;
318
319 s=splhigh(); /* Don't bother me */
320
321 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
322 panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
323 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
324 }
325
326 mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
327
328 while(mp) { /* Keep going so long as there's another */
329
330 mpv = hw_cpv(mp); /* Get the virtual address */
331 if(mpv->pmap != pmap) {
332 mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
333 continue;
334 }
335 #if DEBUG
336 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
337 #else
338 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
339 #endif
340
341 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join seg # and top 2 bits of API */
342 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
343 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
344 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
345
346
347
348 mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
349 hw_rem_map(mp); /* Remove the mapping */
350 mapping_free(mpv); /* Add mapping to the free list */
351 mp = mp_next;
352 }
353
354 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
355 splx(s);
356 return;
357 }
358 /*
359 * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
360 *
361 * This guy releases any mappings that exist for a physical page.
362 We get the lock on the phys_entry, and hold it throughout this whole routine.
363 * That way, no one can change the queue out from underneath us. We keep fetching
364 the physent's mapping anchor until it is null, then we're done.
365 *
366 * For each mapping, we call the remove routine to remove it from the PTEG hash list and
367 decrement the pmap's residency count. Then we release the mapping back to the free list.
368 *
369 */
370
371 void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */
372
373 mapping *mp, *mpv;
374 spl_t s;
375 unsigned int *useadd, *useaddr, uindx;
376 int i;
377
378 s=splhigh(); /* Don't bother me */
379 debugLog2(3, pp->pte1, 0); /* start mapping_purge */
380
381 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
382 panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
383 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
384 }
385
386 while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
387
388 mpv = hw_cpv(mp); /* Get the virtual address */
389 #if DEBUG
390 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
391 #else
392 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
393 #endif
394
395 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
396 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
397 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
398 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
399
400 #if 0
401 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
402 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
403 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
404 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
405 }
406 }
407 #endif
408
409
410 hw_rem_map(mp); /* Remove the mapping */
411 mapping_free(mpv); /* Add mapping to the free list */
412 }
413
414 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
415
416 debugLog2(4, pp->pte1, 0); /* end mapping_purge */
417 splx(s); /* Was there something you needed? */
418 return; /* Tell them we did it */
419 }
420
421
422 /*
423 * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
424 *
425 * This routine takes the given parameters, builds a mapping block, and queues it into the
426 * correct lists.
427 *
428 * The pp parameter can be null. This allows us to make a mapping that is not
429 * associated with any physical page. We may need this for certain I/O areas.
430 *
431 If the phys_entry address is null, we neither lock nor chain into it.
432 If locked is 1, we already hold the lock on the phys_entry and won't acquire or release it.
433 */
434
435 mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */
436
437 register mapping *mp, *mpv;
438 unsigned int *useadd, *useaddr;
439 spl_t s;
440 int i;
441
442 debugLog2(5, va, pa); /* start mapping_make */
443 mpv = mapping_alloc(); /* Get a spare mapping block */
444
445 mpv->pmap = pmap; /* Initialize the pmap pointer */
446 mpv->physent = pp; /* Initialize the pointer to the physical entry */
447 mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */
448 mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the virtual portion of the PTE (VSID and API) */
449
450 s=splhigh(); /* Don't bother from now on */
451
452 mp = hw_cvp(mpv); /* Get the physical address of this */
453
454 if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? */
455 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
456 panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
457 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
458 }
459 }
460
461 if(pp) { /* See if there is a physical entry */
462 mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mapping's forward pointer */
463 pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */
464 }
465
466 hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */
467
468 (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */
469 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
470 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
471 (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */
472 #if 0
473 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
474 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
475 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
476 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
477 }
478 }
479 #endif
480
481 if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */
482
483 splx(s); /* Ok for interruptions now */
484 debugLog2(6, pmap->space, prot); /* end mapping_make */
485 return mpv; /* Leave... */
486 }
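
/*
 * Illustrative sketch (kept under #if 0, not compiled): how the PTEv word built above breaks
 * down. The PowerPC PTE high word is V (1 bit), VSID (24 bits), H (1 bit), API (6 bits);
 * mapping_make leaves V and H zero here. The function name and the kprintf are assumptions
 * for illustration only.
 */
#if 0
static void ptev_fields_example(unsigned int PTEv) {			/* (TEST/DEBUG) */

	unsigned int segment, space, api;

	segment = (PTEv >> 27) & 0xF;					/* Top 4 bits of the VSID: the segment number from va */
	space   = (PTEv >> 7) & 0x000FFFFF;				/* Low 20 bits of the VSID: the pmap's space ID */
	api     = PTEv & 0x3F;						/* Abbreviated page index: top bits of the page index within the segment */

	kprintf("ptev_fields_example: PTEv=%08X; segment=%01X; space=%05X; api=%02X\n", PTEv, segment, space, api);
}
#endif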
487
488
489 /*
490 * Enters optimal translations for odd-sized V=F blocks.
491 *
492 * Builds a block map for each power-of-two hunk o' address
493 * that exists. This is specific to the processor type.
494 * PPC uses BAT register size stuff. Future PPC might have
495 * something else.
496 *
497 * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
498 * stupid to know otherwise so we only look at the va anyhow, so there...
499 *
500 */
501
502 void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */
503
504 register blokmap *blm, *oblm;
505 unsigned int pg;
506 unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
507 int i, maxshft, nummax, minshft;
508
509 #if 1
510 kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
511 pmap, va, pa, bnd, size, prot, attr);
512 #endif
513
514 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
515 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
516
517 minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */
518 maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */
519
520 leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */
521 trailing = size - leading; /* Get size of trailing area */
522 tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */
523
524 #if 1
525 kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */
526 #endif
527
528 if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
529
530 va = va + tomin; /* Adjust virtual start */
531 pa = pa + tomin; /* Adjust physical start */
532 leading = leading - tomin; /* Adjust leading size */
533
534 /*
535 * Some of this code is very classic PPC. We need to fix this up.
536 */
537
538 leading = leading >> minshft; /* Position for bit testing */
539 cbsize = minsize; /* Set the minimum size */
540
541 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
542
543 if(leading & 1) {
544 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
545 pa = pa + cbsize; /* Bump up physical address */
546 va = va + cbsize; /* Bump up virtual address */
547 }
548
549 leading = leading >> 1; /* Shift up to next size */
550 cbsize = cbsize << 1; /* Here too */
551
552 }
553
554 nummax = trailing >> maxshft; /* Get number of max size blocks left */
555 for(i=0; i < nummax - 1; i++) { /* Account for all max size block left but 1 */
556 pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
557
558 pa = pa + maxsize; /* Bump up physical address */
559 va = va + maxsize; /* Bump up virtual address */
560 trailing -= maxsize; /* Back off what we just did */
561 }
562
563 cbsize = maxsize; /* Start at maximum size */
564
565 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
566
567 if(trailing & cbsize) {
568 trailing &= ~cbsize; /* Remove the block we are allocating */
569 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
570 pa = pa + cbsize; /* Bump up physical address */
571 va = va + cbsize; /* Bump up virtual address */
572 }
573 cbsize = cbsize >> 1; /* Next size down */
574 }
575
576 if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
577
578 return; /* Return */
579 }
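
/*
 * Illustrative sketch (kept under #if 0, not compiled): the bit-at-a-time carving used above,
 * shown for a remaining length that is already aligned to the minimum block size and smaller
 * than twice the maximum block size. Each set bit between the minimum and maximum block sizes
 * names one power-of-two chunk to map; anything below the minimum is left for normal page
 * mappings. The function name is an assumption for illustration only.
 */
#if 0
static void carve_example(unsigned int len, unsigned int minsize, unsigned int maxsize) {	/* (TEST/DEBUG) */

	unsigned int cbsize;

	for(cbsize = minsize; cbsize && cbsize <= maxsize; cbsize <<= 1) {	/* Walk block sizes, small to large */
		if(len & cbsize) {					/* Does this power of two appear in the length? */
			kprintf("carve_example: map a %08X byte block here\n", cbsize);	/* (TEST/DEBUG) */
			len &= ~cbsize;					/* That piece is now accounted for */
		}
	}
	if(len) kprintf("carve_example: %08X bytes left for normal page mappings\n", len);	/* (TEST/DEBUG) */
}
#endif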
580
581
582 /*
583 * Enters translations for odd-sized V=F blocks.
584 *
585 Checks to ensure that the request is at least ODDBLKMIN in size. If smaller, the request
586 * will be split into normal-sized page mappings.
587 *
588 The higher level VM map should be locked to ensure that we don't have a
589 * double diddle here.
590 *
591 * We panic if we get a block that overlaps with another. We do not merge adjacent
592 blocks because removing any address within a block removes the entire block, and it
593 would really mess things up if we trashed too much.
594 *
595 Once a block is mapped, it is immutable, that is, protection, cache mode, etc. can
596 * not be changed. The block must be unmapped and then remapped with the new stuff.
597 * We also do not keep track of reference or change flags.
598 *
599 * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
600 * with interruptions and translation disabled and under the control of the lock located
601 * in the first block map. MRU is used because it is expected that the same entry
602 * will be accessed repeatedly while PTEs are being generated to cover those addresses.
603 *
604 */
605
606 void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
607
608 register blokmap *blm, *oblm, *oblm_virt;
609 unsigned int pg;
610
611 #if 0
612 kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
613 pmap, va, pa, size, prot, attr);
614 #endif
615
616 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
617 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
618 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
619 #if 0
620 kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
621 va + pg, pa + pg);
622 #endif
623 }
624 return; /* All done */
625 }
626
627 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
628
629 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
630 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
631 blm->current = 0;
632 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
633 blm->space = pmap->space; /* Set the space (only needed for remove) */
634 blm->blkFlags = flags; /* Set the block's flags */
635
636 #if 0
637 kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
638 blm, blm->start, blm->end, blm->PTEr);
639 #endif
640
641 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
642
643 #if 0
644 kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
645 blm, pmap->bmaps);
646 #endif
647
648 do {
649 oblm = hw_add_blk(pmap, blm);
650 if ((unsigned int)oblm & 2) {
651 oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
652 mapping_remove(pmap, oblm_virt->start);
653 };
654 } while ((unsigned int)oblm & 2);
655
656 if (oblm) {
657 oblm = (blokmap *)hw_cpv((mapping *) oblm); /* Get the old block virtual address */
658 blm = (blokmap *)hw_cpv((mapping *)blm); /* Back to the virtual address of this */
659 if((oblm->start != blm->start) || /* If we have a match, then this is a fault race and */
660 (oblm->end != blm->end) || /* is acceptable */
661 (oblm->PTEr != blm->PTEr))
662 panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */
663 mapping_free((struct mapping *)blm);
664 }
665
666 #if 0
667 kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
668 blm, pmap->bmaps);
669 #endif
670
671 return; /* Return */
672 }
673
674
675 /*
676 * Optimally enters translations for odd-sized V=F blocks.
677 *
678 Checks to ensure that the request is at least ODDBLKMIN in size. If smaller, the request
679 * will be split into normal-sized page mappings.
680 *
681 This one is different from pmap_map_block in that it will allocate its own virtual
682 * target address. Rather than allocating a single block,
683 * it will also allocate multiple blocks that are power-of-two aligned/sized. This allows
684 * hardware-level mapping that takes advantage of BAT maps or large page sizes.
685 *
686 * Most considerations for pmap_map_block apply.
687 *
688 *
689 */
690
691 kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
692 vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */
693
694 register blokmap *blm, *oblm;
695 unsigned int pg;
696 kern_return_t err;
697 unsigned int bnd;
698
699 #if 1
700 kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
701 map, pa, size, prot, attr);
702 #endif
703
704 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
705 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
706 if(err) {
707 #if DEBUG
708 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */
709 #endif
710 return(err); /* Pass back the error */
711 }
712 #if 1
713 kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */
714 #endif
715
716 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
717 mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
718 }
719 return(KERN_SUCCESS); /* All done */
720 }
721
722 err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */
723
724 if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */
725 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
726 if(err) {
727 #if DEBUG
728 kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */
729 #endif
730 return(err); /* Pass back the error */
731 }
732 #if 1
733 kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */
734 #endif
735 pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */
736 return KERN_SUCCESS; /* All done now */
737 }
738
739 if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... */
740 #if DEBUG
741 kprintf("pmap_map_block_opt: vm_map_block() returned %d\n", err); /* Say we couldn't do it */
742 #endif
743 return(err);
744 }
745
746 #if 1
747 kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */
748 #endif
749 mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */
750 return(KERN_SUCCESS); /* All done */
751 }
752
753
754 #if 0
755
756 /*
757 * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
758 * areas.
759 *
760 * Once blocks are merged, they act like one block, i.e., if you remove it,
761 * it all goes...
762 *
763 * This can only be used during boot. Ain't no way we can handle SMP
764 * or preemption easily, so we restrict it. We don't check either. We
765 * assume only skilled professional programmers will attempt using this
766 * function. We assume no responsibility, either real or imagined, for
767 * injury or death resulting from unauthorized use of this function.
768 *
769 No user serviceable parts inside. Notice to be removed by end-user only,
770 * under penalty of applicable federal and state laws.
771 *
772 * See descriptions of pmap_map_block. Ignore the part where we say we panic for
773 * overlapping areas. Note that we do panic if we can't merge.
774 *
775 */
776
777 void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */
778
779 register blokmap *blm, *oblm;
780 unsigned int pg;
781 spl_t s;
782
783 #if 1
784 kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
785 pmap, va, pa, size, prot, attr);
786 #endif
787
788 s=splhigh(); /* Don't bother from now on */
789 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
790 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
791 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
792 }
793 return; /* All done */
794 }
795
796 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
797
798 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
799 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
800 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
801
802 #if 1
803 kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
804 blm, blm->start, blm->end, blm->PTEr);
805 #endif
806
807 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
808
809 #if 1
810 kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
811 blm, pmap->bmaps);
812 #endif
813
814 if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
815 panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
816 }
817
818 #if 1
819 kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
820 blm, pmap->bmaps);
821 #endif
822 splx(s); /* Ok for interruptions now */
823
824 return; /* Return */
825 }
826 #endif
827
828 /*
829 * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
830 *
831 * This routine takes a physical entry and runs through all mappings attached to it and changes
832 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
833 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
834 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
835 * higher to lower, lower to higher.
836 *
837 * Phys_entry is unlocked.
838 */
839
840 void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */
841
842 spl_t spl;
843
844 debugLog2(9, pp->pte1, prot); /* start mapping_protect_phys */
845 spl=splhigh(); /* No interruptions during this */
846 if(!locked) { /* Do we need to lock the physent? */
847 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
848 panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
849 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
850 }
851 }
852
853 hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */
854
855 if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
856 splx(spl); /* Restore interrupt state */
857 debugLog2(10, pp->pte1, 0); /* end mapping_protect_phys */
858
859 return; /* Leave... */
860 }
861
862 /*
863 * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
864 *
865 * This routine takes a pmap and virtual address and changes
866 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
867 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
868 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
869 * higher to lower, lower to higher.
870 *
871 */
872
873 void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
874
875 mapping *mp, *mpv;
876 spl_t s;
877
878 debugLog2(9, vaddr, pmap); /* start mapping_protect */
879 s = splhigh(); /* Don't bother me */
880
881 mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */
882
883 if(!mp) { /* Did we find one? */
884 splx(s); /* Restore the interrupt level */
885 debugLog2(10, 0, 0); /* end mapping_protect */
886 return; /* Didn't find any... */
887 }
888 if((unsigned int)mp & 1) { /* Did we timeout? */
889 panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */
890 splx(s); /* Restore the interrupt level */
891 return; /* Bad hair day... */
892 }
893
894 hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */
895
896 mpv = hw_cpv(mp); /* Get virtual address of mapping */
897 if(mpv->physent) { /* If there is a physical page, */
898 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
899 }
900 splx(s); /* Restore interrupt state */
901 debugLog2(10, mpv->PTEr, 0); /* end mapping_protect */
902
903 return; /* Leave... */
904 }
905
906 /*
907 * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
908 *
909 * This routine takes a physical entry and sets the physical attributes. There can be no mappings
910 * associated with this page when we do it.
911 */
912
913 void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */
914
915 debugLog2(11, pp->pte1, prot); /* start mapping_phys_attr */
916
917 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
918 panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
919 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
920 }
921
922 hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */
923
924 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
925 debugLog2(12, pp->pte1, wimg); /* end mapping_phys_attr */
926
927 return; /* Leave... */
928 }
929
930 /*
931 * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
932 *
933 * This routine takes a physical entry and runs through all mappings attached to it and invalidates
934 * any PTEs it finds.
935 *
936 * Interruptions must be disabled and the physical entry locked at entry.
937 */
938
939 void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */
940
941 hw_inv_all(pp); /* Go invalidate all PTEs pointing to this physical page */
942
943 return; /* Leave... */
944 }
945
946
947 /*
948 * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
949 *
950 * This routine takes a physical entry and runs through all mappings attached to it and turns
951 * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before
952 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
953 * either (I don't think, maybe I'll change my mind later).
954 *
955 * Interruptions must be disabled and the physical entry locked at entry.
956 */
957
958 void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */
959
960 hw_clr_mod(pp); /* Go clear the change bit of a physical page */
961 return; /* Leave... */
962 }
963
964
965 /*
966 * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
967 *
968 * This routine takes a physical entry and runs through all mappings attached to it and turns
969 * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before
970 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
971 * either (I don't think, maybe I'll change my mind later).
972 *
973 * Interruptions must be disabled and the physical entry locked at entry.
974 */
975
976 void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */
977
978 hw_set_mod(pp); /* Go set the change bit of a physical page */
979 return; /* Leave... */
980 }
981
982
983 /*
984 * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
985 *
986 * This routine takes a physical entry and runs through all mappings attached to it and turns
987 * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
988 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
989 * either (I don't think, maybe I'll change my mind later).
990 *
991 * Interruptions must be disabled at entry.
992 */
993
994 void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */
995
996 mapping *mp;
997
998 debugLog2(13, pp->pte1, 0); /* start mapping_clr_ref */
999 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */
1000 panic("Lock timeout getting lock on physical entry\n"); /* Just die... */
1001 }
1002 hw_clr_ref(pp); /* Go clear the reference bit of a physical page */
1003 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */
1004 debugLog2(14, pp->pte1, 0); /* end mapping_clr_ref */
1005 return; /* Leave... */
1006 }
1007
1008
1009 /*
1010 * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
1011 *
1012 * This routine takes a physical entry and runs through all mappings attached to it and turns
1013 * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1014 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1015 * either (I don't think, maybe I'll change my mind later).
1016 *
1017 * Interruptions must be disabled and the physical entry locked at entry.
1018 */
1019
1020 void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */
1021
1022 hw_set_ref(pp); /* Go set the reference bit of a physical page */
1023 return; /* Leave... */
1024 }
1025
1026
1027 /*
1028 * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
1029 *
1030 * This routine takes a physical entry and runs through all mappings attached to it and tests
1031 * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before
1032 * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
1033 * either (I don't think, maybe I'll change my mind later).
1034 *
1035 * Interruptions must be disabled and the physical entry locked at entry.
1036 */
1037
1038 boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */
1039
1040 return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */
1041 }
1042
1043
1044 /*
1045 * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
1046 *
1047 * This routine takes a physical entry and runs through all mappings attached to it and tests
1048 * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1049 the reference bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
1050 * either (I don't think, maybe I'll change my mind later).
1051 *
1052 * Interruptions must be disabled and the physical entry locked at entry.
1053 */
1054
1055 boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */
1056
1057 return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */
1058 }
1059
1060
1061 /*
1062 * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
1063 *
1064 * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
1065 */
1066
1067 void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */
1068
1069 pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
1070
1071 return; /* Leave... */
1072 }
1073
1074
1075 /*
1076 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
1077 *
1078 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
1079 * the number of free mappings remaining, and if below a threshold, replenishes them.
1080 The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
1081 * a new one is allocated.
1082 *
1083 This routine allocates and/or releases memory and must be called from a safe place.
1084 * Currently, vm_pageout_scan is the safest place. We insure that the
1085 */
1086
1087 thread_call_t mapping_adjust_call;
1088 static thread_call_data_t mapping_adjust_call_data;
1089
1090 void mapping_adjust(void) { /* Adjust free mappings */
1091
1092 kern_return_t retr = KERN_SUCCESS; /* Assume success so a rescued block does not trip the allocation check below */
1093 mappingblok *mb, *mbn;
1094 spl_t s;
1095 int allocsize, i;
1096 extern int vm_page_free_count;
1097
1098 if(mapCtl.mapcmin <= MAPPERBLOK) {
1099 mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
1100
1101 #if DEBUG
1102 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
1103 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
1104 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
1105 #endif
1106 }
1107
1108 s = splhigh(); /* Don't bother from now on */
1109 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1110 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
1111 }
1112
1113 if (mapping_adjust_call == NULL) {
1114 thread_call_setup(&mapping_adjust_call_data,
1115 (thread_call_func_t)mapping_adjust,
1116 (thread_call_param_t)NULL);
1117 mapping_adjust_call = &mapping_adjust_call_data;
1118 }
1119
1120 while(1) { /* Keep going until we've got enough */
1121
1122 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
1123 if(allocsize < 1) break; /* Leave if we have all we need */
1124
1125 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
1126 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
1127 mapCtl.mapcreln--; /* Back off the count */
1128 allocsize = MAPPERBLOK; /* Show we allocated one block */
1129 }
1130 else { /* No free ones, try to get it */
1131
1132 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
1133
1134 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1135 splx(s); /* Restore 'rupts */
1136
1137 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
1138 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
1139 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
1140 break;
1141 }
1142 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
1143 }
1144 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
1145 s = splhigh(); /* Don't bother from now on */
1146 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1147 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
1148 }
1149 }
1150 if (retr != KERN_SUCCESS)
1151 break; /* Failed to allocate, bail out... */
1152 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
1153 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1154 mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1155 }
1156 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1157 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1158 }
1159
1160 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
1161 mapCtl.mapcrecurse = 0; /* We are done now */
1162 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1163 splx(s); /* Restore 'rupts */
1164 return; /* Return... */
1165 }
1166
1167 mbn = mapCtl.mapcrel; /* Get first pending release block */
1168 mapCtl.mapcrel = 0; /* Dequeue them */
1169 mapCtl.mapcreln = 0; /* Set count to 0 */
1170
1171 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1172 splx(s); /* Restore 'rupts */
1173
1174 while((unsigned int)mbn) { /* Toss 'em all */
1175 mb = mbn->nextblok; /* Get the next */
1176 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
1177 mbn = mb; /* Chain to the next */
1178 }
1179
1180 __asm__ volatile("sync"); /* Make sure all is well */
1181 mapCtl.mapcrecurse = 0; /* We are done now */
1182 return;
1183 }
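
/*
 * Illustrative sketch (kept under #if 0, not compiled): the replenishment arithmetic used
 * above. The floor is one mapping per 16 physical pages; the shortfall below that floor is
 * converted into whole pages of MAPPERBLOK mappings each (the allocation itself is then
 * retried in descending halves if a large request cannot be satisfied). The function and
 * parameter names are assumptions for illustration only.
 */
#if 0
static unsigned int replenish_pages_example(unsigned int mapcmin, unsigned int mapcfree) {	/* (TEST/DEBUG) */

	unsigned int shortfall;

	if(mapcfree >= mapcmin) return 0;				/* Nothing to do if we are at or above the floor */
	shortfall = mapcmin - mapcfree;					/* Number of mappings we are short */
	return (shortfall + MAPPERBLOK - 1) / MAPPERBLOK;		/* Round up to whole pages worth of mappings */
}
#endif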
1184
1185 /*
1186 * mapping_free(mapping *mp) - release a mapping to the free list
1187 *
1188 * This routine takes a mapping and adds it to the free list.
1189 If this mapping makes the block non-empty, we queue it to the free block list.
1190 NOTE: we might want to queue it to the end to quelch the pathological
1191 case when we get a mapping and free it repeatedly, causing the block to chain and unchain.
1192 If this release fills a block and we are above the threshold, we release the block.
1193 */
1194
1195 void mapping_free(struct mapping *mp) { /* Release a mapping */
1196
1197 mappingblok *mb, *mbn;
1198 spl_t s;
1199 unsigned int full, mindx;
1200
1201 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */
1202 mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
1203
1204 s = splhigh(); /* Don't bother from now on */
1205 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1206 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
1207 }
1208
1209 full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */
1210 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
1211
1212 if(full) { /* If it was full before this: */
1213 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
1214 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
1215 if(!((unsigned int)mapCtl.mapclast))
1216 mapCtl.mapclast = mb;
1217 }
1218
1219 mapCtl.mapcfree++; /* Bump free count */
1220 mapCtl.mapcinuse--; /* Decrement in use count */
1221
1222 mapCtl.mapcfreec++; /* Count total calls */
1223
1224 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
1225 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
1226 == 0xFFFFFFFF) { /* See if empty now */
1227
1228 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
1229 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
1230 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
1231 }
1232 else { /* We're not first */
1233 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
1234 if(mbn->nextblok == mb) break; /* Is the next one our's? */
1235 }
1236 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
1237 mbn->nextblok = mb->nextblok; /* Dequeue us */
1238 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
1239 }
1240
1241 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
1242 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
1243 mapCtl.mapcnext = mb; /* Chain us to the head */
1244 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
1245 }
1246 else {
1247 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
1248 mapCtl.mapcreln++; /* Count on release list */
1249 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
1250 mapCtl.mapcrel = mb; /* Chain us in front */
1251 }
1252 }
1253 }
1254
1255 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
1256 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1257 thread_call_enter(mapping_adjust_call); /* Go toss some */
1258 }
1259 }
1260 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1261 splx(s); /* Restore 'rupts */
1262
1263 return; /* Bye, dude... */
1264 }
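
/*
 * Illustrative sketch (kept under #if 0, not compiled): the address arithmetic mapping_free
 * relies on. Mappings are 32 bytes each and a block is one page-aligned page, so both the
 * block header address and the slot index fall straight out of the mapping's address, and the
 * slot's free bit lives in the 4-word bitmap in the header. The function name is an assumption
 * for illustration only.
 */
#if 0
static void free_bit_example(struct mapping *mp) {			/* (TEST/DEBUG) */

	mappingblok *mb;
	unsigned int mindx;

	mb    = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);		/* Page-aligned block header containing this mapping */
	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;		/* Slot number: 32-byte mappings within the page */

	kprintf("free_bit_example: block=%08X; slot=%d; word=%d; bit=%08X\n",	/* (TEST/DEBUG) */
		mb, mindx, mindx >> 5, 0x80000000 >> (mindx & 31));
}
#endif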
1265
1266
1267 /*
1268 * mapping_alloc(void) - obtain a mapping from the free list
1269 *
1270 This routine takes a mapping off of the free list and returns its address.
1271 *
1272 * We do this by finding a free entry in the first block and allocating it.
1273 * If this allocation empties the block, we remove it from the free list.
1274 * If this allocation drops the total number of free entries below a threshold,
1275 * we allocate a new block.
1276 *
1277 */
1278
1279 mapping *mapping_alloc(void) { /* Obtain a mapping */
1280
1281 register mapping *mp;
1282 mappingblok *mb, *mbn;
1283 spl_t s;
1284 int mindx;
1285 kern_return_t retr;
1286
1287 s = splhigh(); /* Don't bother from now on */
1288 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1289 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1290 }
1291
1292 if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */
1293 unsigned int i;
1294 struct mappingflush mappingflush;
1295 PCA *pca_min, *pca_max;
1296 PCA *pca_base;
1297
1298 pca_min = (PCA *)(hash_table_base+hash_table_size);
1299 pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);
1300
1301 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1302 mapCtl.mapcflush.mappingcnt = 0;
1303 pca_base = mapCtl.mapcflush.pcaptr;
1304 do {
1305 hw_select_mappings(&mapCtl.mapcflush);
1306 mapCtl.mapcflush.pcaptr++;
1307 if (mapCtl.mapcflush.pcaptr >= pca_max)
1308 mapCtl.mapcflush.pcaptr = pca_min;
1309 } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));
1310
1311 if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
1312 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1313 panic("mapping_alloc - all mappings are wired\n");
1314 }
1315 mappingflush = mapCtl.mapcflush;
1316 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1317 splx(s);
1318 for (i=0;i<mappingflush.mappingcnt;i++)
1319 mapping_remove(mappingflush.mapping[i].pmap,
1320 mappingflush.mapping[i].offset);
1321 s = splhigh();
1322 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
1323 panic("mapping_alloc - timeout getting control lock\n");
1324 }
1325 }
1326 mb = mapCtl.mapcnext;
1327 }
1328
1329 if(!(mindx = mapalc(mb))) { /* Allocate a slot */
1330 panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */
1331 }
1332
1333 if(mindx < 0) { /* Did we just take the last one */
1334 mindx = -mindx; /* Make positive */
1335 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1336 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1337 }
1338
1339 mapCtl.mapcfree--; /* Decrement free count */
1340 mapCtl.mapcinuse++; /* Bump in use count */
1341
1342 mapCtl.mapcallocc++; /* Count total calls */
1343
1344 /*
1345 * Note: in the following code, we attempt to rescue blocks only one at a time.
1346 * Eventually, after a few more mapping_alloc calls, we will catch up. If none are
1347 * rescuable, we kick off mapping_adjust, which will allocate some for us. We only do
1348 * this if we haven't already done it.
1349 * For early boot, rescuing one block at a time is enough because we prime the
1350 * release list with as much as we need until threads start.
1351 */
1352 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1353 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
1354 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1355 mapCtl.mapcreln--; /* Back off the count */
1356 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1357 }
1358 else { /* We need to replenish */
1359 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1360 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1361 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1362 }
1363 }
1364 }
1365 }
1366
1367 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1368 splx(s); /* Restore 'rupts */
1369
1370 mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */
1371 __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Clean it up */
1372 return mp; /* Send it back... */
1373 }
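/*
 * Illustrative sketch (not part of the original source): the usual life cycle
 * pairs mapping_alloc with mapping_free once the mapping is no longer needed.
 * The mapping_free prototype is assumed here from its use earlier in this file.
 */
#if 0 /* example only, not compiled */
static void mapping_lifecycle_example(void) {
	mapping *mp;

	mp = mapping_alloc(); /* Pull a zeroed mapping slot off the free list */
	/* ... fill in the mapping and hang it on the appropriate chains ... */
	mapping_free(mp); /* Give the slot back to its block when done */
}
#endif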
1374
1375
1376 void
1377 consider_mapping_adjust()
1378 {
1379 spl_t s;
1380
1381 s = splhigh(); /* Don't bother from now on */
1382 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1383 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1384 }
1385
1386 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1387 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1388 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1389 }
1390 }
1391
1392 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1393 splx(s); /* Restore 'rupts */
1394
1395 }
1396
1397
1398
1399 /*
1400 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1401 *
1402 * The mapping block is a page size area on a page boundary. It contains 1 header and 127
1403 * mappings. This call adds and initializes a block for use.
1404 *
1405 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1406 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1407 * corresponds to the header). The translation mask is the XOR of the virtual and real
1408 * addresses (needless to say, the block must be wired).
1409 *
1410 * We handle these mappings the same way as saveareas: the block is only on the chain so
1411 * long as there are free entries in it.
1412 *
1413 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1414 * mappings. Blocks marked PERM won't ever be released.
1415 *
1416 * If perm is negative, the block is not initialized here; it is immediately queued to the mapCtl.mapcrel
1417 * list. We do this only at startup time. This is done because we only allocate blocks
1418 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1419 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1420 * them on the release queue, the allocate routine will rescue them. Then when the
1421 * pageout scan starts, all extra ones will be released.
1422 *
1423 */
1424
1425
1426 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1427 /* Sets up a block of mappings starting at mbl.
1428 perm indicates whether the block is permanent (positive), releasable (zero),
1429 or goes straight to the release queue (negative).
1430 locked indicates if the control lock is held already */
1431
1432 mappingblok *mb;
1433 spl_t s;
1434 int i;
1435 unsigned int raddr;
1436
1437 mb = (mappingblok *)mbl; /* Start of area */
1438
1439
1440 if(perm >= 0) { /* See if we need to initialize the block */
1441 if(perm) {
1442 raddr = (unsigned int)mbl; /* Perm means V=R */
1443 mb->mapblokflags = mbPerm; /* Set perm */
1444 }
1445 else {
1446 raddr = kvtophys(mbl); /* Get real address */
1447 mb->mapblokflags = 0; /* Set not perm */
1448 }
1449
1450 mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */
1451
1452 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1453 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1454 mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */
1455 mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */
1456 }
1457
1458 s = splhigh(); /* Don't bother from now on */
1459 if(!locked) { /* Do we need the lock? */
1460 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1461 panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */
1462 }
1463 }
1464
1465 if(perm < 0) { /* Direct to release queue? */
1466 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1467 mapCtl.mapcrel = mb; /* Queue us on in */
1468 mapCtl.mapcreln++; /* Count the free block */
1469 }
1470 else { /* Add to the free list */
1471
1472 mb->nextblok = 0; /* We always add to the end */
1473 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1474
1475 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1476 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1477 }
1478 else { /* We are not the first */
1479 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1480 mapCtl.mapclast = mb; /* We are now last */
1481 }
1482 }
1483
1484 if(!locked) { /* Do we need to unlock? */
1485 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1486 }
1487 splx(s); /* Restore 'rupts */
1488 return; /* All done, leave... */
1489 }
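/*
 * Illustrative sketch (not part of the original source): mapblokvrswap is the
 * XOR of the block's virtual and real base addresses, so the same XOR converts
 * any address inside the block in either direction. Slot 5 is an arbitrary
 * example.
 */
#if 0 /* example only, not compiled */
static void mapblok_vrswap_example(mappingblok *mb) {
	unsigned int virt, real;

	virt = (unsigned int)&((mapping *)mb)[5]; /* Virtual address of slot 5 in the block */
	real = virt ^ mb->mapblokvrswap; /* XOR with the mask gives the real address */
	virt = real ^ mb->mapblokvrswap; /* XOR again recovers the virtual address */
}
#endif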
1490
1491
1492 /*
1493 * void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
1494 *
1495 * No locks can be held, because we allocate memory here.
1496 * This routine needs a corresponding mapping_relpre call to remove the
1497 * hold off flag so that the adjust routine will free the extra mapping
1498 * blocks on the release list. I don't like this, but I don't know
1499 * how else to do this for now...
1500 *
1501 */
1502
1503 void mapping_prealloc(unsigned int size) { /* Preallocates mappings for a large request */
1504
1505 int nmapb, i;
1506 kern_return_t retr;
1507 mappingblok *mbn;
1508 spl_t s;
1509
1510 s = splhigh(); /* Don't bother from now on */
1511 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1512 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1513 }
1514
1515 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1516
1517 mapCtl.mapcholdoff++; /* Bump the hold off count */
1518
1519 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1520 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1521 splx(s); /* Restore 'rupts */
1522 return;
1523 }
1524 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1525 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1526 splx(s); /* Restore 'rupts */
1527 return;
1528 }
1529 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1530
1531 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1532 splx(s); /* Restore 'rupts */
1533
1534 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1535 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1536 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1537 break;
1538 }
1539 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
1540 }
1541 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1542 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1543
1544 mapCtl.mapcrecurse = 0; /* We are done now */
1545 }
1546
1547 /*
1548 * void mapping_relpre(void) - Releases preallocation release hold off
1549 *
1550 * This routine removes the
1551 * hold off flag so that the adjust routine will free the extra mapping
1552 * blocks on the release list. I don't like this, but I don't know
1553 * how else to do this for now...
1554 *
1555 */
1556
1557 void mapping_relpre(void) { /* Releases release hold off */
1558
1559 spl_t s;
1560
1561 s = splhigh(); /* Don't bother from now on */
1562 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1563 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1564 }
1565 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1566 panic("mapping_relpre: hold-off count went negative\n");
1567 }
1568
1569 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1570 splx(s); /* Restore 'rupts */
1571 }
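/*
 * Illustrative sketch (not part of the original source): mapping_prealloc and
 * mapping_relpre are intended to bracket a large request; the size here is an
 * arbitrary example.
 */
#if 0 /* example only, not compiled */
static void mapping_holdoff_example(void) {
	mapping_prealloc(16 * 1024 * 1024); /* Make sure enough mappings exist for a 16MB request */
	/* ... enter the mappings for the large range ... */
	mapping_relpre(); /* Drop the hold off so the adjust routine can trim the extras */
}
#endif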
1572
1573 /*
1574 * void mapping_free_prime(void) - Primes the mapping block release list
1575 *
1576 * See mapping_free_init.
1577 * No locks can be held, because we allocate memory here.
1578 * One processor running only.
1579 *
1580 */
1581
1582 void mapping_free_prime(void) { /* Primes the mapping block release list */
1583
1584 int nmapb, i;
1585 kern_return_t retr;
1586 mappingblok *mbn;
1587 vm_offset_t mapping_min;
1588
1589 retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
1590 FALSE, TRUE, &mapping_map);
1591
1592 if (retr != KERN_SUCCESS)
1593 panic("mapping_free_prime: kmem_suballoc failed");
1594
1595
1596 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1597 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1598
1599 #if DEBUG
1600 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1601 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1602 #endif
1603
1604 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1605 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1606 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1607 panic("Whoops... Not a bit of wired memory left for anyone\n");
1608 }
1609 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1610 }
1611 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1612 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1613 }
1614
1615
1616
1617 void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1618 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1619 {
1620 *count = mapCtl.mapcinuse;
1621 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1622 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1623 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1624 *alloc_size = PAGE_SIZE;
1625
1626 *collectable = 1;
1627 *exhaustable = 0;
1628 }
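/*
 * Worked example (assuming a 4KB page): each block is one page holding a header
 * plus 127 mappings, so the element size reported above is
 * PAGE_SIZE / (MAPPERBLOK + 1) = 4096 / 128 = 32 bytes. cur_size therefore
 * charges 32 bytes for every in-use or free mapping plus a whole page for each
 * block parked on the release list.
 */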
1629
1630
1631 /*
1632 * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
1633 *
1634 * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with
1635 * the same space. If it finds it, it returns the virtual address.
1636 *
1637 * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check
1638 * for it and fail it myself...
1639 */
1640
1641 vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */
1642
1643 spl_t s;
1644 register mapping *mp, *mpv;
1645 vm_offset_t va;
1646
1647 if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */
1648
1649 s = splhigh();
1650 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1651 splx(s); /* Restore 'rupts */
1652 panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */
1653 return(0); /* Should die before here */
1654 }
1655
1656 va = 0; /* Assume failure */
1657
1658 for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */
1659
1660 if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */
1661
1662 va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */
1663 va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */
1664 va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */
1665 break; /* We're done now, pass virtual address back */
1666 }
1667
1668 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1669 splx(s); /* Restore 'rupts */
1670 return(va); /* Return the result or 0... */
1671 }
1672
1673 /*
1674 * kvtophys(addr)
1675 *
1676 * Convert a kernel virtual address to a physical address
1677 */
1678 vm_offset_t kvtophys(vm_offset_t va) {
1679
1680 register mapping *mp, *mpv;
1681 register blokmap *bmp;
1682 register vm_offset_t pa;
1683 spl_t s;
1684
1685 s=splhigh(); /* Don't bother from now on */
1686 mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */
1687
1688 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1689 splx(s); /* Restore 'rupts */
1690 panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
1691 return 0;
1692 }
1693
1694 if(!mp) { /* If it was not a normal page */
1695 pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1696 splx(s); /* Restore 'rupts */
1697 return pa; /* Return physical address */
1698 }
1699
1700 mpv = hw_cpv(mp); /* Convert to virtual addressing */
1701
1702 if(!mpv->physent) { /* Was there a physical entry? */
1703 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* No physent, so get the physical address from the PTE real image */
1704 }
1705 else {
1706 pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */
1707 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1708 }
1709
1710 splx(s); /* Restore 'rupts */
1711 return pa; /* Return the physical address... */
1712 }
1713
1714 /*
1715 * phystokv(addr)
1716 *
1717 * Convert a physical address to a kernel virtual address if
1718 * there is a mapping, otherwise return NULL
1719 */
1720
1721 vm_offset_t phystokv(vm_offset_t pa) {
1722
1723 struct phys_entry *pp;
1724 vm_offset_t va;
1725
1726 pp = pmap_find_physentry(pa); /* Find the physical entry */
1727 if (PHYS_NULL == pp) {
1728 return (vm_offset_t)NULL; /* If none, return null */
1729 }
1730 if(!(va=mapping_p2v(kernel_pmap, pp))) {
1731 return 0; /* Can't find it, return 0... */
1732 }
1733 return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */
1734
1735 }
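/*
 * Illustrative sketch (not part of the original source): kvtophys and phystokv
 * act as rough inverses for ordinary wired kernel pages; phystokv returns the
 * first kernel virtual mapping of the page, which need not be the address
 * originally passed to kvtophys if the page is mapped more than once.
 */
#if 0 /* example only, not compiled */
static void kv_phys_roundtrip_example(vm_offset_t kva) {
	vm_offset_t pa, kva2;

	pa = kvtophys(kva); /* Physical address, or 0 for an unmapped or odd-sized page */
	if(pa) kva2 = phystokv(pa); /* First kernel virtual mapping of that physical page */
}
#endif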
1736
1737 /*
1738 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1739 * page 0 access for the current thread.
1740 *
1741 * If parameter is TRUE, faults are ignored
1742 * If parameter is FALSE, faults are honored
1743 *
1744 */
1745
1746 void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1747
1748 if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1749 else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1750
1751 return; /* All done, leave... */
1752 }
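/*
 * Illustrative sketch (not part of the original source): a caller that must
 * touch page 0 brackets the access so faults there are ignored only for the
 * duration of the probe.
 */
#if 0 /* example only, not compiled */
static void page_zero_probe_example(void) {
	ignore_zero_fault(TRUE); /* Ignore faults on page 0 for this thread */
	/* ... access that may touch page 0 ... */
	ignore_zero_fault(FALSE); /* Go back to honoring page 0 faults */
}
#endif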
1753
1754
1755 /*
1756 * Allocates a range of virtual addresses in a map as optimally as
1757 * possible for block mapping. The start address is aligned such
1758 * that a minimum number of power-of-two sized/aligned blocks is
1759 * required to cover the entire range.
1760 *
1761 * We also use a mask of valid block sizes to determine optimality.
1762 *
1763 * Note that the passed in pa is not actually mapped to the selected va,
1764 * rather, it is used to figure the optimal boundary. The actual
1765 * V to R mapping is done externally.
1766 *
1767 * This function will return KERN_INVALID_ADDRESS if an optimal address
1768 * cannot be found. That is not necessarily a fatal error; the caller may
1769 * still be able to do a non-optimal assignment.
1770 */
1771
1772 kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
1773 vm_size_t size, vm_prot_t prot) {
1774
1775 vm_map_entry_t entry, next, tmp_entry, new_entry;
1776 vm_offset_t start, end, algnpa, endadr, strtadr, curradr;
1777 vm_offset_t boundary;
1778
1779 unsigned int maxsize, minsize, leading, trailing;
1780
1781 assert(page_aligned(pa));
1782 assert(page_aligned(size));
1783
1784 if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */
1785
1786 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
1787 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
1788
1789 boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */
1790 if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */
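	/*
	 * Worked example (hypothetical blokValid value): if blokValid were 0x00FF0000,
	 * minsize would be 0x00010000 (its lowest set bit) and maxsize 0x00800000 (its
	 * highest set bit). For size = 0x00300000, cntlzw(size) = 10, so the starting
	 * boundary is 0x80000000 >> 10 = 0x00200000, the largest power of two that
	 * does not exceed size.
	 */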
1791
1792 vm_map_lock(map); /* No touchee no mapee */
1793
1794 for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */
1795 if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */
1796 algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */
1797 leading = algnpa - pa; /* Get leading size */
1798
1799 curradr = 0; /* Start low */
1800
1801 while(1) { /* Try all possible values for this opt level */
1802
1803 curradr = curradr + boundary; /* Get the next optimal address */
1804 strtadr = curradr - leading; /* Calculate start of optimal range */
1805 endadr = strtadr + size; /* And now the end */
1806
1807 if((curradr < boundary) || /* Did address wrap here? */
1808 (strtadr > curradr) || /* How about this way? */
1809 (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */
1810
1811 if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
1812 if(endadr > map->max_offset) break; /* No room right now... */
1813
1814 if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
1815
1816 next = entry->vme_next; /* Get the next entry */
1817 if((next == vm_map_to_entry(map)) || /* Are we the last entry? */
1818 (next->vme_start >= endadr)) { /* or do we end before the next entry? */
1819
1820 new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
1821 VM_OBJECT_NULL,
1822 0, /* Offset into object of 0 */
1823 FALSE, /* No copy needed */
1824 FALSE, /* Not shared */
1825 FALSE, /* Not in transition */
1826 prot, /* Set the protection to requested */
1827 prot, /* We can't change protection */
1828 VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind,
1829 'cause we don't page in this area */
1830 VM_INHERIT_DEFAULT, /* Default inheritance */
1831 0); /* Nothing is wired */
1832
1833 vm_map_unlock(map); /* Let the world see it all */
1834 *va = strtadr; /* Tell everyone */
1835 *bnd = boundary; /* Say what boundary we are aligned to */
1836 return(KERN_SUCCESS); /* Leave, all is right with the world... */
1837 }
1838 }
1839 }
1840
1841 vm_map_unlock(map); /* Couldn't find a slot */
1842 return(KERN_INVALID_ADDRESS);
1843 }
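/*
 * Illustrative sketch (not part of the original source): a caller asking for an
 * optimally aligned range and treating KERN_INVALID_ADDRESS as "fall back to a
 * normal, non-optimal assignment". The variable names are invented for the
 * example.
 */
#if 0 /* example only, not compiled */
static void vm_map_block_example(vm_map_t map, vm_offset_t pa, vm_size_t size) {
	vm_offset_t va, bnd;

	if(vm_map_block(map, &va, &bnd, pa, size, VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS) {
		return; /* No optimal slot right now; caller may assign non-optimally */
	}
	/* va is the chosen start; the range is aligned to the bnd boundary */
}
#endif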
1844
1845 /*
1846 * Copies data from a physical page to a virtual page. This is used to
1847 * move data from the kernel to user state.
1848 *
1849 * Note that it is invalid to have a source that spans a page boundary.
1850 * This can block.
1851 * We don't check protection either.
1852 * And we don't handle a block mapped sink address either.
1853 *
1854 */
1855
1856 kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
1857
1858 vm_map_t map;
1859 kern_return_t ret;
1860 unsigned int spaceid;
1861 int left, csize;
1862 vm_offset_t pa;
1863 register mapping *mpv, *mp;
1864 spl_t s;
1865
1866 if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */
1867 map = current_act()->map; /* Get the current map */
1868
1869 while(size) {
1870 s=splhigh(); /* Don't bother me */
1871
1872 spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */
1873
1874 mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */
1875 if(!mp) { /* Was it there? */
1876 splx(s); /* Restore the interrupt level */
1877 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */
1878 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1879
1880 return KERN_FAILURE; /* Didn't find any, return no good... */
1881 }
1882 if((unsigned int)mp&1) { /* Did we timeout? */
1883 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink); /* Yeah, scream about it! */
1884 splx(s); /* Restore the interrupt level */
1885 return KERN_FAILURE; /* Bad hair day, return FALSE... */
1886 }
1887
1888 mpv = hw_cpv(mp); /* Convert mapping block to virtual */
1889
1890 if(mpv->PTEr & 1) { /* Are we write protected? yes, could indicate COW */
1891 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1892 splx(s); /* Restore the interrupt level */
1893 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* check for a COW area */
1894 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1895 return KERN_FAILURE; /* Didn't find any, return no good... */
1896 }
1897 left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */
1898
1899 csize = size < left ? size : left; /* Set amount to copy this pass */
1900
1901 pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */
1902
1903 bcopy_physvir((char *)source, (char *)pa, csize); /* Do a physical copy, virtually */
1904
1905 hw_set_mod(mpv->physent); /* Go set the change of the sink */
1906
1907 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1908 splx(s); /* Open up for interrupts */
1909
1910 sink += csize; /* Move up to start of next page */
1911 source += csize; /* Move up source */
1912 size -= csize; /* Set amount for next pass */
1913 }
1914 return KERN_SUCCESS;
1915 }
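/*
 * Illustrative sketch (not part of the original source): pushing one word from
 * a physical source out to a user virtual address in the current map. The
 * source must stay within a single page; the parameters are invented for the
 * example.
 */
#if 0 /* example only, not compiled */
static kern_return_t copyp2v_example(vm_offset_t src_phys, vm_offset_t user_va) {
	return copyp2v(src_phys, user_va, sizeof(unsigned int)); /* One word, so it cannot cross a page */
}
#endif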
1916
1917
1918 /*
1919 * copy 'size' bytes from physical to physical address
1920 * the caller must validate the physical ranges
1921 *
1922 * if flush_action == 0, no cache flush necessary
1923 * if flush_action == 1, flush the source
1924 * if flush_action == 2, flush the dest
1925 * if flush_action == 3, flush both source and dest
1926 */
1927
1928 kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
1929
1930 switch(flush_action) {
1931 case 1:
1932 flush_dcache(source, size, 1);
1933 break;
1934 case 2:
1935 flush_dcache(dest, size, 1);
1936 break;
1937 case 3:
1938 flush_dcache(source, size, 1);
1939 flush_dcache(dest, size, 1);
1940 break;
1941
1942 }
1943 bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */
1944
1945 switch(flush_action) {
1946 case 1:
1947 flush_dcache(source, size, 1);
1948 break;
1949 case 2:
1950 flush_dcache(dest, size, 1);
1951 break;
1952 case 3:
1953 flush_dcache(source, size, 1);
1954 flush_dcache(dest, size, 1);
1955 break;
1956
1957 }
1958 return KERN_SUCCESS; /* Report success; the caller validated the physical ranges */
}
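/*
 * Illustrative sketch (not part of the original source): copying one page
 * between physical addresses and flushing both source and destination from the
 * data cache (flush_action 3). The caller is responsible for validating both
 * physical ranges.
 */
#if 0 /* example only, not compiled */
static void copyp2p_example(vm_offset_t src_pa, vm_offset_t dst_pa) {
	(void)copyp2p(src_pa, dst_pa, PAGE_SIZE, 3); /* 3 = flush both source and dest */
}
#endif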
1959
1960
1961
1962 #if DEBUG
1963 /*
1964 * Dumps out the mapping stuff associated with a virtual address
1965 */
1966 void dumpaddr(space_t space, vm_offset_t va) {
1967
1968 mapping *mp, *mpv;
1969 vm_offset_t pa;
1970 spl_t s;
1971
1972 s=splhigh(); /* Don't bother me */
1973
1974 mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */
1975 if(!mp) { /* Did we find one? */
1976 splx(s); /* Restore the interrupt level */
1977 printf("dumpaddr: virtual address (%08X) not mapped\n", va);
1978 return; /* Didn't find any, return FALSE... */
1979 }
1980 if((unsigned int)mp&1) { /* Did we timeout? */
1981 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */
1982 splx(s); /* Restore the interrupt level */
1983 return; /* Bad hair day, return FALSE... */
1984 }
1985 printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address were dumping */
1986 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1987 dumpmapping(mpv);
1988 if(mpv->physent) {
1989 dumppca(mpv);
1990 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
1991 }
1992 splx(s); /* Was there something you needed? */
1993 return; /* Tell them we did it */
1994 }
1995
1996
1997
1998 /*
1999 * Prints out a mapping control block
2000 *
2001 */
2002
2003 void dumpmapping(struct mapping *mp) { /* Dump out a mapping */
2004
2005 printf("Dump of mapping block: %08X\n", mp); /* Header */
2006 printf(" next: %08X\n", mp->next);
2007 printf(" hashnext: %08X\n", mp->hashnext);
2008 printf(" PTEhash: %08X\n", mp->PTEhash);
2009 printf(" PTEent: %08X\n", mp->PTEent);
2010 printf(" physent: %08X\n", mp->physent);
2011 printf(" PTEv: %08X\n", mp->PTEv);
2012 printf(" PTEr: %08X\n", mp->PTEr);
2013 printf(" pmap: %08X\n", mp->pmap);
2014
2015 if(mp->physent) { /* Print physent if it exists */
2016 printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
2017 }
2018 else {
2019 printf("Associated physical entry: none\n");
2020 }
2021
2022 dumppca(mp); /* Dump out the PCA information */
2023
2024 return;
2025 }
2026
2027 /*
2028 * Prints out a PTEG control area
2029 *
2030 */
2031
2032 void dumppca(struct mapping *mp) { /* PCA */
2033
2034 PCA *pca;
2035 unsigned int *pteg;
2036
2037 pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */
2038 pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
2039 printf(" Dump of PCA: %08X\n", pca); /* Header */
2040 printf(" PCAlock: %08X\n", pca->PCAlock);
2041 printf(" PCAallo: %08X\n", pca->flgs.PCAallo);
2042 printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
2043 printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
2044 printf("Dump of PTEG: %08X\n", pteg); /* Header */
2045 printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);
2046 printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);
2047 printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);
2048 printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);
2049 return;
2050 }
2051
2052 /*
2053 * Dumps starting with a physical entry
2054 */
2055
2056 void dumpphys(struct phys_entry *pp) { /* Dump from physent */
2057
2058 mapping *mp;
2059 PCA *pca;
2060 unsigned int *pteg;
2061
2062 printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
2063 mp = hw_cpv(pp->phys_link);
2064 while(mp) {
2065 dumpmapping(mp);
2066 dumppca(mp);
2067 mp = hw_cpv(mp->next);
2068 }
2069
2070 return;
2071 }
2072
2073 #endif
2074
2075
2076 kern_return_t bmapvideo(vm_offset_t *info);
2077 kern_return_t bmapvideo(vm_offset_t *info) {
2078
2079 extern struct vc_info vinfo;
2080
2081 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
2082 return KERN_SUCCESS;
2083 }
2084
2085 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
2086 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
2087
2088 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */
2089 return KERN_SUCCESS;
2090 }
2091
2092 kern_return_t bmapmapr(vm_offset_t va);
2093 kern_return_t bmapmapr(vm_offset_t va) {
2094
2095 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
2096 return KERN_SUCCESS;
2097 }