1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
24 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
25 * Currently, some of the function of this module is contained within pmap.c. We may want to move
26 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
27 *
28 * We also depend upon the structure of the phys_entry control block. We do put some processor
29 * specific stuff in there.
30 *
31 */
32
33 #include <cpus.h>
34 #include <debug.h>
35 #include <mach_kgdb.h>
36 #include <mach_vm_debug.h>
37 #include <db_machine_commands.h>
38
39 #include <kern/thread.h>
40 #include <kern/thread_act.h>
41 #include <mach/vm_attributes.h>
42 #include <mach/vm_param.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_page.h>
46 #include <kern/spl.h>
47
48 #include <kern/misc_protos.h>
49 #include <ppc/misc_protos.h>
50 #include <ppc/proc_reg.h>
51
52 #include <vm/pmap.h>
53 #include <ppc/pmap.h>
54 #include <ppc/pmap_internals.h>
55 #include <ppc/mem.h>
56
57 #include <ppc/new_screen.h>
58 #include <ppc/Firmware.h>
59 #include <ppc/mappings.h>
60 #include <ddb/db_output.h>
61
62 #include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
63
64 #define PERFTIMES 0
65
66 #if PERFTIMES && DEBUG
67 #define debugLog2(a, b, c) dbgLog2(a, b, c)
68 #else
69 #define debugLog2(a, b, c)
70 #endif
71
72 vm_map_t mapping_map = VM_MAP_NULL;
73 #define MAPPING_MAP_SIZE 33554432 /* 32MB address space */
74
75 unsigned int incrVSID = 0; /* VSID increment value */
76 unsigned int mappingdeb0 = 0;
77 unsigned int mappingdeb1 = 0;
78 extern unsigned int hash_table_size;
79 extern vm_offset_t mem_size;
80 /*
81 * ppc_prot translates from the mach representation of protections to the PPC version.
82 * We also allow for a direct setting of the protection bits. This extends the mach
83 * concepts to allow the greater control we need for Virtual Machines (VMM).
  84  * Calculating it this way saves a memory reference - and maybe a couple of microseconds.
  85  * It eliminates the use of this table.
86 * unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
87 */
88
89 #define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
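
/*
 * Illustrative sanity check (a sketch, not part of the original build): walking p
 * from 0 to 15 shows that the packed constant above reproduces the old table,
 * entry for entry.  The helper name is hypothetical.
 */
#if 0 /* (TEST/DEBUG) */
static void ppc_prot_check(void) {

	static const unsigned char old_ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned int p;

	for(p = 0; p < 16; p++) {					/* Try every mach protection value */
		if(ppc_prot(p) != old_ppc_prot[p])			/* Compare shift-and-mask form to the table */
			kprintf("ppc_prot mismatch at %d\n", p);
	}
}
#endif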
90
91 /*
92 * About PPC VSID generation:
93 *
94 * This function is called to generate an address space ID. This space ID must be unique within
95 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
96 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
  97  * 4, so we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so in theory we could
  98  * have 1M pmaps at a time, though in practice we couldn't; we'd run out of memory long before then. The
  99  * problem is that only a certain number of pmaps are kept on a free list and, if that is full,
 100  * they are released. This causes us to lose track of which space IDs are free to be reused.
101 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
102 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
103 *
104 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
105 * calculation for virtual address lookup. An improperly chosen value could potentially cause
106 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
107 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
 108  * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
 109  * are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
110 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
111 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
112 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
113 * with no overflow. I don't think that this is a problem.
114 *
 115  * There may be a problem with the space ID, though. A new space ID is generated (mainly)
116 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
117 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
118 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
119 * the same modulo 512. We can reduce this problem by having the segment number be bits
120 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
121 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
 122  * I don't think that it is as significant as the other, so I'll make the space ID
123 * with segment first.
124 *
125 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
126 * While this is a problem that should only happen in periods counted in weeks, it can and
127 * will happen. This is assuming a monotonically increasing space ID. If we were to search
 128  * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
129 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
130 *
131 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
132 * locked by free_pmap_lock) that is sorted in VSID sequence order.
133 *
134 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
 135  * the last one that was freed. Then we allocate that.
136 *
137 * NOTE: We must be called with interruptions off and free_pmap_lock held.
138 *
139 */
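
/*
 * Illustrative sketch (hypothetical helper, not part of the original build) restating
 * the hash described above - (page index XOR vsid) mod number of PTEGs.  Assumes
 * nptegs is a power of two and that only the architected low 19 bits of the VSID
 * participate in the primary hash.
 */
#if 0 /* (TEST/DEBUG) */
static unsigned int pteg_index(unsigned int vsid, unsigned int va, unsigned int nptegs) {

	unsigned int pgindex;

	pgindex = (va >> 12) & 0xFFFF;					/* Page index within the 256MB segment */
	return (pgindex ^ (vsid & 0x7FFFF)) & (nptegs - 1);		/* Primary hash folded to a PTEG index */
}
#endif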
140
141 /*
142 * mapping_init();
143 * Do anything that needs to be done before the mapping system can be used.
144 * Hash table must be initialized before we call this.
145 *
146 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
147 */
148
149 void mapping_init(void) {
150
151 unsigned int tmp;
152
153 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
154
155 incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
156 incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
157 incrVSID |= 1; /* Set bit and add 1 */
158 return;
159
160 }
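
/*
 * Portable restatement of the cntlzw arithmetic above (an illustrative sketch, not part
 * of the original build; assumes a power-of-two hash_table_size).  For example, a 512KB
 * (2^19 byte) hash table gives incrVSID = (1 << 10) | (1 << 5) | 1 = 0x421.
 */
#if 0 /* (TEST/DEBUG) */
static unsigned int incr_vsid_for(unsigned int table_size) {

	unsigned int log2sz, tmp;

	for(log2sz = 0; (1U << log2sz) < table_size; log2sz++);		/* Find log2 of the table size */
	tmp = 31 - log2sz;						/* What cntlzw returns for a power of two */

	return (1 << ((32 - tmp + 1) >> 1)) |				/* Ceiling of sqrt of table size */
	       (1 << ((32 - tmp + 1) >> 2)) |				/* Ceiling of quadroot of table size */
	       1;							/* Low bit, as above */
}
#endif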
161
162
163 /*
164 * mapping_remove(pmap_t pmap, vm_offset_t va);
165 * Given a pmap and virtual address, this routine finds the mapping and removes it from
166 * both its PTEG hash list and the physical entry list. The mapping block will be added to
167 * the free list. If the free list threshold is reached, garbage collection will happen.
168 * We also kick back a return code to say whether or not we had one to remove.
169 *
170 * We have a strict ordering here: the mapping must be removed from the PTEG hash list before
171 * it can be removed from the physical entry list. This allows us to get by with only the PTEG
172 * hash lock at page fault time. The physical entry lock must be held while we remove the mapping
173 * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions,
174 * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
175 * It's just that simple!
176 *
177 * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
178 * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG
 179  * lock to control the hash chain and may move the position of the mapping for MRU calculations.
180 *
181 * Note that mappings do not need to point to a physical entry. When they don't, it indicates
182 * the mapping is outside of physical memory and usually refers to a memory mapped device of
183 * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock
184 * routines return normally, but don't do anything.
185 */
186
187 boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR
188 Returns TRUE if a mapping was found to remove */
189
190 mapping *mp, *mpv;
191 register blokmap *blm;
192 spl_t s;
193 unsigned int *useadd, *useaddr, uindx;
194 int i;
195 struct phys_entry *pp;
196 mapping *mp1, *mpv1;
197
198 debugLog2(1, va, pmap->space); /* start mapping_remove */
199
200 s=splhigh(); /* Don't bother me */
201
202 mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */
203
204 if(!mp) { /* Did we find one? */
205 splx(s); /* Allow 'rupts now */
206 if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */
207
208 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
209 blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
210 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
211 pmap, va, blm);
212 }
213 while ((unsigned int)mp & 2)
214 mp = (mapping *)hw_rem_blk(pmap, va, va);
215 #if 0
216 blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */
217 kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
218 blm, blm->start, blm->end, blm->PTEr);
219 #endif
220 mapping_free(hw_cpv(mp)); /* Release it */
221 debugLog2(2, 1, 0); /* End mapping_remove */
222 return TRUE; /* Tell them we did it */
223 }
224 debugLog2(2, 0, 0); /* end mapping_remove */
225 return FALSE; /* Didn't find any, return FALSE... */
226 }
227 if((unsigned int)mp&1) { /* Did we timeout? */
228 panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */
229 splx(s); /* Restore the interrupt level */
230 return FALSE; /* Bad hair day, return FALSE... */
231 }
232
233 mpv = hw_cpv(mp); /* Get virtual address of mapping */
234 #if DEBUG
235 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
236 #else
237 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
238 #endif
239 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
240 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
 241         (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);    /* Decrement the even or odd slot */
242
243 #if 0
244 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
245 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
246 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
247 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
248 }
249 }
250 #endif
251
252 hw_rem_map(mp); /* Remove the corresponding mapping */
253
254 pp = mpv->physent;
255
256 if ((mpv->physent) && (pmap->vflags & pmapVMhost)) {
257
258 while(mp1 = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
259
260 mpv1 = hw_cpv(mp1); /* Get the virtual address */
261 #if DEBUG
262 if(hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
263 #else
264 (void)hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1); /* Decrement the resident page count */
265 #endif
266
267 uindx = ((mpv1->PTEv >> 24) & 0x78) | ((mpv1->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
268 useadd = (unsigned int *)&mpv1->pmap->pmapUsage[uindx]; /* Point to slot to bump */
269 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
 270                         (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);    /* Decrement the even or odd slot */
271
272 #if 0
273 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
274 if((mpv1->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
275 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
276 i * pmapUsageSize, mpv1->pmap->pmapUsage[i], mpv1->pmap);
277 }
278 }
279 #endif
280
281 hw_rem_map(mp1); /* Remove the mapping */
282 mapping_free(mpv1); /* Add mapping to the free list */
283 }
284 }
285
286 if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
287
288 splx(s); /* Was there something you needed? */
289
290 mapping_free(mpv); /* Add mapping to the free list */
291 debugLog2(2, 1, 0); /* end mapping_remove */
292 return TRUE; /* Tell them we did it */
293 }
294
295 /*
296 * mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
297 *
298 * This guy releases any mappings that exist for a physical page on a specified map.
 299  *      We get the lock on the phys_entry, and hold it throughout this whole routine.
 300  *      That way, no one can change the queue out from underneath us. We keep fetching
 301  *      the physent's mapping anchor until it is null, then we're done.
 302  *
 303  *      For each mapping, we call the remove routine to remove it from the PTEG hash list and
 304  *      decrement the pmap's residency count. Then we release the mapping back to the free list.
305 *
306 */
307
308
309 void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) { /* Remove all mappings from specified pmap for this physent */
310
311 mapping *mp, *mp_next, *mpv;
312 spl_t s;
313 unsigned int *useadd, *useaddr, uindx;
314 int i;
315
316 s=splhigh(); /* Don't bother me */
317
318 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
319 panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
320 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
321 }
322
323 mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
324
325 while(mp) { /* Keep going so long as there's another */
326
327 mpv = hw_cpv(mp); /* Get the virtual address */
328 if(mpv->pmap != pmap) {
329 mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
330 continue;
331 }
332 #if DEBUG
333 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
334 #else
335 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
336 #endif
337
338 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join seg # and top 2 bits of API */
339 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
340 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
 341                 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);    /* Decrement the even or odd slot */
342
343
344
345 mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
346 hw_rem_map(mp); /* Remove the mapping */
347 mapping_free(mpv); /* Add mapping to the free list */
348 mp = mp_next;
349 }
350
351 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
352 splx(s);
353 return;
354 }
355 /*
356 * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
357 *
358 * This guy releases any mappings that exist for a physical page.
 359  *      We get the lock on the phys_entry, and hold it throughout this whole routine.
 360  *      That way, no one can change the queue out from underneath us. We keep fetching
 361  *      the physent's mapping anchor until it is null, then we're done.
 362  *
 363  *      For each mapping, we call the remove routine to remove it from the PTEG hash list and
 364  *      decrement the pmap's residency count. Then we release the mapping back to the free list.
365 *
366 */
367
368 void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */
369
370 mapping *mp, *mpv;
371 spl_t s;
372 unsigned int *useadd, *useaddr, uindx;
373 int i;
374
375 s=splhigh(); /* Don't bother me */
376 debugLog2(3, pp->pte1, 0); /* start mapping_purge */
377
378 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
379 panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
380 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
381 }
382
383 while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */
384
385 mpv = hw_cpv(mp); /* Get the virtual address */
386 #if DEBUG
387 if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
388 #else
389 (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */
390 #endif
391
392 uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */
393 useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */
394 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
 395                 (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);    /* Decrement the even or odd slot */
396
397 #if 0
398 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
399 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
400 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
401 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
402 }
403 }
404 #endif
405
406
407 hw_rem_map(mp); /* Remove the mapping */
408 mapping_free(mpv); /* Add mapping to the free list */
409 }
410
411 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
412
413 debugLog2(4, pp->pte1, 0); /* end mapping_purge */
414 splx(s); /* Was there something you needed? */
415 return; /* Tell them we did it */
416 }
417
418
419 /*
420 * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
421 *
422 * This routine takes the given parameters, builds a mapping block, and queues it into the
423 * correct lists.
424 *
425 * The pp parameter can be null. This allows us to make a mapping that is not
426 * associated with any physical page. We may need this for certain I/O areas.
427 *
 428  *      If the phys_entry address is null, we neither lock nor chain into it.
 429  *      If locked is 1, we already hold the lock on the phys_entry and will neither take nor release it.
430 */
431
432 mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */
433
434 register mapping *mp, *mpv;
435 unsigned int *useadd, *useaddr;
436 spl_t s;
437 int i;
438
 439         debugLog2(5, va, pa);                                           /* start mapping_make */
440 mpv = mapping_alloc(); /* Get a spare mapping block */
441
442 mpv->pmap = pmap; /* Initialize the pmap pointer */
443 mpv->physent = pp; /* Initialize the pointer to the physical entry */
444 mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */
445 mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the VSID */
446
447 s=splhigh(); /* Don't bother from now on */
448
449 mp = hw_cvp(mpv); /* Get the physical address of this */
450
451 if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? */
452 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
453 panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
454 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
455 }
456 }
457
 458         if(pp) {                                                        /* See if there is a physical entry */
459 mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mappings forward */
460 pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */
461 }
462
463 hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */
464
465 (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */
466 useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */
467 useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */
468 (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */
469 #if 0
470 for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */
471 if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */
472 panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
473 i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
474 }
475 }
476 #endif
477
478 if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */
479
480 splx(s); /* Ok for interruptions now */
 481         debugLog2(6, pmap->space, prot);                                /* end mapping_make */
482 return mpv; /* Leave... */
483 }
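
/*
 * Illustrative decoding of the PTEv assembled above (a sketch, not part of the original
 * build).  Field positions assume the standard 32-bit PTE word 0 layout (V | VSID | H | API)
 * with the segment number in the high 4 bits of the VSID, as described in the VSID notes
 * at the top of this file; the macro names are hypothetical.
 *
 *	bits  1-4	segment number	= va >> 28		(placed by (va >> 1) & 0x78000000)
 *	bits  5-24	space ID	= pmap->space		(placed by space << 7)
 *	bits 26-31	API		= (va >> 22) & 0x3F
 */
#if 0 /* (TEST/DEBUG) */
#define PTEV_SEG(ptev)		(((ptev) >> 27) & 0xF)		/* Segment number from a PTEv */
#define PTEV_SPACE(ptev)	(((ptev) >> 7) & 0xFFFFF)	/* Space ID from a PTEv */
#define PTEV_API(ptev)		((ptev) & 0x3F)			/* Abbreviated page index from a PTEv */
#endif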
484
485
486 /*
487 * Enters optimal translations for odd-sized V=F blocks.
488 *
489 * Builds a block map for each power-of-two hunk o' address
490 * that exists. This is specific to the processor type.
491 * PPC uses BAT register size stuff. Future PPC might have
492 * something else.
493 *
494 * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
495 * stupid to know otherwise so we only look at the va anyhow, so there...
496 *
497 */
498
499 void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */
500
501 register blokmap *blm, *oblm;
502 unsigned int pg;
503 unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
504 int i, maxshft, nummax, minshft;
505
506 #if 1
507 kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
508 pmap, va, pa, bnd, size, prot, attr);
509 #endif
510
511 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
512 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
513
514 minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */
515 maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */
516
517 leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */
518 trailing = size - leading; /* Get size of trailing area */
519 tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */
520
521 #if 1
522 kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */
523 #endif
524
525 if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
526
527 va = va + tomin; /* Adjust virtual start */
528 pa = pa + tomin; /* Adjust physical start */
529 leading = leading - tomin; /* Adjust leading size */
530
531 /*
532 * Some of this code is very classic PPC. We need to fix this up.
533 */
534
535 leading = leading >> minshft; /* Position for bit testing */
536 cbsize = minsize; /* Set the minimum size */
537
538 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
539
540 if(leading & 1) {
541 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
542 pa = pa + cbsize; /* Bump up physical address */
543 va = va + cbsize; /* Bump up virtual address */
544 }
545
546 leading = leading >> 1; /* Shift up to next size */
547 cbsize = cbsize << 1; /* Here too */
548
549 }
550
551 nummax = trailing >> maxshft; /* Get number of max size blocks left */
552 for(i=0; i < nummax - 1; i++) { /* Account for all max size block left but 1 */
553 pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
554
555 pa = pa + maxsize; /* Bump up physical address */
556 va = va + maxsize; /* Bump up virtual address */
557 trailing -= maxsize; /* Back off what we just did */
558 }
559
560 cbsize = maxsize; /* Start at maximum size */
561
562 for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
563
564 if(trailing & cbsize) {
565 trailing &= ~cbsize; /* Remove the block we are allocating */
566 pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
567 pa = pa + cbsize; /* Bump up physical address */
568 va = va + cbsize; /* Bump up virtual address */
569 }
570 cbsize = cbsize >> 1; /* Next size down */
571 }
572
573 if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
574
575 return; /* Return */
576 }
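
/*
 * Illustrative sketch (not part of the original build) that prints the chunk sizes the
 * loops above would generate for a given va/size.  Assumes power-of-two minsize/maxsize,
 * as derived from blokValid; the helper name is hypothetical.
 */
#if 0 /* (TEST/DEBUG) */
static void show_block_decomp(unsigned int va, unsigned int size, unsigned int bnd,
		unsigned int minsize, unsigned int maxsize) {

	unsigned int leading, trailing, tomin, cbsize;

	leading = ((va + bnd - 1) & -bnd) - va;				/* Size of area below the optimal boundary */
	trailing = size - leading;					/* Size of area at and above the boundary */
	tomin = ((va + minsize - 1) & -minsize) - va;			/* Size needed to round up to the minimum block */

	if(tomin) kprintf("page-mapped prefix: %08X\n", tomin);		/* Mapped with normal pages */
	leading = leading - tomin;

	for(cbsize = minsize; cbsize && (cbsize <= maxsize); cbsize = cbsize << 1) {	/* Leading area, small to large */
		if(leading & cbsize) kprintf("leading block: %08X\n", cbsize);
	}

	while(trailing >= (2 * maxsize)) {				/* Whole maximum-size blocks, all but the last */
		kprintf("maximum block: %08X\n", maxsize);
		trailing = trailing - maxsize;
	}

	for(cbsize = maxsize; cbsize >= minsize; cbsize = cbsize >> 1) {	/* Trailing area, large to small */
		if(trailing & cbsize) kprintf("trailing block: %08X\n", cbsize);
	}

	if(trailing & (minsize - 1)) kprintf("page-mapped tail: %08X\n", trailing & (minsize - 1));
}
#endif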
577
578
579 /*
580 * Enters translations for odd-sized V=F blocks.
581 *
582 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
583 * will be split into normal-sized page mappings.
584 *
585 * The higher level VM map should be locked to insure that we don't have a
586 * double diddle here.
587 *
588 * We panic if we get a block that overlaps with another. We do not merge adjacent
 589  *      blocks because removing any address within a block removes the entire block, and it
 590  *      would really mess things up if we trashed too much.
 591  *
 592  *      Once a block is mapped, it is immutable, that is, protection, cache mode, etc. can
593 * not be changed. The block must be unmapped and then remapped with the new stuff.
594 * We also do not keep track of reference or change flags.
595 *
596 * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
597 * with interruptions and translation disabled and under the control of the lock located
598 * in the first block map. MRU is used because it is expected that the same entry
599 * will be accessed repeatedly while PTEs are being generated to cover those addresses.
600 *
601 */
602
603 void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
604
 605         register blokmap *blm, *oblm, *oblm_virt;
606 unsigned int pg;
607
608 #if 0
609 kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
610 pmap, va, pa, size, prot, attr);
611 #endif
612
613 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
614 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
615 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
616 #if 0
617 kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
618 va + pg, pa + pg);
619 #endif
620 }
621 return; /* All done */
622 }
623
624 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
625
626 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
627 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
628 blm->current = 0;
629 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
630 blm->space = pmap->space; /* Set the space (only needed for remove) */
631 blm->blkFlags = flags; /* Set the block's flags */
632
633 #if 0
634 kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
635 blm, blm->start, blm->end, blm->PTEr);
636 #endif
637
638 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
639
640 #if 0
641 kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
642 blm, pmap->bmaps);
643 #endif
644
645 do {
646 oblm = hw_add_blk(pmap, blm);
647 if ((unsigned int)oblm & 2) {
648 oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
649 mapping_remove(pmap, oblm_virt->start);
650 };
651 } while ((unsigned int)oblm & 2);
652
653 if (oblm) {
654 oblm = (blokmap *)hw_cpv((mapping *) oblm); /* Get the old block virtual address */
655 blm = (blokmap *)hw_cpv((mapping *)blm); /* Back to the virtual address of this */
656 if((oblm->start != blm->start) || /* If we have a match, then this is a fault race and */
657 (oblm->end != blm->end) || /* is acceptable */
658 (oblm->PTEr != blm->PTEr))
659 panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */
660 mapping_free((struct mapping *)blm);
661 }
662
663 #if 0
664 kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
665 blm, pmap->bmaps);
666 #endif
667
668 return; /* Return */
669 }
670
671
672 /*
673 * Optimally enters translations for odd-sized V=F blocks.
674 *
675 * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
676 * will be split into normal-sized page mappings.
677 *
 678  *      This one is different from pmap_map_block in that it will allocate its own virtual
 679  *      target address. Rather than allocating a single block,
 680  *      it may allocate multiple blocks that are power-of-two aligned/sized. This allows
681 * hardware-level mapping that takes advantage of BAT maps or large page sizes.
682 *
683 * Most considerations for pmap_map_block apply.
684 *
685 *
686 */
687
688 kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
689 vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */
690
691 register blokmap *blm, *oblm;
692 unsigned int pg;
693 kern_return_t err;
694 unsigned int bnd;
695
696 #if 1
697 kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
698 map, pa, size, prot, attr);
699 #endif
700
701 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
702 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
703 if(err) {
704 #if DEBUG
705 kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */
706 #endif
707 return(err); /* Pass back the error */
708 }
709 #if 1
710 kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */
711 #endif
712
713 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
714 mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
715 }
716 return(KERN_SUCCESS); /* All done */
717 }
718
719 err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */
720
721 if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */
722 err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */
723 if(err) {
724 #if DEBUG
725 kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */
726 #endif
727 return(err); /* Pass back the error */
728 }
729 #if 1
730 kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */
731 #endif
732 pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */
733 return KERN_SUCCESS; /* All done now */
734 }
735
736 if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... */
737 #if DEBUG
 738                 kprintf("pmap_map_block_opt: vm_map_block() returned %d\n", err);      /* Say we couldn't do it */
739 #endif
740 return(err);
741 }
742
743 #if 1
744 kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */
745 #endif
746 mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */
747 return(KERN_SUCCESS); /* All done */
748 }
749
750
751 #if 0
752
753 /*
754 * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
755 * areas.
756 *
757 * Once blocks are merged, they act like one block, i.e., if you remove it,
758 * it all goes...
759 *
760 * This can only be used during boot. Ain't no way we can handle SMP
761 * or preemption easily, so we restrict it. We don't check either. We
762 * assume only skilled professional programmers will attempt using this
763 * function. We assume no responsibility, either real or imagined, for
764 * injury or death resulting from unauthorized use of this function.
765 *
 766  *      No user serviceable parts inside. Notice to be removed by end-user only,
767 * under penalty of applicable federal and state laws.
768 *
769 * See descriptions of pmap_map_block. Ignore the part where we say we panic for
770 * overlapping areas. Note that we do panic if we can't merge.
771 *
772 */
773
774 void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */
775
776 register blokmap *blm, *oblm;
777 unsigned int pg;
778 spl_t s;
779
780 #if 1
781 kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
782 pmap, va, pa, size, prot, attr);
783 #endif
784
785 s=splhigh(); /* Don't bother from now on */
786 if(size < ODDBLKMIN) { /* Is this below the minimum size? */
787 for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
788 mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
789 }
790 return; /* All done */
791 }
792
793 blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
794
795 blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
796 blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
797 blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
798
799 #if 1
800 kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
801 blm, blm->start, blm->end, blm->PTEr);
802 #endif
803
804 blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
805
806 #if 1
807 kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
808 blm, pmap->bmaps);
809 #endif
810
811 if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
812 panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
813 }
814
815 #if 1
816 kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
817 blm, pmap->bmaps);
818 #endif
819 splx(s); /* Ok for interruptions now */
820
821 return; /* Return */
822 }
823 #endif
824
825 /*
826 * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
827 *
828 * This routine takes a physical entry and runs through all mappings attached to it and changes
829 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
830 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
831 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
832 * higher to lower, lower to higher.
833 *
834 * Phys_entry is unlocked.
835 */
836
837 void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */
838
839 spl_t spl;
840
 841         debugLog2(9, pp->pte1, prot);                           /* start mapping_protect_phys */
842 spl=splhigh(); /* No interruptions during this */
843 if(!locked) { /* Do we need to lock the physent? */
844 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
845 panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
846 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
847 }
848 }
849
850 hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */
851
852 if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
853 splx(spl); /* Restore interrupt state */
 854         debugLog2(10, pp->pte1, 0);                             /* end mapping_protect_phys */
855
856 return; /* Leave... */
857 }
858
859 /*
860 * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
861 *
862 * This routine takes a pmap and virtual address and changes
863 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
864 * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations
865 * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g.,
866 * higher to lower, lower to higher.
867 *
868 */
869
870 void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
871
872 mapping *mp, *mpv;
873 spl_t s;
874
875 debugLog2(9, vaddr, pmap); /* start mapping_protect */
876 s = splhigh(); /* Don't bother me */
877
878 mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */
879
880 if(!mp) { /* Did we find one? */
881 splx(s); /* Restore the interrupt level */
 882                 debugLog2(10, 0, 0);                            /* end mapping_protect */
883 return; /* Didn't find any... */
884 }
885 if((unsigned int)mp & 1) { /* Did we timeout? */
886 panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */
887 splx(s); /* Restore the interrupt level */
888 return; /* Bad hair day... */
889 }
890
891 hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */
892
893 mpv = hw_cpv(mp); /* Get virtual address of mapping */
894 if(mpv->physent) { /* If there is a physical page, */
895 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
896 }
897 splx(s); /* Restore interrupt state */
 898         debugLog2(10, mpv->PTEr, 0);                            /* end mapping_protect */
899
900 return; /* Leave... */
901 }
902
903 /*
904 * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
905 *
906 * This routine takes a physical entry and sets the physical attributes. There can be no mappings
907 * associated with this page when we do it.
908 */
909
910 void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */
911
 912         debugLog2(11, pp->pte1, prot);                          /* start mapping_phys_attr */
913
914 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */
915 panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
916 pp, pp->phys_link, pp->pte1); /* Complain about timeout */
917 }
918
919 hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */
920
921 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */
 922         debugLog2(12, pp->pte1, wimg);                          /* end mapping_phys_attr */
923
924 return; /* Leave... */
925 }
926
927 /*
928 * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
929 *
930 * This routine takes a physical entry and runs through all mappings attached to it and invalidates
931 * any PTEs it finds.
932 *
933 * Interruptions must be disabled and the physical entry locked at entry.
934 */
935
936 void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */
937
 938         hw_inv_all(pp);                                         /* Go invalidate all PTEs pointing to this physical page */
939
940 return; /* Leave... */
941 }
942
943
944 /*
945 * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
946 *
947 * This routine takes a physical entry and runs through all mappings attached to it and turns
948 * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before
949 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
950 * either (I don't think, maybe I'll change my mind later).
951 *
952 * Interruptions must be disabled and the physical entry locked at entry.
953 */
954
955 void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */
956
957 hw_clr_mod(pp); /* Go clear the change bit of a physical page */
958 return; /* Leave... */
959 }
960
961
962 /*
963 * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
964 *
965 * This routine takes a physical entry and runs through all mappings attached to it and turns
966 * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before
967 * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
968 * either (I don't think, maybe I'll change my mind later).
969 *
970 * Interruptions must be disabled and the physical entry locked at entry.
971 */
972
973 void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */
974
975 hw_set_mod(pp); /* Go set the change bit of a physical page */
976 return; /* Leave... */
977 }
978
979
980 /*
981 * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
982 *
983 * This routine takes a physical entry and runs through all mappings attached to it and turns
984 * off the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
985 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
986 * either (I don't think, maybe I'll change my mind later).
987 *
988 * Interruptions must be disabled at entry.
989 */
990
991 void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */
992
993 mapping *mp;
994
 995         debugLog2(13, pp->pte1, 0);                             /* start mapping_clr_ref */
996 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */
997 panic("Lock timeout getting lock on physical entry\n"); /* Just die... */
998 }
999 hw_clr_ref(pp); /* Go clear the reference bit of a physical page */
1000 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */
1001         debugLog2(14, pp->pte1, 0);                             /* end mapping_clr_ref */
1002 return; /* Leave... */
1003 }
1004
1005
1006 /*
1007 * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
1008 *
1009 * This routine takes a physical entry and runs through all mappings attached to it and turns
1010 * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1011 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1012 * either (I don't think, maybe I'll change my mind later).
1013 *
1014 * Interruptions must be disabled and the physical entry locked at entry.
1015 */
1016
1017 void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */
1018
1019 hw_set_ref(pp); /* Go set the reference bit of a physical page */
1020 return; /* Leave... */
1021 }
1022
1023
1024 /*
1025 * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
1026 *
1027 * This routine takes a physical entry and runs through all mappings attached to it and tests
1028 * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before
1029 * the changed bit is tested. We don't try to save the PTE. We won't worry about the LRU calculations
1030 * either (I don't think, maybe I'll change my mind later).
1031 *
1032 * Interruptions must be disabled and the physical entry locked at entry.
1033 */
1034
1035 boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */
1036
1037 return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */
1038 }
1039
1040
1041 /*
1042 * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
1043 *
1044 * This routine takes a physical entry and runs through all mappings attached to it and tests
1045 * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
1046 * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
1047 * either (I don't think, maybe I'll change my mind later).
1048 *
1049 * Interruptions must be disabled and the physical entry locked at entry.
1050 */
1051
1052 boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */
1053
1054 return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */
1055 }
1056
1057
1058 /*
1059 * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
1060 *
1061 * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
1062 */
1063
1064 void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */
1065
1066 pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
1067
1068 return; /* Leave... */
1069 }
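
/*
 * Illustrative view (a sketch, not part of the original build) of the default PTE1
 * assembled above - the same word 1 layout that mapping_make builds for PTEr: the
 * physical page number in the high 20 bits, the WIMG bits in the 0x00000078 field,
 * and the PP protection bits in the low 2 bits.  The macro names are hypothetical.
 */
#if 0 /* (TEST/DEBUG) */
#define PTE1_RPN(pte1)		((pte1) & -PAGE_SIZE)		/* Physical page address */
#define PTE1_WIMG(pte1)		(((pte1) >> 3) & 0xF)		/* WIMG storage attributes */
#define PTE1_PP(pte1)		((pte1) & 3)			/* Page protection bits */
#endif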
1070
1071
1072 /*
1073 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
1074 *
1075 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
1076 * the number of free mappings remaining, and if below a threshold, replenishes them.
1077  *      The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
1078 * a new one is allocated.
1079 *
1080  *      This routine allocates and/or releases memory and must be called from a safe place.
1081 * Currently, vm_pageout_scan is the safest place. We insure that the
1082 */
1083
1084 thread_call_t mapping_adjust_call;
1085 static thread_call_data_t mapping_adjust_call_data;
1086
1087 void mapping_adjust(void) { /* Adjust free mappings */
1088
1089 kern_return_t retr;
1090 mappingblok *mb, *mbn;
1091 spl_t s;
1092 int allocsize, i;
1093 extern int vm_page_free_count;
1094
1095 if(mapCtl.mapcmin <= MAPPERBLOK) {
1096 mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
1097
1098 #if DEBUG
1099 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
1100 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
1101 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
1102 #endif
1103 }
1104
1105 s = splhigh(); /* Don't bother from now on */
1106 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1107 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
1108 }
1109
1110 if (mapping_adjust_call == NULL) {
1111 thread_call_setup(&mapping_adjust_call_data,
1112 (thread_call_func_t)mapping_adjust,
1113 (thread_call_param_t)NULL);
1114 mapping_adjust_call = &mapping_adjust_call_data;
1115 }
1116
1117 while(1) { /* Keep going until we've got enough */
1118
1119 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
1120 if(allocsize < 1) break; /* Leave if we have all we need */
1121
1122 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
1123 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
1124 mapCtl.mapcreln--; /* Back off the count */
1125 allocsize = MAPPERBLOK; /* Show we allocated one block */
1126 }
1127 else { /* No free ones, try to get it */
1128
1129 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
1130
1131 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1132 splx(s); /* Restore 'rupts */
1133
1134 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
1135 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
1136 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
1137 break;
1138 }
1139 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
1140 }
1141 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
1142 s = splhigh(); /* Don't bother from now on */
1143 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1144 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
1145 }
1146 }
1147 if (retr != KERN_SUCCESS)
1148                         break;                                          /* Failed to allocate, bail out... */
1149 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
1150 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1151 mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1152 }
1153 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1154 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1155 }
1156
1157 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
1158 mapCtl.mapcrecurse = 0; /* We are done now */
1159 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1160 splx(s); /* Restore 'rupts */
1161 return; /* Return... */
1162 }
1163
1164 mbn = mapCtl.mapcrel; /* Get first pending release block */
1165 mapCtl.mapcrel = 0; /* Dequeue them */
1166 mapCtl.mapcreln = 0; /* Set count to 0 */
1167
1168 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1169 splx(s); /* Restore 'rupts */
1170
1171 while((unsigned int)mbn) { /* Toss 'em all */
1172 mb = mbn->nextblok; /* Get the next */
1173 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
1174 mbn = mb; /* Chain to the next */
1175 }
1176
1177 __asm__ volatile("sync"); /* Make sure all is well */
1178 mapCtl.mapcrecurse = 0; /* We are done now */
1179 return;
1180 }
1181
1182 /*
1183 * mapping_free(mapping *mp) - release a mapping to the free list
1184 *
1185 * This routine takes a mapping and adds it to the free list.
1186  *      If this mapping makes the block non-empty, we queue it to the free block list.
1187  *      NOTE: we might want to queue it at the end to quelch the pathological
1188  *      case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
1189 * If this release fills a block and we are above the threshold, we release the block
1190 */
1191
1192 void mapping_free(struct mapping *mp) { /* Release a mapping */
1193
1194 mappingblok *mb, *mbn;
1195 spl_t s;
1196 unsigned int full, mindx;
1197
1198 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */
1199 mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
1200
1201 s = splhigh(); /* Don't bother from now on */
1202 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1203 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
1204 }
1205
1206 full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */
1207 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
1208
1209 if(full) { /* If it was full before this: */
1210 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
1211 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
1212 if(!((unsigned int)mapCtl.mapclast))
1213 mapCtl.mapclast = mb;
1214 }
1215
1216 mapCtl.mapcfree++; /* Bump free count */
1217         mapCtl.mapcinuse--;                                     /* Decrement in use count */
1218
1219 mapCtl.mapcfreec++; /* Count total calls */
1220
1221 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
1222 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
1223 == 0xFFFFFFFF) { /* See if empty now */
1224
1225 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
1226 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
1227 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
1228 }
1229 else { /* We're not first */
1230 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
1231                                 if(mbn->nextblok == mb) break;  /* Is the next one ours? */
1232 }
1233 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
1234 mbn->nextblok = mb->nextblok; /* Dequeue us */
1235 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
1236 }
1237
1238 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
1239 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
1240 mapCtl.mapcnext = mb; /* Chain us to the head */
1241 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
1242 }
1243 else {
1244 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
1245 mapCtl.mapcreln++; /* Count on release list */
1246 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
1247 mapCtl.mapcrel = mb; /* Chain us in front */
1248 }
1249 }
1250 }
1251
1252 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
1253 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1254 thread_call_enter(mapping_adjust_call); /* Go toss some */
1255 }
1256 }
1257 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1258 splx(s); /* Restore 'rupts */
1259
1260 return; /* Bye, dude... */
1261 }
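
/*
 * Illustrative sketch (hypothetical helper, not part of the original build) of the
 * slot and bitmap arithmetic used above.  Each mapping is a 32-byte slot in a
 * page-aligned mappingblok; slot 0 is the header, so a page holds 1 header plus 127
 * mappings, as described at mapping_free_init.
 */
#if 0 /* (TEST/DEBUG) */
static void mapping_slot_info(struct mapping *mp) {

	mappingblok *mb;
	unsigned int mindx;

	mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);		/* Point to the page-aligned block header */
	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;		/* 32-byte slot number within the block (1-127) */

	kprintf("block=%08X slot=%d word=%d bit=%08X\n",		/* Show which free-map word and bit cover this slot */
		mb, mindx, mindx >> 5, 0x80000000 >> (mindx & 31));
}
#endif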
1262
1263
1264 /*
1265 * mapping_alloc(void) - obtain a mapping from the free list
1266 *
1267  *      This routine takes a mapping off the free list and returns its address.
1268 *
1269 * We do this by finding a free entry in the first block and allocating it.
1270 * If this allocation empties the block, we remove it from the free list.
1271 * If this allocation drops the total number of free entries below a threshold,
1272 * we allocate a new block.
1273 *
1274 */
1275
1276 mapping *mapping_alloc(void) { /* Obtain a mapping */
1277
1278 register mapping *mp;
1279 mappingblok *mb, *mbn;
1280 spl_t s;
1281 int mindx;
1282 kern_return_t retr;
1283
1284 s = splhigh(); /* Don't bother from now on */
1285 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1286 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1287 }
1288
1289 if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */
1290 unsigned int i;
1291 struct mappingflush mappingflush;
1292 PCA *pca_min, *pca_max;
1293 PCA *pca_base;
1294
1295 pca_min = (PCA *)(hash_table_base+hash_table_size);
1296 pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);
1297
1298 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1299 mapCtl.mapcflush.mappingcnt = 0;
1300 pca_base = mapCtl.mapcflush.pcaptr;
1301 do {
1302 hw_select_mappings(&mapCtl.mapcflush);
1303 mapCtl.mapcflush.pcaptr++;
1304 if (mapCtl.mapcflush.pcaptr >= pca_max)
1305 mapCtl.mapcflush.pcaptr = pca_min;
1306 } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));
1307
1308 if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
1309 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1310 panic("mapping_alloc - all mappings are wired\n");
1311 }
1312 mappingflush = mapCtl.mapcflush;
1313 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1314 splx(s);
1315 for (i=0;i<mappingflush.mappingcnt;i++)
1316 mapping_remove(mappingflush.mapping[i].pmap,
1317 mappingflush.mapping[i].offset);
1318 s = splhigh();
1319 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
1320 panic("mapping_alloc - timeout getting control lock\n");
1321 }
1322 }
1323 mb = mapCtl.mapcnext;
1324 }
1325
1326 if(!(mindx = mapalc(mb))) { /* Allocate a slot */
1327 panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */
1328 }
1329
1330 if(mindx < 0) { /* Did we just take the last one */
1331 mindx = -mindx; /* Make positive */
1332 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1333 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1334 }
1335
1336 mapCtl.mapcfree--; /* Decrement free count */
1337 mapCtl.mapcinuse++; /* Bump in use count */
1338
1339 mapCtl.mapcallocc++; /* Count total calls */
1340
1341 /*
1342 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1343 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1344 * rescuable, we will kick the misc scan, which will allocate some for us. We only do this
1345 * if we haven't already done it.
1346 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1347 * the release list with as much as we need until threads start.
1348 */
1349 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1350 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
1351 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1352 mapCtl.mapcreln--; /* Back off the count */
1353 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1354 }
1355 else { /* We need to replenish */
1356 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1357 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1358 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1359 }
1360 }
1361 }
1362 }
1363
1364 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1365 splx(s); /* Restore 'rupts */
1366
1367 mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */
1368 __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Clean it up */
1369 return mp; /* Send it back... */
1370 }
1371
1372
1373 void
1374 consider_mapping_adjust()
1375 {
1376 spl_t s;
1377
1378 s = splhigh(); /* Don't bother from now on */
1379 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1380 		panic("consider_mapping_adjust - timeout getting control lock\n");	/* Tell all and die */
1381 }
1382
1383 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1384 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1385 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1386 }
1387 }
1388
1389 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1390 splx(s); /* Restore 'rupts */
1391
1392 }
1393
1394
1395
1396 /*
1397 * void mapping_free_init(mbl, perm, locked) - Adds a block of storage to the free mapping list
1398 *
1399 * The mapping block is a page-sized area on a page boundary. It contains 1 header and 127
1400 * mappings. This call adds and initializes a block for use.
1401 *
1402 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1403 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1404 * corresponds to the header). The translation mask is the XOR of the virtual and real
1405 * addresses (needless to say, the block must be wired).
1406 *
1407 * We handle these mappings the same way as saveareas: the block is only on the chain so
1408 * long as there are free entries in it.
1409 *
1410 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1411 * mappings. Blocks marked PERM won't ever be released.
1412 *
1413 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1414 * list. We do this only at start up time. This is done because we only allocate blocks
1415 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1416 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1417 * them on the release queue, the allocate routine will rescue them. Then when the
1418 * pageout scan starts, all extra ones will be released.
1419 *
1420 */
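/*
 *	Illustrative note (not part of the original code): because the block is wired, the
 *	virtual-to-real relationship never changes, so one XOR mask converts in both directions.
 *	Assuming, purely for example, a block at kernel virtual 0x01234000 backed by physical
 *	0x00567000:
 *
 *		mapblokvrswap = 0x01234000 ^ 0x00567000;	// computed once at init time
 *		real = virt ^ mapblokvrswap;			// virtual -> real
 *		virt = real ^ mapblokvrswap;			// real -> virtual
 *
 *	Since both addresses are page aligned, the low-order offset bits of the mask are zero
 *	and pass through untouched.
 */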
1421
1422
1423 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1424 /* Sets up a block of mappings.
1425    perm indicates if the block can be released
1426    or goes straight to the release queue.
1427    locked indicates if the lock is held already */
1428
1429 mappingblok *mb;
1430 spl_t s;
1431 int i;
1432 unsigned int raddr;
1433
1434 mb = (mappingblok *)mbl; /* Start of area */
1435
1436
1437 if(perm >= 0) { /* See if we need to initialize the block */
1438 if(perm) {
1439 raddr = (unsigned int)mbl; /* Perm means V=R */
1440 mb->mapblokflags = mbPerm; /* Set perm */
1441 }
1442 else {
1443 raddr = kvtophys(mbl); /* Get real address */
1444 mb->mapblokflags = 0; /* Set not perm */
1445 }
1446
1447 mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */
1448
1449 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1450 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1451 mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */
1452 mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */
1453 }
1454
1455 s = splhigh(); /* Don't bother from now on */
1456 if(!locked) { /* Do we need the lock? */
1457 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1458 panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */
1459 }
1460 }
1461
1462 if(perm < 0) { /* Direct to release queue? */
1463 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1464 mapCtl.mapcrel = mb; /* Queue us on in */
1465 mapCtl.mapcreln++; /* Count the free block */
1466 }
1467 else { /* Add to the free list */
1468
1469 mb->nextblok = 0; /* We always add to the end */
1470 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1471
1472 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1473 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1474 }
1475 else { /* We are not the first */
1476 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1477 mapCtl.mapclast = mb; /* We are now last */
1478 }
1479 }
1480
1481 if(!locked) { /* Do we need to unlock? */
1482 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1483 }
1484 splx(s); /* Restore 'rupts */
1485 return; /* All done, leave... */
1486 }
1487
1488
1489 /*
1490 * void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
1491 *
1492 * No locks can be held, because we allocate memory here.
1493 * This routine needs a corresponding mapping_relpre call to remove the
1494 * hold off flag so that the adjust routine will free the extra mapping
1495 * blocks on the release list. I don't like this, but I don't know
1496 * how else to do this for now...
1497 *
1498 */
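/*
 *	Illustrative usage (not part of the original code): a caller about to enter a large
 *	number of mappings brackets the work so the extra blocks are not trimmed out from
 *	under it; for a hypothetical 8MB request:
 *
 *		mapping_prealloc(8 * 1024 * 1024);	// ensure enough blocks exist, bump the hold off
 *		...enter the individual mappings...
 *		mapping_relpre();			// drop the hold off; mapping_adjust may now trim
 */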
1499
1500 void mapping_prealloc(unsigned int size) {	/* Preallocates mappings for a large request */
1501
1502 int nmapb, i;
1503 kern_return_t retr;
1504 mappingblok *mbn;
1505 spl_t s;
1506
1507 s = splhigh(); /* Don't bother from now on */
1508 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1509 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1510 }
1511
1512 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1513
1514 mapCtl.mapcholdoff++; /* Bump the hold off count */
1515
1516 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1517 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1518 splx(s); /* Restore 'rupts */
1519 return;
1520 }
1521 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1522 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1523 splx(s); /* Restore 'rupts */
1524 return;
1525 }
1526 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1527
1528 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1529 splx(s); /* Restore 'rupts */
1530
1531 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1532 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1533 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1534 break;
1535 }
1536 		mapping_free_init((vm_offset_t)mbn, -1, 0);		/* Initialize onto the release queue */
1537 }
1538 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1539 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1540
1541 mapCtl.mapcrecurse = 0; /* We are done now */
1542 }
1543
1544 /*
1545 * void mapping_relpre(void) - Releases preallocation release hold off
1546 *
1547 * This routine removes the
1548 * hold off flag so that the adjust routine will free the extra mapping
1549 * blocks on the release list. I don't like this, but I don't know
1550 * how else to do this for now...
1551 *
1552 */
1553
1554 void mapping_relpre(void) { /* Releases release hold off */
1555
1556 spl_t s;
1557
1558 s = splhigh(); /* Don't bother from now on */
1559 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1560 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1561 }
1562 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1563 panic("mapping_relpre: hold-off count went negative\n");
1564 }
1565
1566 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1567 splx(s); /* Restore 'rupts */
1568 }
1569
1570 /*
1571 * void mapping_free_prime(void) - Primes the mapping block release list
1572 *
1573 * See mapping_free_init.
1574 * No locks can be held, because we allocate memory here.
1575 * One processor running only.
1576 *
1577 */
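/*
 *	Worked example (illustrative, not part of the original code): with the usual 127 usable
 *	mappings per block, if 300 mappings are free or in use when this runs, then
 *
 *		nmapb = (300 + 127 - 1) / 127;		// = 3 blocks cover the current population
 *		nmapb = nmapb * 4;			// = 12 pages primed onto the release list
 *
 *	The factor of four is just headroom to carry us until the pageout scan can take over
 *	block allocation.
 */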
1578
1579 void mapping_free_prime(void) { /* Primes the mapping block release list */
1580
1581 int nmapb, i;
1582 kern_return_t retr;
1583 mappingblok *mbn;
1584 vm_offset_t mapping_min;
1585
1586 retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
1587 FALSE, TRUE, &mapping_map);
1588
1589 if (retr != KERN_SUCCESS)
1590 panic("mapping_free_prime: kmem_suballoc failed");
1591
1592
1593 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1594 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1595
1596 #if DEBUG
1597 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1598 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1599 #endif
1600
1601 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1602 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1603 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1604 panic("Whoops... Not a bit of wired memory left for anyone\n");
1605 }
1606 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1607 }
1608 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1609 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1610 }
1611
1612
1613
1614 void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1615 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1616 {
1617 *count = mapCtl.mapcinuse;
1618 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1619 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1620 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1621 *alloc_size = PAGE_SIZE;
1622
1623 *collectable = 1;
1624 *exhaustable = 0;
1625 }
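
/*
 *	Illustrative arithmetic (not part of the original code): with a 4K page and 127 mappings
 *	plus one header per block, PAGE_SIZE / (MAPPERBLOK + 1) works out to 4096 / 128 = 32
 *	bytes, the size of a single mapping slot, so the sizes reported above are counted in
 *	bytes of mapping storage rather than in pages.
 */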
1626
1627
1628 /*
1629 * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
1630 *
1631 * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with
1632 * the same space. If it finds it, it returns the virtual address.
1633 *
1634 * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check
1635 * for it and fail it myself...
1636 */
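/*
 *	For reference (illustrative, not original code): on the 32-bit PowerPC MMU the primary
 *	PTEG is located by hashing the VSID against the page index, roughly
 *
 *		hash = (vsid & 0x7FFFF) ^ ((va >> 12) & 0xFFFF);	// primary hash function
 *		pteg = hash_table_base + ((hash & hash_mask) << 6);	// PTEGs are 64 bytes apart
 *
 *	(hash_mask is a stand-in for the table's size mask).  Knowing the PTEG address (PTEhash
 *	rounded down to 64 bytes) and the space ID, the loop below XORs the hash back out to
 *	recover the wrapped low-order virtual address bits, then pulls the segment and API bits
 *	from the PTE itself.
 */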
1637
1638 vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */
1639
1640 spl_t s;
1641 register mapping *mp, *mpv;
1642 vm_offset_t va;
1643
1644 if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */
1645
1646 s = splhigh();
1647 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1648 splx(s); /* Restore 'rupts */
1649 panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */
1650 return(0); /* Should die before here */
1651 }
1652
1653 va = 0; /* Assume failure */
1654
1655 for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */
1656
1657 if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */
1658
1659 va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */
1660 va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */
1661 va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */
1662 break; /* We're done now, pass virtual address back */
1663 }
1664
1665 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1666 splx(s); /* Restore 'rupts */
1667 return(va); /* Return the result or 0... */
1668 }
1669
1670 /*
1671 * kvtophys(addr)
1672 *
1673 * Convert a kernel virtual address to a physical address
1674 */
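/*
 *	Illustrative usage (not part of the original code):
 *
 *		vm_offset_t pa = kvtophys(kva);		// kernel virtual -> physical
 *		vm_offset_t kv = phystokv(pa);		// physical -> some kernel virtual alias
 *
 *	kv is not guaranteed to equal kva: phystokv() (below) returns the first kernel mapping
 *	found for the page, and a physical page may be mapped at more than one kernel virtual
 *	address.
 */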
1675 vm_offset_t kvtophys(vm_offset_t va) {
1676
1677 register mapping *mp, *mpv;
1678 register blokmap *bmp;
1679 register vm_offset_t pa;
1680 spl_t s;
1681
1682 s=splhigh(); /* Don't bother from now on */
1683 mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */
1684
1685 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1686 splx(s); /* Restore 'rupts */
1687 panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
1688 return 0;
1689 }
1690
1691 if(!mp) { /* If it was not a normal page */
1692 pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1693 splx(s); /* Restore 'rupts */
1694 return pa; /* Return physical address */
1695 }
1696
1697 mpv = hw_cpv(mp); /* Convert to virtual addressing */
1698
1699 if(!mpv->physent) { /* Was there a physical entry? */
1700 		pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));	/* No physent; get the physical address from the real half of the PTE */
1701 }
1702 else {
1703 pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */
1704 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1705 }
1706
1707 splx(s); /* Restore 'rupts */
1708 return pa; /* Return the physical address... */
1709 }
1710
1711 /*
1712 * phystokv(addr)
1713 *
1714 * Convert a physical address to a kernel virtual address if
1715 * there is a mapping, otherwise return NULL
1716 */
1717
1718 vm_offset_t phystokv(vm_offset_t pa) {
1719
1720 struct phys_entry *pp;
1721 vm_offset_t va;
1722
1723 pp = pmap_find_physentry(pa); /* Find the physical entry */
1724 if (PHYS_NULL == pp) {
1725 return (vm_offset_t)NULL; /* If none, return null */
1726 }
1727 if(!(va=mapping_p2v(kernel_pmap, pp))) {
1728 return 0; /* Can't find it, return 0... */
1729 }
1730 return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */
1731
1732 }
1733
1734 /*
1735 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1736 * page 0 access for the current thread.
1737 *
1738 * If parameter is TRUE, faults are ignored
1739 * If parameter is FALSE, faults are honored
1740 *
1741 */
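/*
 *	Illustrative usage (hypothetical caller, not original code): a probe of low memory would
 *	bracket the access so an unexpected fault on page 0 does not take the thread down:
 *
 *		ignore_zero_fault(TRUE);		// tolerate faults on page 0
 *		value = *(unsigned int *)addr;		// addr assumed to lie in page 0
 *		ignore_zero_fault(FALSE);		// restore normal fault handling
 */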
1742
1743 void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1744
1745 if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1746 else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1747
1748 	return;						/* All done, leave... */
1749 }
1750
1751
1752 /*
1753 * Allocates a range of virtual addresses in a map as optimally as
1754 * possible for block mapping. The start address is aligned such
1755 * that a minimum number of power-of-two sized/aligned blocks is
1756 * required to cover the entire range.
1757 *
1758 * We also use a mask of valid block sizes to determine optimality.
1759 *
1760 * Note that the passed in pa is not actually mapped to the selected va,
1761 * rather, it is used to figure the optimal boundary. The actual
1762 * V to R mapping is done externally.
1763 *
1764 * This function will return KERN_INVALID_ADDRESS if an optimal address
1765 * cannot be found. It is not necessarily a fatal error; the caller may still
1766 * be able to do a non-optimal assignment.
1767 */
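/*
 *	Worked example (illustrative values, not part of the original code): suppose the valid
 *	block sizes run up to 8MB, the request is 5MB, and pa is 0x00123000.
 *
 *		boundary = 0x80000000 >> cntlzw(0x00500000);		// = 0x00400000, largest power of two <= size
 *		algnpa   = (0x00123000 + boundary - 1) & -boundary;	// = 0x00400000, physical rounded up
 *		leading  = algnpa - 0x00123000;				// = 0x002DD000 bytes ahead of the aligned point
 *
 *	Candidate start addresses are then multiples of boundary minus leading, so the rounded-up
 *	physical address lands exactly on a block boundary inside the chosen virtual range; if no
 *	free slot fits, boundary is halved and the search repeats.
 */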
1768
1769 kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
1770 vm_size_t size, vm_prot_t prot) {
1771
1772 vm_map_entry_t entry, next, tmp_entry, new_entry;
1773 vm_offset_t start, end, algnpa, endadr, strtadr, curradr;
1774 vm_offset_t boundary;
1775
1776 unsigned int maxsize, minsize, leading, trailing;
1777
1778 assert(page_aligned(pa));
1779 assert(page_aligned(size));
1780
1781 if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */
1782
1783 minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */
1784 maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */
1785
1786 boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */
1787 if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */
1788
1789 vm_map_lock(map); /* No touchee no mapee */
1790
1791 for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */
1792 if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */
1793 algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */
1794 leading = algnpa - pa; /* Get leading size */
1795
1796 curradr = 0; /* Start low */
1797
1798 while(1) { /* Try all possible values for this opt level */
1799
1800 curradr = curradr + boundary; /* Get the next optimal address */
1801 strtadr = curradr - leading; /* Calculate start of optimal range */
1802 endadr = strtadr + size; /* And now the end */
1803
1804 if((curradr < boundary) || /* Did address wrap here? */
1805 (strtadr > curradr) || /* How about this way? */
1806 (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */
1807
1808 if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
1809 if(endadr > map->max_offset) break; /* No room right now... */
1810
1811 if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
1812
1813 next = entry->vme_next; /* Get the next entry */
1814 if((next == vm_map_to_entry(map)) || /* Are we the last entry? */
1815 (next->vme_start >= endadr)) { /* or do we end before the next entry? */
1816
1817 new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
1818 VM_OBJECT_NULL,
1819 0, /* Offset into object of 0 */
1820 FALSE, /* No copy needed */
1821 FALSE, /* Not shared */
1822 FALSE, /* Not in transition */
1823 prot, /* Set the protection to requested */
1824 prot, /* We can't change protection */
1825 VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind,
1826 'cause we don't page in this area */
1827 VM_INHERIT_DEFAULT, /* Default inheritance */
1828 0); /* Nothing is wired */
1829
1830 vm_map_unlock(map); /* Let the world see it all */
1831 *va = strtadr; /* Tell everyone */
1832 *bnd = boundary; /* Say what boundary we are aligned to */
1833 return(KERN_SUCCESS); /* Leave, all is right with the world... */
1834 }
1835 }
1836 }
1837
1838 vm_map_unlock(map); /* Couldn't find a slot */
1839 return(KERN_INVALID_ADDRESS);
1840 }
1841
1842 /*
1843 * Copies data from a physical page to a virtual page. This is used to
1844 * move data from the kernel to user state.
1845 *
1846 * Note that it is invalid to have a source that spans a page boundary.
1847 * This can block.
1848 * We don't check protection either.
1849 * And we don't handle a block mapped sink address either.
1850 *
1851 */
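/*
 *	Illustrative usage (hypothetical values, not original code): pushing a small record that
 *	sits entirely within one physical source page out to user space:
 *
 *		ret = copyp2v(src_pa, user_va, sizeof(record));	// src_pa, user_va, record are assumptions
 *		if(ret != KERN_SUCCESS) ...			// the sink page could not be faulted in
 *
 *	The sink may span pages (the loop below walks it a page at a time); only the source must
 *	stay within a single page.
 */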
1852
1853 kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
1854
1855 vm_map_t map;
1856 kern_return_t ret;
1857 unsigned int spaceid;
1858 int left, csize;
1859 vm_offset_t pa;
1860 register mapping *mpv, *mp;
1861 spl_t s;
1862
1863 if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */
1864 map = current_act()->map; /* Get the current map */
1865
1866 while(size) {
1867 s=splhigh(); /* Don't bother me */
1868
1869 spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */
1870
1871 mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */
1872 if(!mp) { /* Was it there? */
1873 splx(s); /* Restore the interrupt level */
1874 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */
1875 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1876
1877 return KERN_FAILURE; /* Didn't find any, return no good... */
1878 }
1879 if((unsigned int)mp&1) { /* Did we timeout? */
1880 			panic("copyp2v: timeout locking physical entry for virtual address (%08X)\n", sink);	/* Yeah, scream about it! */
1881 splx(s); /* Restore the interrupt level */
1882 return KERN_FAILURE; /* Bad hair day, return FALSE... */
1883 }
1884
1885 mpv = hw_cpv(mp); /* Convert mapping block to virtual */
1886
1887 		if(mpv->PTEr & 1) {									/* Are we write protected? If so, it could indicate COW */
1888 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1889 splx(s); /* Restore the interrupt level */
1890 ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* check for a COW area */
1891 if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */
1892 return KERN_FAILURE; /* Didn't find any, return no good... */
1893 }
1894 left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */
1895
1896 csize = size < left ? size : left; /* Set amount to copy this pass */
1897
1898 pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */
1899
1900 bcopy_physvir((char *)source, (char *)pa, csize); /* Do a physical copy, virtually */
1901
1902 		hw_set_mod(mpv->physent);							/* Set the change bit of the sink page */
1903
1904 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */
1905 splx(s); /* Open up for interrupts */
1906
1907 sink += csize; /* Move up to start of next page */
1908 source += csize; /* Move up source */
1909 size -= csize; /* Set amount for next pass */
1910 }
1911 return KERN_SUCCESS;
1912 }
1913
1914
1915 /*
1916 * copy 'size' bytes from physical to physical address
1917 * the caller must validate the physical ranges
1918 *
1919 * if flush_action == 0, no cache flush necessary
1920 * if flush_action == 1, flush the source
1921 * if flush_action == 2, flush the dest
1922 * if flush_action == 3, flush both source and dest
1923 */
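/*
 *	Illustrative note (not part of the original code): flush_action behaves as a two-bit
 *	mask, bit 0 for the source and bit 1 for the destination, e.g.
 *
 *		copyp2p(src_pa, dst_pa, PAGE_SIZE, 3);	// hypothetical call; flush both around the copy
 *
 *	The requested flushes are issued both before and after the physical bcopy below.
 */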
1924
1925 kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
1926
1927 switch(flush_action) {
1928 case 1:
1929 flush_dcache(source, size, 1);
1930 break;
1931 case 2:
1932 flush_dcache(dest, size, 1);
1933 break;
1934 case 3:
1935 flush_dcache(source, size, 1);
1936 flush_dcache(dest, size, 1);
1937 break;
1938
1939 }
1940 bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */
1941
1942 switch(flush_action) {
1943 case 1:
1944 flush_dcache(source, size, 1);
1945 break;
1946 case 2:
1947 flush_dcache(dest, size, 1);
1948 break;
1949 case 3:
1950 flush_dcache(source, size, 1);
1951 flush_dcache(dest, size, 1);
1952 break;
1953
1954 	}
	return KERN_SUCCESS;					/* Report success; the caller validated the physical ranges */
1955 }
1956
1957
1958
1959 #if DEBUG
1960 /*
1961 * Dumps out the mapping stuff associated with a virtual address
1962 */
1963 void dumpaddr(space_t space, vm_offset_t va) {
1964
1965 mapping *mp, *mpv;
1966 vm_offset_t pa;
1967 spl_t s;
1968
1969 s=splhigh(); /* Don't bother me */
1970
1971 mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */
1972 if(!mp) { /* Did we find one? */
1973 splx(s); /* Restore the interrupt level */
1974 printf("dumpaddr: virtual address (%08X) not mapped\n", va);
1975 return; /* Didn't find any, return FALSE... */
1976 }
1977 if((unsigned int)mp&1) { /* Did we timeout? */
1978 panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */
1979 splx(s); /* Restore the interrupt level */
1980 return; /* Bad hair day, return FALSE... */
1981 }
1982 printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address were dumping */
1983 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1984 dumpmapping(mpv);
1985 if(mpv->physent) {
1986 dumppca(mpv);
1987 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */
1988 }
1989 splx(s); /* Was there something you needed? */
1990 return; /* Tell them we did it */
1991 }
1992
1993
1994
1995 /*
1996 * Prints out a mapping control block
1997 *
1998 */
1999
2000 void dumpmapping(struct mapping *mp) { /* Dump out a mapping */
2001
2002 printf("Dump of mapping block: %08X\n", mp); /* Header */
2003 printf(" next: %08X\n", mp->next);
2004 printf(" hashnext: %08X\n", mp->hashnext);
2005 printf(" PTEhash: %08X\n", mp->PTEhash);
2006 printf(" PTEent: %08X\n", mp->PTEent);
2007 printf(" physent: %08X\n", mp->physent);
2008 printf(" PTEv: %08X\n", mp->PTEv);
2009 printf(" PTEr: %08X\n", mp->PTEr);
2010 printf(" pmap: %08X\n", mp->pmap);
2011
2012 if(mp->physent) { /* Print physent if it exists */
2013 printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
2014 }
2015 else {
2016 printf("Associated physical entry: none\n");
2017 }
2018
2019 dumppca(mp); /* Dump out the PCA information */
2020
2021 return;
2022 }
2023
2024 /*
2025 * Prints out a PTEG control area
2026 *
2027 */
2028
2029 void dumppca(struct mapping *mp) { /* PCA */
2030
2031 PCA *pca;
2032 unsigned int *pteg;
2033
2034 pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */
2035 pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
2036 printf(" Dump of PCA: %08X\n", pca); /* Header */
2037 printf(" PCAlock: %08X\n", pca->PCAlock);
2038 printf(" PCAallo: %08X\n", pca->flgs.PCAallo);
2039 printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);
2040 printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);
2041 printf("Dump of PTEG: %08X\n", pteg); /* Header */
2042 printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);
2043 printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);
2044 printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);
2045 printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);
2046 return;
2047 }
2048
2049 /*
2050 * Dumps starting with a physical entry
2051 */
2052
2053 void dumpphys(struct phys_entry *pp) { /* Dump from physent */
2054
2055 mapping *mp;
2056 PCA *pca;
2057 unsigned int *pteg;
2058
2059 printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
2060 mp = hw_cpv(pp->phys_link);
2061 while(mp) {
2062 dumpmapping(mp);
2063 dumppca(mp);
2064 mp = hw_cpv(mp->next);
2065 }
2066
2067 return;
2068 }
2069
2070 #endif
2071
2072
2073 kern_return_t bmapvideo(vm_offset_t *info);
2074 kern_return_t bmapvideo(vm_offset_t *info) {
2075
2076 extern struct vc_info vinfo;
2077
2078 (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */
2079 return KERN_SUCCESS;
2080 }
2081
2082 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
2083 kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
2084
2085 pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */
2086 return KERN_SUCCESS;
2087 }
2088
2089 kern_return_t bmapmapr(vm_offset_t va);
2090 kern_return_t bmapmapr(vm_offset_t va) {
2091
2092 mapping_remove(current_act()->task->map->pmap, va); /* Remove map */
2093 return KERN_SUCCESS;
2094 }