1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
32 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
33 * Currently, some of the functionality of this module is contained within pmap.c. We may want to move
34 * all of this into it (or most of it, anyway) for the sake of performance. We shall see as we write it.
35 *
36 * We also depend upon the structure of the phys_entry control block. We do put some processor
37 * specific stuff in there.
38 *
39 */
40
41 #include <debug.h>
42 #include <mach_kgdb.h>
43 #include <mach_vm_debug.h>
44 #include <db_machine_commands.h>
45
46 #include <mach/mach_types.h>
47 #include <mach/vm_attributes.h>
48 #include <mach/vm_param.h>
49
50 #include <kern/kern_types.h>
51 #include <kern/thread.h>
52 #include <kern/spl.h>
53 #include <kern/misc_protos.h>
54
55 #include <vm/vm_fault.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_map.h>
58 #include <vm/vm_page.h>
59 #include <vm/pmap.h>
60
61 #include <ppc/exception.h>
62 #include <ppc/misc_protos.h>
63 #include <ppc/proc_reg.h>
64 #include <ppc/pmap.h>
65 #include <ppc/mem.h>
66 #include <ppc/new_screen.h>
67 #include <ppc/Firmware.h>
68 #include <ppc/mappings.h>
69 #include <ddb/db_output.h>
70
71 #include <console/video_console.h> /* (TEST/DEBUG) */
72
73 #define PERFTIMES 0
74
75 vm_map_t mapping_map = VM_MAP_NULL;
76
77 unsigned int incrVSID = 0; /* VSID increment value */
78 unsigned int mappingdeb0 = 0;
79 unsigned int mappingdeb1 = 0;
80 int ppc_max_adrsp; /* Maximum address spaces */
81
82 addr64_t *mapdebug; /* (BRINGUP) */
83 extern unsigned int DebugWork; /* (BRINGUP) */
84
85 void mapping_verify(void);
86 void mapping_phys_unused(ppnum_t pa);
87
88 /*
89 * ppc_prot translates Mach's representation of protections to that of the PPC hardware.
90 * For Virtual Machines (VMM), we also provide translation entries where the output is
91 * the same as the input, allowing direct specification of PPC protections. Mach's
92 * representations are always in the range 0..7, so they always fall into the first
93 * 8 table entries; direct translations are placed in the range 8..16, so they fall into
94 * the second half of the table.
95 *
96 * ***NOTE*** I've commented out the Mach->PPC translations that would set page-level
97 * no-execute, pending updates to the VM layer that will properly enable its
98 * use. Bob Abeles 08.02.04
99 */
100
101 //unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2, /* Mach -> PPC translations */
102 unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, /* Mach -> PPC translations */
103 0, 1, 2, 3, 4, 5, 6, 7 }; /* VMM direct translations */
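/*
 * Illustrative sketch (an assumption, not taken from this file): getProtPPC(), used by
 * mapping_make() and mapping_protect() below, is presumed to reduce to a lookup in the
 * table above. The name getProtPPC_sketch is hypothetical and used only for this illustration.
 */
#if 0
static inline unsigned int getProtPPC_sketch(int key) {
	return (unsigned int)ppc_prot[key & 0xF];	/* Mach keys land in entries 0..7, VMM direct keys in 8..15 */
}
/* Example: VM_PROT_READ | VM_PROT_WRITE is key 3, which the table above maps to PPC pp value 2 (read/write) */
#endif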
104
105 /*
106 * About PPC VSID generation:
107 *
108 * This function is called to generate an address space ID. This space ID must be unique within
109 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
110 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
111 * 4, so we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
112 * to have 1M pmaps at a time, though we couldn't really, since we'd run out of memory way before then. The
113 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
114 * they are released. This causes us to lose track of which space IDs are free to be reused.
115 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
116 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
117 *
118 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
119 * calculation for virtual address lookup. An improperly chosen value could potentially cause
120 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
121 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
122 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
123 * are reserved for the segment number, which means that we really have 2^(13-4) (512) space IDs
124 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
125 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
126 * per segment. So, a scan of every page for 256MB would fill the PTEGs completely, but
127 * with no overflow. I don't think that this is a problem.
128 *
129 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
130 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
131 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
132 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
133 * the same modulo 512. We can reduce this problem by having the segment number be bits
134 * 0-3 of the VSID rather than 20-23. Doing this means that, in effect, corresponding
135 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
136 * I don't think that it is as significant as the other, so I'll build the VSID
137 * with segment first.
138 *
139 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
140 * While this is a problem that should only happen in periods counted in weeks, it can and
141 * will happen. This is assuming a monotonically increasing space ID. If we were to search
142 * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
143 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
144 *
145 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
146 * locked by free_pmap_lock) that is sorted in VSID sequence order.
147 *
148 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
149 * the last that was freed. Then we allocate that.
150 *
151 * NOTE: We must be called with interruptions off and free_pmap_lock held.
152 *
153 */
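/*
 * Worked illustration of the scheme described above (a sketch, not the authoritative
 * hardware format):
 *
 *     vsid = (space << 4) | (va >> 28)           - space ID with the segment number in the low 4 bits
 *     pteg = ((va >> 12) ^ vsid) % nptegs        - page index XOR vsid, mod the number of PTEGs
 *
 * With 8192 PTEGs, two pmaps whose space IDs differ by a multiple of 512 hash identical
 * vaddrs to the same bucket, which is exactly the collision case discussed above.
 */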
154
155 /*
156 * mapping_init();
157 * Do anything that needs to be done before the mapping system can be used.
158 * Hash table must be initialized before we call this.
159 *
160 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
161 */
162
163 void mapping_init(void) {
164
165 unsigned int tmp, maxeff, rwidth;
166
167 ppc_max_adrsp = maxAdrSp; /* Set maximum address spaces */
168
169 maxeff = 32; /* Assume 32-bit */
170 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64; /* Is this a 64-bit machine? */
171
172 rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb; /* Reduce address width by width of address space ID */
173 if(rwidth > maxeff) rwidth = maxeff; /* If we still have more virtual than effective, clamp at effective */
174
175 vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth); /* Get maximum effective address supported */
176 vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr); /* Get maximum physical address supported */
177
178 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) { /* Are we 64 bit? */
179 tmp = 12; /* Size of hash space */
180 }
181 else {
182 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
183 tmp = 32 - tmp; /* Size of hash space */
184 }
185
186 incrVSID = 1 << ((tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
187 incrVSID |= 1 << ((tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
188 incrVSID |= 1; /* Force the increment odd (set the low-order bit) */
189
190 return;
191
192 }
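/*
 * Worked example of the increment computed above (illustration only): on a 64-bit
 * machine tmp is forced to 12, so
 *
 *     incrVSID = (1 << ((12 + 1) >> 1))          - 1 << 6 = 64, the ceiling of sqrt(2^12)
 *              | (1 << ((12 + 1) >> 2))          - 1 << 3 = 8, the ceiling of the fourth root of 2^12
 *              | 1                               - force the increment odd
 *              = 73
 */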
193
194
195 /*
196 * mapping_remove(pmap_t pmap, addr64_t va);
197 * Given a pmap and virtual address, this routine finds the mapping and unmaps it.
198 * The mapping block will be added to
199 * the free list. If the free list threshold is reached, garbage collection will happen.
200 *
201 * We also pass back the next higher mapped address. This is done so that the higher level
202 * pmap_remove function can release a range of addresses simply by calling mapping_remove
203 * in a loop until it finishes the range or is returned a vaddr of 0.
204 *
205 * Note that if the mapping is not found, we return the next VA ORed with 1
206 *
207 */
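/*
 * Caller-side sketch (illustration only; "start", "end", and the loop shape are
 * assumptions, not code from this file): the range-removal protocol described above
 * would be driven roughly like this.
 */
#if 0
	addr64_t va = start;					/* First page-aligned address in the range to tear down */
	while(va < end) {						/* Walk the range */
		va = mapping_remove(pmap, va) & ~1ULL;	/* Remove (if mapped) and step to the next mapped address */
		if(va == 0) break;					/* Zero means nothing is mapped any higher */
	}
#endif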
208
209 addr64_t mapping_remove(pmap_t pmap, addr64_t va) { /* Remove a single mapping for this VADDR.
210 Returns the next mapped VA, with the low bit set if no mapping was found */
211
212 mapping_t *mp;
213 addr64_t nextva;
214 ppnum_t pgaddr;
215
216 va &= ~PAGE_MASK; /* Scrub noise bits */
217
218 do { /* Keep trying until we truly fail */
219 mp = hw_rem_map(pmap, va, &nextva); /* Remove a mapping from this pmap */
220 } while (mapRtRemove == ((unsigned int)mp & mapRetCode));
221
222 switch ((unsigned int)mp & mapRetCode) {
223 case mapRtOK:
224 break; /* Mapping removed */
225 case mapRtNotFnd:
226 return (nextva | 1); /* Nothing found to unmap */
227 default:
228 panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
229 pmap, va, mp);
230 break;
231 }
232
233 pgaddr = mp->mpPAddr; /* Get page number from mapping */
234
235 mapping_free(mp); /* Add mapping to the free list */
236
237 if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
238 /* If this is an assisted host, scrub any guest mappings */
239 unsigned int idx;
240 phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
241 /* Get physent for our physical page */
242 if (!physent) { /* No physent, could be in I/O area, so exit */
243 return (nextva);
244 }
245
246 do { /* Iterate 'till all guest mappings are gone */
247 mp = hw_scrub_guest(physent, pmap); /* Attempt to scrub a guest mapping */
248 switch ((unsigned int)mp & mapRetCode) {
249 case mapRtGuest: /* Found a guest mapping */
250 case mapRtNotFnd: /* Mapping was there, but disappeared, must retry */
251 case mapRtEmpty: /* No guest mappings left to scrub */
252 break;
253 default:
254 panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
255 physent, mp); /* Cry havoc, cry wrack,
256 at least we die with harness on our backs */
257 break;
258 }
259 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
260 }
261
262 return nextva; /* Tell them we did it */
263 }
264
265 /*
266 * mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
267 *
268 * This routine takes the given parameters, builds a mapping block, and queues it into the
269 * correct lists.
270 *
271 * pmap (virtual address) is the pmap to map into
272 * va (virtual address) is the 64-bit virtual address that is being mapped
273 * pa (physical page number) is the physical page number (i.e., physical address >> 12). This is
274 * a 32-bit quantity.
275 * Flags:
276 * block if 1, mapping is a block, size parameter is used. Note: we do not keep
277 * reference and change information or allow protection changes of blocks.
278 * any changes must first unmap and then remap the area.
279 * use attribute Use specified attributes for map, not defaults for physical page
280 * perm Mapping is permanent
281 * cache inhibited Cache inhibited (used if use attribute or block set )
282 * guarded Guarded access (used if use attribute or block set )
283 * size size of block in pages - 1 (not used if not block)
284 * prot VM protection bits
285 * attr Cachability/Guardedness
286 *
287 * Returns 0 if mapping was successful. Returns vaddr that overlaps/collides.
288 * Returns 1 for any other failure.
289 *
290 * Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
291 * for I/O and default the cache attributes appropriately. The caller is free to set whatever they want, however.
292 *
293 * If there is any physical page that is not found in the physent table, the mapping is forced to be a
294 * block mapping of length 1. This keeps us from trying to update a physent during later mapping use,
295 * e.g., fault handling.
296 *
297 *
298 */
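/*
 * Usage sketch (illustration only; pmap, va, and pa are placeholder variables and the
 * flag combination is an assumption, not taken from a real caller): mapping one ordinary
 * page with default attributes and checking for a collision might look like this.
 */
#if 0
	addr64_t colva;
	colva = mapping_make(pmap, va, pa, 0, 0, VM_PROT_READ | VM_PROT_WRITE);	/* Not a block, so size is ignored */
	if(colva) {								/* Non-zero means the add failed or collided (see rules above) */
		panic("example: mapping_make collided at %016llX\n", colva);
	}
#endif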
299
300 addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) { /* Make an address mapping */
301
302 register mapping_t *mp;
303 addr64_t colladdr, psmask;
304 unsigned int pindex, mflags, pattr, wimg, rc;
305 phys_entry_t *physent;
306 int nlists, pcf;
307
308 pindex = 0;
309
310 mflags = 0x01000000; /* Start building mpFlags field (busy count = 1) */
311
312 pcf = (flags & mmFlgPcfg) >> 24; /* Get the physical page config index */
313 if(!(pPcfg[pcf].pcfFlags)) { /* Validate requested physical page configuration */
314 panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
315 pmap, va, pcf);
316 }
317
318 psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1; /* Mask to isolate any offset into a page */
319 if(va & psmask) { /* Make sure we are page aligned on virtual */
320 panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
321 pmap, va, pcf);
322 }
323 if(((addr64_t)pa << 12) & psmask) { /* Make sure we are page aligned on physical */
324 panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
325 pmap, pa, pcf);
326 }
327
328 mflags |= (pcf << (31-mpPcfgb)); /* Insert physical page configuration index */
329
330 if(!(flags & mmFlgBlock)) { /* Is this a block map? */
331
332 size = 1; /* Set size to 1 page if not block */
333
334 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
335 if(!physent) { /* Did we find the physical page? */
336 mflags |= mpBlock; /* Force this to a block if no physent */
337 pattr = 0; /* Assume normal, non-I/O memory */
338 if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
339 }
340 else pattr = ((physent->ppLink & (ppI | ppG)) >> 60); /* Get the default attributes from physent */
341
342 if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
343 }
344 else { /* This is a block */
345
346 pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
347 mflags |= mpBlock; /* Show that this is a block */
348
349 if(size > pmapSmallBlock) { /* Too big to express in 4K pages? */
350 if(size & 0x00001FFF) return mapRtBadSz; /* Fail if bigger than 256MB and not a 32MB multiple */
351 size = size >> 13; /* Convert to 32MB chunks */
352 mflags = mflags | mpBSu; /* Show 32MB basic size unit */
353 }
354 }
355
356 wimg = 0x2; /* Set basic PPC wimg to 0b0010 - Coherent */
357 if(pattr & mmFlgCInhib) wimg |= 0x4; /* Add cache inhibited if we need to */
358 if(pattr & mmFlgGuarded) wimg |= 0x1; /* Add guarded if we need to */
359
360 mflags = mflags | (pindex << 16); /* Stick in the physical entry table index */
361
362 if(flags & mmFlgPerm) mflags |= mpPerm; /* Set permanent mapping */
363
364 size = size - 1; /* Change size to offset */
365 if(size > 0xFFFF) return mapRtBadSz; /* Leave if size is too big */
366
367 nlists = mapSetLists(pmap); /* Set number of lists this will be on */
368
369 mp = mapping_alloc(nlists); /* Get a spare mapping block with this many lists */
370
371 /* the mapping is zero except that the mpLists field is set */
372 mp->mpFlags |= mflags; /* Add in the rest of the flags to mpLists */
373 mp->mpSpace = pmap->space; /* Set the address space/pmap lookup ID */
374 mp->u.mpBSize = size; /* Set the size */
375 mp->mpPte = 0; /* Set the PTE invalid */
376 mp->mpPAddr = pa; /* Set the physical page number */
377 mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) /* Add the protection and attributes to the field */
378 | ((PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit)?
379 getProtPPC(prot) : (getProtPPC(prot) & 0x3)); /* Mask off no-execute control for 32-bit machines */
380
381 while(1) { /* Keep trying... */
382 colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */
383 rc = colladdr & mapRetCode; /* Separate return code */
384 colladdr &= ~mapRetCode; /* Clean up collision effective address */
385
386 switch (rc) {
387 case mapRtOK:
388 return mapRtOK; /* Mapping added successfully */
389
390 case mapRtRemove: /* Remove in progress */
391 (void)mapping_remove(pmap, colladdr); /* Lend a helping hand to another CPU doing block removal */
392 continue; /* Retry mapping add */
393
394 case mapRtMapDup: /* Identical mapping already present */
395 mapping_free(mp); /* Free duplicate mapping */
396 return mapRtOK; /* Return success */
397
398 case mapRtSmash: /* Mapping already present but does not match new mapping */
399 mapping_free(mp); /* Free duplicate mapping */
400 return (colladdr | mapRtSmash); /* Return colliding address, with some dirt added to avoid
401 confusion if effective address is 0 */
402 default:
403 panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
404 colladdr, rc, pmap, va, mp); /* Die dead */
405 }
406
407 }
408
409 return 1; /* Unreachable, but pleases compiler */
410 }
411
412
413 /*
414 * mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping
415 *
416 * Looks up the vaddr and returns the mapping and the next mapped va
417 * If full is true, it will descend through all nested pmaps to find actual mapping
418 *
419 * Must be called with interruptions disabled or we can hang trying to remove found mapping.
420 *
421 * Returns 0 if not found, or a pointer to the mapping if it is found.
422 * Note that the mapping's busy count is bumped. It is the responsibility of the caller
423 * to drop the count. If this is not done, any attempt to remove the mapping will hang.
424 *
425 * NOTE: The nextva field is not valid when full is TRUE.
426 *
427 *
428 */
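/*
 * Usage sketch (illustration only; pmap and va are placeholders): a successful find must
 * be paired with a drop of the busy count, per the note above, roughly as follows.
 */
#if 0
	spl_t s;
	addr64_t nextva;
	mapping_t *mp;

	s = splhigh();							/* Interruptions must be disabled across the find */
	mp = mapping_find(pmap, va, &nextva, 0);	/* Look only in this pmap, do not chase nesting */
	if(mp) {
		/* ... examine the mapping ... */
		mapping_drop_busy(mp);				/* Otherwise a later remove of this mapping will hang */
	}
	splx(s);
#endif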
429
430 mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) { /* Make an address mapping */
431
432 register mapping_t *mp;
433 addr64_t curva;
434 pmap_t curpmap;
435 int nestdepth;
436
437 curpmap = pmap; /* Remember entry */
438 nestdepth = 0; /* Set nest depth */
439 curva = (addr64_t)va; /* Set current va */
440
441 while(1) {
442
443 mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */
444 if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
445 panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap); /* Die... */
446 }
447
448 if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break; /* Are we done looking? */
449
450 if((mp->mpFlags & mpType) != mpNest) { /* Don't chain through anything other than a nested pmap */
451 mapping_drop_busy(mp); /* We have everything we need from the mapping */
452 mp = 0; /* Set not found */
453 break;
454 }
455
456 if(nestdepth++ > 64) { /* Have we nested too far down? */
457 panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
458 va, curva, pmap, curpmap);
459 }
460
461 curva = curva + mp->mpNestReloc; /* Relocate va to new pmap */
462 curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr; /* Get the address of the nested pmap */
463 mapping_drop_busy(mp); /* We have everything we need from the mapping */
464
465 }
466
467 return mp; /* Return the mapping if we found one */
468 }
469
470 /*
471 * void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
472 *
473 * This routine takes a pmap and virtual address and changes
474 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
475 * the protection is changed.
476 *
477 * We quietly succeed if we change the protection or if there is no page mapped at va. The request is
478 * ignored if the va corresponds to a block mapped area or the mapping is permanent.
479 *
480 *
481 */
482
483 void
484 mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */
485
486 int ret;
487
488 ret = hw_protect(pmap, va, getProtPPC(prot), nextva); /* Try to change the protect here */
489
490 switch (ret) { /* Decode return code */
491
492 case mapRtOK: /* Changed */
493 case mapRtNotFnd: /* Didn't find it */
494 case mapRtBlock: /* Block map, just ignore request */
495 case mapRtNest: /* Nested pmap, just ignore request */
496 break;
497
498 default:
499 panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
500
501 }
502
503 }
504
505 /*
506 * void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
507 *
508 * This routine takes a physical entry and runs through all mappings attached to it and changes
509 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
510 * the protection is changed. There is no limitation on changes, e.g.,
511 * higher to lower, lower to higher.
512 *
513 * Any mapping that is marked permanent is not changed
514 *
515 * Phys_entry is unlocked.
516 */
517
518 void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */
519
520 unsigned int pindex;
521 phys_entry_t *physent;
522
523 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
524 if(!physent) { /* Did we find the physical page? */
525 panic("mapping_protect_phys: invalid physical page %08X\n", pa);
526 }
527
528 hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
529 getProtPPC(prot), hwpPurgePTE); /* Set the new protection for page and mappings */
530
531 return; /* Leave... */
532 }
533
534
535 /*
536 * void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
537 *
538 * This routine takes a physical entry and runs through all mappings attached to it and turns
539 * off the change bit.
540 */
541
542 void mapping_clr_mod(ppnum_t pa) { /* Clears the change bit of a physical page */
543
544 unsigned int pindex;
545 phys_entry_t *physent;
546
547 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
548 if(!physent) { /* Did we find the physical page? */
549 panic("mapping_clr_mod: invalid physical page %08X\n", pa);
550 }
551
552 hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
553 0, hwpPurgePTE); /* Clear change for page and mappings */
554 return; /* Leave... */
555 }
556
557
558 /*
559 * void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
560 *
561 * This routine takes a physical entry and runs through all mappings attached to it and turns
562 * on the change bit.
563 */
564
565 void mapping_set_mod(ppnum_t pa) { /* Sets the change bit of a physical page */
566
567 unsigned int pindex;
568 phys_entry_t *physent;
569
570 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
571 if(!physent) { /* Did we find the physical page? */
572 panic("mapping_set_mod: invalid physical page %08X\n", pa);
573 }
574
575 hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
576 0, hwpNoopPTE); /* Set change for page and mappings */
577 return; /* Leave... */
578 }
579
580
581 /*
582 * void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
583 *
584 * This routine takes a physical entry and runs through all mappings attached to it and turns
585 * off the reference bit.
586 */
587
588 void mapping_clr_ref(ppnum_t pa) { /* Clears the reference bit of a physical page */
589
590 unsigned int pindex;
591 phys_entry_t *physent;
592
593 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
594 if(!physent) { /* Did we find the physical page? */
595 panic("mapping_clr_ref: invalid physical page %08X\n", pa);
596 }
597
598 hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
599 0, hwpPurgePTE); /* Clear reference for page and mappings */
600 return; /* Leave... */
601 }
602
603
604 /*
605 * void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
606 *
607 * This routine takes a physical entry and runs through all mappings attached to it and turns
608 * on the reference bit.
609 */
610
611 void mapping_set_ref(ppnum_t pa) { /* Sets the reference bit of a physical page */
612
613 unsigned int pindex;
614 phys_entry_t *physent;
615
616 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
617 if(!physent) { /* Did we find the physical page? */
618 panic("mapping_set_ref: invalid physical page %08X\n", pa);
619 }
620
621 hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
622 0, hwpNoopPTE); /* Set reference for page and mappings */
623 return; /* Leave... */
624 }
625
626
627 /*
628 * boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
629 *
630 * This routine takes a physical entry and runs through all mappings attached to it and tests
631 * the changed bit.
632 */
633
634 boolean_t mapping_tst_mod(ppnum_t pa) { /* Tests the change bit of a physical page */
635
636 unsigned int pindex, rc;
637 phys_entry_t *physent;
638
639 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
640 if(!physent) { /* Did we find the physical page? */
641 panic("mapping_tst_mod: invalid physical page %08X\n", pa);
642 }
643
644 rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
645 0, hwpMergePTE); /* Test the change bit for page and mappings */
646 return ((rc & (unsigned long)ppC) != 0); /* Leave with change bit */
647 }
648
649
650 /*
651 * boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
652 *
653 * This routine takes a physical entry and runs through all mappings attached to it and tests
654 * the reference bit.
655 */
656
657 boolean_t mapping_tst_ref(ppnum_t pa) { /* Tests the reference bit of a physical page */
658
659 unsigned int pindex, rc;
660 phys_entry_t *physent;
661
662 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
663 if(!physent) { /* Did we find the physical page? */
664 panic("mapping_tst_ref: invalid physical page %08X\n", pa);
665 }
666
667 rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
668 0, hwpMergePTE); /* Test reference for page and mappings */
669 return ((rc & (unsigned long)ppR) != 0); /* Leave with reference bit */
670 }
671
672
673 /*
674 * unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
675 *
676 * This routine takes a physical entry and runs through all mappings attached to it and tests
677 * their reference and changed bits.
678 */
679
680 unsigned int mapping_tst_refmod(ppnum_t pa) { /* Tests the reference and change bits of a physical page */
681
682 unsigned int pindex, rc;
683 phys_entry_t *physent;
684
685 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
686 if (!physent) { /* Did we find the physical page? */
687 panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
688 }
689
690 rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
691 0, hwpMergePTE); /* Test reference and change bits in page and mappings */
692 return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
693 /* Convert bits to generic format and return */
694
695 }
696
697
698 /*
699 * void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
700 * by mask of a physical page
701 *
702 * This routine takes a physical entry and runs through all mappings attached to it and turns
703 * off all the reference and change bits.
704 */
705
706 void mapping_clr_refmod(ppnum_t pa, unsigned int mask) { /* Clears the reference and change bits of a physical page */
707
708 unsigned int pindex;
709 phys_entry_t *physent;
710 unsigned int ppcMask;
711
712 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
713 if(!physent) { /* Did we find the physical page? */
714 panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
715 }
716
717 ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
718 /* Convert mask bits to PPC-specific format */
719 hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
720 ppcMask, hwpPurgePTE); /* Clear reference and change bits for page and mappings */
721 return; /* Leave... */
722 }
723
724
725
726 /*
727 * phys_ent *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the physical entry for a page
728 *
729 * This routine takes a physical page number and returns the phys_entry associated with it. It also
730 * calculates the word index of the bank (memory region) containing the entry and returns it
731 * through pindex.
732 */
733
734 phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) { /* Finds the physical entry for the page */
735
736 int i;
737
738 for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */
739 if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */
740 if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue; /* This isn't ours */
741
742 *pindex = (i * sizeof(mem_region_t)) / 4; /* Make the word index to this list */
743
744 return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart]; /* Return the physent pointer */
745 }
746
747 return (phys_entry_t *)0; /* Shucks, can't find it... */
748
749 }
750
751
752
753
754 /*
755 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
756 *
757 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
758 * the number of free mappings remaining, and if below a threshold, replenishes them.
759 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
760 * a new one is allocated.
761 *
762 * This routine allocates and/or frees memory and must be called from a safe place.
763 * Currently, vm_pageout_scan is the safest place.
764 */
765
766 thread_call_t mapping_adjust_call;
767 static thread_call_data_t mapping_adjust_call_data;
768
769 void mapping_adjust(void) { /* Adjust free mappings */
770
771 kern_return_t retr = KERN_SUCCESS;
772 mappingblok_t *mb, *mbn;
773 spl_t s;
774 int allocsize;
775
776 if(mapCtl.mapcmin <= MAPPERBLOK) {
777 mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
778
779 #if DEBUG
780 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
781 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
782 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
783 #endif
784 }
785
786 s = splhigh(); /* Don't bother from now on */
787 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
788 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
789 }
790
791 if (mapping_adjust_call == NULL) {
792 thread_call_setup(&mapping_adjust_call_data,
793 (thread_call_func_t)mapping_adjust,
794 (thread_call_param_t)NULL);
795 mapping_adjust_call = &mapping_adjust_call_data;
796 }
797
798 while(1) { /* Keep going until we've got enough */
799
800 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
801 if(allocsize < 1) break; /* Leave if we have all we need */
802
803 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
804 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
805 mapCtl.mapcreln--; /* Back off the count */
806 allocsize = MAPPERBLOK; /* Show we allocated one block */
807 }
808 else { /* No free ones, try to get it */
809
810 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
811
812 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
813 splx(s); /* Restore 'rupts */
814
815 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
816 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
817 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
818 break;
819 }
820 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
821 }
822
823 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
824 s = splhigh(); /* Don't bother from now on */
825 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
826 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
827 }
828 }
829
830 if (retr != KERN_SUCCESS)
831 break; /* Failed to allocate, bail out... */
832 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
833 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
834 mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
835 }
836
837 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
838 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
839 }
840
841 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
842 mapCtl.mapcrecurse = 0; /* We are done now */
843 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
844 splx(s); /* Restore 'rupts */
845 return; /* Return... */
846 }
847
848 mbn = mapCtl.mapcrel; /* Get first pending release block */
849 mapCtl.mapcrel = 0; /* Dequeue them */
850 mapCtl.mapcreln = 0; /* Set count to 0 */
851
852 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
853 splx(s); /* Restore 'rupts */
854
855 while((unsigned int)mbn) { /* Toss 'em all */
856 mb = mbn->nextblok; /* Get the next */
857
858 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
859
860 mbn = mb; /* Chain to the next */
861 }
862
863 __asm__ volatile("eieio"); /* Make sure all is well */
864 mapCtl.mapcrecurse = 0; /* We are done now */
865 return;
866 }
867
868 /*
869 * mapping_free(mapping *mp) - release a mapping to the free list
870 *
871 * This routine takes a mapping and adds it to the free list.
872 * If this mapping makes the block non-empty, we queue it to the free block list.
873 * NOTE: we might want to queue it to the end to squelch the pathological
874 * case when we get a mapping and free it repeatedly causing the block to chain and unchain.
875 * If this release fills a block and we are above the threshold, we release the block
876 */
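/*
 * Note on the index arithmetic below: mappings are 64-byte slots inside a page-sized
 * block, so ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6 is the slot number of mp within
 * its block and ((unsigned int)mp & -PAGE_SIZE) is the block header. Slot 0 is the
 * header itself, which is why the free bitmaps never show bit 0 free.
 */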
877
878 void mapping_free(struct mapping *mp) { /* Release a mapping */
879
880 mappingblok_t *mb, *mbn;
881 spl_t s;
882 unsigned int full, mindx, lists;
883
884 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6; /* Get index to mapping */
885 mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
886 lists = (mp->mpFlags & mpLists); /* get #lists */
887 if ((lists == 0) || (lists > kSkipListMaxLists)) /* panic if out of range */
888 panic("mapping_free: mpLists invalid\n");
889
890 #if 0
891 mp->mpFlags = 0x99999999; /* (BRINGUP) */
892 mp->mpSpace = 0x9999; /* (BRINGUP) */
893 mp->u.mpBSize = 0x9999; /* (BRINGUP) */
894 mp->mpPte = 0x99999998; /* (BRINGUP) */
895 mp->mpPAddr = 0x99999999; /* (BRINGUP) */
896 mp->mpVAddr = 0x9999999999999999ULL; /* (BRINGUP) */
897 mp->mpAlias = 0x9999999999999999ULL; /* (BRINGUP) */
898 mp->mpList0 = 0x9999999999999999ULL; /* (BRINGUP) */
899 mp->mpList[0] = 0x9999999999999999ULL; /* (BRINGUP) */
900 mp->mpList[1] = 0x9999999999999999ULL; /* (BRINGUP) */
901 mp->mpList[2] = 0x9999999999999999ULL; /* (BRINGUP) */
902
903 if(lists > mpBasicLists) { /* (BRINGUP) */
904 mp->mpList[3] = 0x9999999999999999ULL; /* (BRINGUP) */
905 mp->mpList[4] = 0x9999999999999999ULL; /* (BRINGUP) */
906 mp->mpList[5] = 0x9999999999999999ULL; /* (BRINGUP) */
907 mp->mpList[6] = 0x9999999999999999ULL; /* (BRINGUP) */
908 mp->mpList[7] = 0x9999999999999999ULL; /* (BRINGUP) */
909 mp->mpList[8] = 0x9999999999999999ULL; /* (BRINGUP) */
910 mp->mpList[9] = 0x9999999999999999ULL; /* (BRINGUP) */
911 mp->mpList[10] = 0x9999999999999999ULL; /* (BRINGUP) */
912 }
913 #endif
914
915
916 s = splhigh(); /* Don't bother from now on */
917 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
918 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
919 }
920
921 full = !(mb->mapblokfree[0] | mb->mapblokfree[1]); /* See if full now */
922 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
923 if ( lists > mpBasicLists ) { /* if big block, lite the 2nd bit too */
924 mindx++;
925 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
926 mapCtl.mapcfree++;
927 mapCtl.mapcinuse--;
928 }
929
930 if(full) { /* If it was full before this: */
931 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
932 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
933 if(!((unsigned int)mapCtl.mapclast))
934 mapCtl.mapclast = mb;
935 }
936
937 mapCtl.mapcfree++; /* Bump free count */
938 mapCtl.mapcinuse--; /* Decrement in use count */
939
940 mapCtl.mapcfreec++; /* Count total calls */
941
942 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
943 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) { /* See if empty now */
944
945 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
946 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
947 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
948 }
949 else { /* We're not first */
950 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
951 if(mbn->nextblok == mb) break; /* Is the next one ours? */
952 }
953 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
954 mbn->nextblok = mb->nextblok; /* Dequeue us */
955 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
956 }
957
958 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
959 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
960 mapCtl.mapcnext = mb; /* Chain us to the head */
961 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
962 }
963 else {
964 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
965 mapCtl.mapcreln++; /* Count on release list */
966 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
967 mapCtl.mapcrel = mb; /* Chain us in front */
968 }
969 }
970 }
971
972 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
973 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
974 thread_call_enter(mapping_adjust_call); /* Go toss some */
975 }
976 }
977 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
978 splx(s); /* Restore 'rupts */
979
980 return; /* Bye, dude... */
981 }
982
983
984 /*
985 * mapping_alloc(lists) - obtain a mapping from the free list
986 *
987 * This routine takes a mapping off of the free list and returns its address.
988 * The mapping is zeroed, and its mpLists count is set. The caller passes in
989 * the number of skiplists it would prefer; if this number is greater than
990 * mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
991 * just two consecutive free entries coalesced into one. If we cannot find
992 * two consecutive free entries, we clamp the list count down to mpBasicLists
993 * and return a basic 64-byte node. Our caller never knows the difference.
994 *
995 * If this allocation empties a block, we remove it from the free list.
996 * If this allocation drops the total number of free entries below a threshold,
997 * we allocate a new block.
998 *
999 */
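/*
 * Note on the allocation helpers used below (an inference from the call sites, not a
 * specification of mapalc1/mapalc2 themselves): a positive return is the index of the
 * slot just allocated, a negative return is minus that index and signals that the block
 * now has no free slots left (so it must be unchained from the free-block list), and
 * zero means no suitable slot was found.
 */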
1000 decl_simple_lock_data(extern,free_pmap_lock)
1001
1002 mapping_t *
1003 mapping_alloc(int lists) { /* Obtain a mapping */
1004
1005 register mapping_t *mp;
1006 mappingblok_t *mb, *mbn;
1007 spl_t s;
1008 int mindx;
1009 int big = (lists > mpBasicLists); /* set flag if big block req'd */
1010 pmap_t refpmap, ckpmap;
1011 unsigned int space, i;
1012 addr64_t va, nextva;
1013 boolean_t found_mapping;
1014 boolean_t do_rescan;
1015
1016 s = splhigh(); /* Don't bother from now on */
1017 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1018 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1019 }
1020
1021 if(!((unsigned int)mapCtl.mapcnext)) { /* Are there any free mappings? */
1022
1023 /*
1024 * No free mappings. First, there may be some mapping blocks on the "to be released"
1025 * list. If so, rescue one. Otherwise, try to steal a couple blocks worth.
1026 */
1027
1028 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
1029 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1030 mapCtl.mapcreln--; /* Back off the count */
1031 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1032 goto rescued;
1033 }
1034
1035 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1036
1037 simple_lock(&free_pmap_lock);
1038
1039 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1040 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1041 }
1042
1043 if (!((unsigned int)mapCtl.mapcnext)) {
1044
1045 refpmap = (pmap_t)cursor_pmap->pmap_link.next;
1046 space = mapCtl.mapcflush.spacenum;
1047 while (refpmap != cursor_pmap) {
1048 if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
1049 refpmap = (pmap_t)refpmap->pmap_link.next;
1050 }
1051
1052 ckpmap = refpmap;
1053 va = mapCtl.mapcflush.addr;
1054 found_mapping = FALSE;
1055
1056 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1057
1058 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1059
1060 ckpmap = (pmap_t)ckpmap->pmap_link.next;
1061
1062 /* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
1063 shadow assist active.
1064 */
1065 if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
1066 && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {
1067 do_rescan = TRUE;
1068 for (i=0;i<8;i++) {
1069 mp = hw_purge_map(ckpmap, va, &nextva);
1070
1071 switch ((unsigned int)mp & mapRetCode) {
1072 case mapRtOK:
1073 mapping_free(mp);
1074 found_mapping = TRUE;
1075 break;
1076 case mapRtNotFnd:
1077 break;
1078 default:
1079 panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
1080 break;
1081 }
1082
1083 if (mapRtNotFnd == ((unsigned int)mp & mapRetCode))
1084 if (do_rescan)
1085 do_rescan = FALSE;
1086 else
1087 break;
1088
1089 va = nextva;
1090 }
1091 }
1092
1093 if (ckpmap == refpmap) {
1094 if (found_mapping == FALSE)
1095 panic("no valid pmap to purge mappings\n");
1096 else
1097 found_mapping = FALSE;
1098 }
1099
1100 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1101 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1102 }
1103
1104 }
1105
1106 mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
1107 mapCtl.mapcflush.addr = nextva;
1108 }
1109
1110 simple_unlock(&free_pmap_lock);
1111 }
1112
1113 rescued:
1114
1115 mb = mapCtl.mapcnext;
1116
1117 if ( big ) { /* if we need a big (128-byte) mapping */
1118 mapCtl.mapcbig++; /* count attempts to allocate a big mapping */
1119 mbn = NULL; /* this will be prev ptr */
1120 mindx = 0;
1121 while( mb ) { /* loop over mapping blocks with free entries */
1122 mindx = mapalc2(mb); /* try for 2 consecutive free bits in this block */
1123
1124 if ( mindx ) break; /* exit loop if we found them */
1125 mbn = mb; /* remember previous block */
1126 mb = mb->nextblok; /* move on to next block */
1127 }
1128 if ( mindx == 0 ) { /* if we couldn't find 2 consecutive bits... */
1129 mapCtl.mapcbigfails++; /* count failures */
1130 big = 0; /* forget that we needed a big mapping */
1131 lists = mpBasicLists; /* clamp list count down to the max in a 64-byte mapping */
1132 mb = mapCtl.mapcnext; /* back to the first block with a free entry */
1133 }
1134 else { /* if we did find a big mapping */
1135 mapCtl.mapcfree--; /* Decrement free count twice */
1136 mapCtl.mapcinuse++; /* Bump in use count twice */
1137 if ( mindx < 0 ) { /* if we just used the last 2 free bits in this block */
1138 if (mbn) { /* if this wasn't the first block */
1139 mindx = -mindx; /* make positive */
1140 mbn->nextblok = mb->nextblok; /* unlink this one from the middle of block list */
1141 if (mb == mapCtl.mapclast) { /* if we emptied last block */
1142 mapCtl.mapclast = mbn; /* then prev block is now last */
1143 }
1144 }
1145 }
1146 }
1147 }
1148
1149 if ( !big ) { /* if we need a small (64-byte) mapping */
1150 if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */
1151 panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
1152 }
1153
1154 if(mindx < 0) { /* Did we just take the last one */
1155 mindx = -mindx; /* Make positive */
1156 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1157 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1158 }
1159
1160 mapCtl.mapcfree--; /* Decrement free count */
1161 mapCtl.mapcinuse++; /* Bump in use count */
1162
1163 mapCtl.mapcallocc++; /* Count total calls */
1164
1165 /*
1166 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1167 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1168 * rescuable, we will kick the misc scan, which will allocate some for us. We only do this
1169 * if we haven't already done it.
1170 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1171 * the release list with as much as we need until threads start.
1172 */
1173
1174 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1175 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
1176 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1177 mapCtl.mapcreln--; /* Back off the count */
1178 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1179 }
1180 else { /* We need to replenish */
1181 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1182 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1183 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1184 }
1185 }
1186 }
1187 }
1188
1189 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1190 splx(s); /* Restore 'rupts */
1191
1192 mp = &((mapping_t *)mb)[mindx]; /* Point to the allocated mapping */
1193 mp->mpFlags = lists; /* set the list count */
1194
1195
1196 return mp; /* Send it back... */
1197 }
1198
1199
1200 void
1201 consider_mapping_adjust(void)
1202 {
1203 spl_t s;
1204
1205 s = splhigh(); /* Don't bother from now on */
1206 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1207 panic("consider_mapping_adjust -- lock timeout\n");
1208 }
1209
1210 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1211 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1212 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1213 }
1214 }
1215
1216 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1217 splx(s); /* Restore 'rupts */
1218
1219 }
1220
1221
1222
1223 /*
1224 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1225 *
1226 * The mapping block is a page size area on a page boundary. It contains 1 header and 63
1227 * mappings. This call adds and initializes a block for use. Mappings come in two sizes,
1228 * 64 and 128 bytes (the only difference is the number of skip-lists.) When we allocate a
1229 * 128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
1230 * code only deals with "basic" 64-byte mappings. This works for two reasons:
1231 * - Only one in 256 mappings is big, so they are rare.
1232 * - If we cannot find two consecutive free mappings, we just return a small one.
1233 * There is no problem with doing this, except a minor performance degradation.
1234 * Therefore, all counts etc in the mapping control structure are in units of small blocks.
1235 *
1236 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1237 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1238 * corresponds to the header). The translation mask is the XOR of the virtual and real
1239 * addresses (needless to say, the block must be wired).
1240 *
1241 * We handle these mappings the same way as saveareas: the block is only on the chain so
1242 * long as there are free entries in it.
1243 *
1244 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1245 * mappings. Blocks marked PERM won't ever be released.
1246 *
1247 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1248 * list. We do this only at start up time. This is done because we only allocate blocks
1249 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1250 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1251 * them on the release queue, the allocate routine will rescue them. Then when the
1252 * pageout scan starts, all extra ones will be released.
1253 *
1254 */
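/*
 * Worked layout example (derived from the initialization below): with 4K pages and
 * 64-byte slots a block holds 64 slots; slot 0 is the header, leaving 63 usable
 * mappings, which is the unit MAPPERBLOK counts in. Setting mapblokfree[0] to
 * 0x7FFFFFFF and mapblokfree[1] to 0xFFFFFFFF therefore marks slots 1..63 free and
 * slot 0 permanently busy. mapblokvrswap is (virtual XOR real), so either address of
 * the block can be recovered from the other with a single XOR.
 */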
1255
1256
1257 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1258 /* Sets up a block of mappings;
1259 perm indicates if the block can be released
1260 or goes straight to the release queue.
1261 locked indicates if the lock is held already */
1262
1263 mappingblok_t *mb;
1264 spl_t s;
1265 addr64_t raddr;
1266 ppnum_t pp;
1267
1268 mb = (mappingblok_t *)mbl; /* Start of area */
1269
1270 if(perm >= 0) { /* See if we need to initialize the block */
1271 if(perm) {
1272 raddr = (addr64_t)((unsigned int)mbl); /* Perm means V=R */
1273 mb->mapblokflags = mbPerm; /* Set perm */
1274 // mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1275 }
1276 else {
1277 pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl); /* Get the physical page */
1278 if(!pp) { /* What gives? Where's the page? */
1279 panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
1280 }
1281
1282 raddr = (addr64_t)pp << 12; /* Convert physical page to physical address */
1283 mb->mapblokflags = 0; /* Set not perm */
1284 // mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1285 }
1286
1287 mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl); /* Form translation mask */
1288
1289 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1290 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1291 }
1292
1293 s = splhigh(); /* Don't bother from now on */
1294 if(!locked) { /* Do we need the lock? */
1295 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1296 panic("mapping_free_init: timeout getting control lock\n"); /* Tell all and die */
1297 }
1298 }
1299
1300 if(perm < 0) { /* Direct to release queue? */
1301 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1302 mapCtl.mapcrel = mb; /* Queue us on in */
1303 mapCtl.mapcreln++; /* Count the free block */
1304 }
1305 else { /* Add to the free list */
1306
1307 mb->nextblok = 0; /* We always add to the end */
1308 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1309
1310 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1311 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1312 }
1313 else { /* We are not the first */
1314 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1315 mapCtl.mapclast = mb; /* We are now last */
1316 }
1317 }
1318
1319 if(!locked) { /* Do we need to unlock? */
1320 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1321 }
1322
1323 splx(s); /* Restore 'rupts */
1324 return; /* All done, leave... */
1325 }
1326
1327
1328 /*
1329 * void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
1330 *
1331 * No locks can be held, because we allocate memory here.
1332 * This routine needs a corresponding mapping_relpre call to remove the
1333 * hold off flag so that the adjust routine will free the extra mapping
1334 * blocks on the release list. I don't like this, but I don't know
1335 * how else to do this for now...
1336 *
1337 */
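/*
 * Pairing sketch (illustration only; length_in_bytes is a placeholder): a large request
 * is expected to bracket its mapping activity with the two calls, roughly as follows.
 */
#if 0
	mapping_prealloc(length_in_bytes);		/* Build up mapping blocks and bump the hold-off count */
	/* ... enter the mappings for the large range ... */
	mapping_relpre();						/* Drop the hold-off so mapping_adjust may trim the extras */
#endif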
1338
1339 void mapping_prealloc(unsigned int size) { /* Preallocates mappings for a large request */
1340
1341 int nmapb, i;
1342 kern_return_t retr;
1343 mappingblok_t *mbn;
1344 spl_t s;
1345
1346 s = splhigh(); /* Don't bother from now on */
1347 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1348 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1349 }
1350
1351 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1352
1353 mapCtl.mapcholdoff++; /* Bump the hold off count */
1354
1355 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1356 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1357 splx(s); /* Restore 'rupts */
1358 return;
1359 }
1360 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1361 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1362 splx(s); /* Restore 'rupts */
1363 return;
1364 }
1365 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1366
1367 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1368 splx(s); /* Restore 'rupts */
1369
1370 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1371 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1372 if(retr != KERN_SUCCESS) /* Did we get some memory? */
1373 break;
1374 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
1375 }
1376 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1377 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1378
1379 mapCtl.mapcrecurse = 0; /* We are done now */
1380 }
1381
1382 /*
1383 * void mapping_relpre(void) - Releases preallocation release hold off
1384 *
1385 * This routine removes the
1386 * hold off flag so that the adjust routine will free the extra mapping
1387 * blocks on the release list. I don't like this, but I don't know
1388 * how else to do this for now...
1389 *
1390 */
1391
1392 void mapping_relpre(void) { /* Releases release hold off */
1393
1394 spl_t s;
1395
1396 s = splhigh(); /* Don't bother from now on */
1397 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1398 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1399 }
1400 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1401 panic("mapping_relpre: hold-off count went negative\n");
1402 }
1403
1404 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1405 splx(s); /* Restore 'rupts */
1406 }
1407
1408 /*
1409 * void mapping_free_prime(void) - Primes the mapping block release list
1410 *
1411 * See mapping_free_init.
1412 * No locks can be held, because we allocate memory here.
1413 * One processor running only.
1414 *
1415 */
1416
1417 void mapping_free_prime(void) { /* Primes the mapping block release list */
1418
1419 int nmapb, i;
1420 kern_return_t retr;
1421 mappingblok_t *mbn;
1422 vm_offset_t mapping_min;
1423
1424 retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
1425 FALSE, VM_FLAGS_ANYWHERE, &mapping_map);
1426
1427 if (retr != KERN_SUCCESS)
1428 panic("mapping_free_prime: kmem_suballoc failed");
1429
1430
1431 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1432 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1433
1434 #if DEBUG
1435 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1436 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1437 #endif
1438
1439 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1440 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1441 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1442 panic("Whoops... Not a bit of wired memory left for anyone\n");
1443 }
1444 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1445 }
1446 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1447 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1448 }
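
/*
 * Worked example (illustrative only; the memory size and boot-time usage are assumed):
 * on a machine where sane_size is 512 MB, the mapping_map submap created above spans
 * 32 MB (sane_size / 16).  If the boot-time mappings occupy two blocks' worth of
 * entries, the loop primes the release list with 4 * 2 = 8 additional wired pages of
 * mapping blocks.
 */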
1449
1450
1451 void
1452 mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1453 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1454 {
1455 *count = mapCtl.mapcinuse;
1456 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1457 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1458 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1459 *alloc_size = PAGE_SIZE;
1460
1461 *collectable = 1;
1462 *exhaustable = 0;
1463 }
1464
1465
1466 /*
1467 * addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
1468 *
1469 * First looks up the physical entry associated with the physical page. Then searches the alias
1470 * list for a matching pmap. It grabs the virtual address from the mapping, drops busy, and returns
1471 * that.
1472 *
1473 */
1474
1475 addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) { /* Finds first virtual mapping of a physical page in a space */
1476
1477 spl_t s;
1478 mapping_t *mp;
1479 unsigned int pindex;
1480 phys_entry_t *physent;
1481 addr64_t va;
1482
1483 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1484 if(!physent) { /* Did we find the physical page? */
1485 panic("mapping_p2v: invalid physical page %08X\n", pa);
1486 }
1487
1488 s = splhigh(); /* Make sure interruptions are disabled */
1489
1490 mp = hw_find_space(physent, pmap->space); /* Go find the first mapping to the page from the requested pmap */
1491
1492 if(mp) { /* Did we find one? */
1493 va = mp->mpVAddr & -4096; /* If so, get the cleaned up vaddr */
1494 mapping_drop_busy(mp); /* Go ahead and release the mapping now */
1495 }
1496 else va = 0; /* Return failure */
1497
1498 splx(s); /* Restore 'rupts */
1499
1500 return va; /* Bye, bye... */
1501
1502 }
1503
1504 /*
1505 * phystokv(addr)
1506 *
1507 * Convert a physical address to a kernel virtual address if
1508 * there is a mapping, otherwise return 0
1509 */
1510
1511 vm_offset_t phystokv(vm_offset_t pa) {
1512
1513 addr64_t va;
1514 ppnum_t pp;
1515
1516 pp = pa >> 12; /* Convert to a page number */
1517
1518 if(!(va = mapping_p2v(kernel_pmap, pp))) {
1519 return 0; /* Can't find it, return 0... */
1520 }
1521
1522 return (va | (pa & (PAGE_SIZE - 1))); /* Build and return VADDR... */
1523
1524 }
1525
1526 /*
1527 * kvtophys(addr)
1528 *
1529 * Convert a kernel virtual address to a physical address
1530 */
1531 vm_offset_t kvtophys(vm_offset_t va) {
1532
1533 return pmap_extract(kernel_pmap, va); /* Find mapping and lock the physical entry for this mapping */
1534
1535 }
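
/*
 * Illustrative sketch (not part of the original source): a round trip through
 * kvtophys() and phystokv().  For a mapped kernel virtual address, translating to
 * physical and back reproduces the original address only when that address is the
 * first alias found; an unmapped address yields 0 from both routines.
 */
#if 0 /* example only */
static boolean_t example_kv_round_trip(vm_offset_t kva) {

	vm_offset_t pa, kva2;

	pa = kvtophys(kva);				/* Virtual to physical */
	if(pa == 0) return FALSE;			/* Not mapped at all */

	kva2 = phystokv(pa);				/* Physical back to the first kernel virtual mapping */
	return (kva2 == kva);				/* True when kva is that first mapping */
}
#endif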
1536
1537 /*
1538 * kvtophys64(addr)
1539 *
1540 * Convert a kernel virtual address to a 64-bit physical address
1541 */
1542 vm_map_offset_t kvtophys64(vm_map_offset_t va) {
1543 ppnum_t pa = pmap_find_phys(kernel_pmap, (addr64_t)va);
1544
1545 if (!pa)
1546 return (vm_map_offset_t)0;
1547 return (((vm_map_offset_t)pa) << 12) | (va & 0xfff);
1548 }
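
/*
 * Worked example (illustrative): if pmap_find_phys() returns page number 0x12345 for a
 * kernel virtual address with page offset 0xA10, the routine above builds the physical
 * address ((vm_map_offset_t)0x12345 << 12) | 0xA10 = 0x12345A10.
 */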
1549
1550 /*
1551 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1552 * page 0 access for the current thread.
1553 *
1554 * If parameter is TRUE, faults are ignored
1555 * If parameter is FALSE, faults are honored
1556 *
1557 */
1558
1559 void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1560
1561 if(type) current_thread()->machine.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1562 else current_thread()->machine.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1563
1564 return; /* All done, leave... */
1565 }
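
/*
 * Illustrative sketch (not part of the original source): a caller that must probe
 * page 0 without taking a fatal fault can bracket the access with ignore_zero_fault().
 * The probe itself is hypothetical.
 */
#if 0 /* example only */
static void example_probe_page_zero(void) {

	ignore_zero_fault(TRUE);			/* Tolerate faults on page 0 for this thread */

	/* ... touch low memory here ... */

	ignore_zero_fault(FALSE);			/* Restore normal page 0 fault handling */
}
#endif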
1566
1567
1568 /*
1569 * Copies data between a physical page and a virtual page, or between two physical pages. This is used to
1570 * move data from the kernel to user state. Note that the "which" parm
1571 * says which of the parameters is physical and if we need to flush sink/source.
1572 * Note that both addresses may be physical, but only one may be virtual.
1573 *
1574 * The rules are that the size can be anything. Either address can be on any boundary
1575 * and span pages. The physical data must be contiguous as must the virtual.
1576 *
1577 * We can block when we try to resolve the virtual address at each page boundary.
1578 * We don't check protection on the physical page.
1579 *
1580 * Note that we will not check the entire range and if a page translation fails,
1581 * we will stop with partial contents copied.
1582 *
1583 */
1584
1585 kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {
1586
1587 vm_map_t map;
1588 kern_return_t ret;
1589 addr64_t nextva, vaddr, paddr;
1590 register mapping_t *mp;
1591 spl_t s;
1592 unsigned int lop, csize;
1593 int needtran, bothphys;
1594 unsigned int pindex;
1595 phys_entry_t *physent;
1596 vm_prot_t prot;
1597 int orig_which;
1598
1599 orig_which = which;
1600
1601 map = (which & cppvKmap) ? kernel_map : current_map_fast();
1602
1603 if((which & (cppvPsrc | cppvPsnk)) == 0 ) { /* Make sure that no more than one parameter is virtual */
1604 panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
1605 }
1606
1607 bothphys = 1; /* Assume both are physical */
1608
1609 if(!(which & cppvPsnk)) { /* Is sink page virtual? */
1610 vaddr = sink; /* Sink side is virtual */
1611 bothphys = 0; /* Show both aren't physical */
1612 prot = VM_PROT_READ | VM_PROT_WRITE; /* Sink always must be read/write */
1613 } else if (!(which & cppvPsrc)) { /* Is source page virtual? */
1614 vaddr = source; /* Source side is virtual */
1615 bothphys = 0; /* Show both aren't physical */
1616 prot = VM_PROT_READ; /* Virtual source is always read only */
1617 }
1618
1619 needtran = 1; /* Show we need to map the virtual the first time */
1620 s = splhigh(); /* Don't bother me */
1621
1622 while(size) {
1623
1624 if(!bothphys && (needtran || !(vaddr & 4095LL))) { /* If first time or we stepped onto a new page, we need to translate */
1625 if(!needtran) { /* If this is not the first translation, we need to drop the old busy */
1626 mapping_drop_busy(mp); /* Release the old mapping now */
1627 }
1628 needtran = 0;
1629
1630 while(1) {
1631 mp = mapping_find(map->pmap, vaddr, &nextva, 1); /* Find and busy the mapping */
1632 if(!mp) { /* Was it there? */
1633 if(getPerProc()->istackptr == 0)
1634 panic("copypv: No valid mapping on memory %s %x", "RD", vaddr);
1635
1636 splx(s); /* Restore the interrupt level */
1637 ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0); /* Didn't find it, try to fault it in... */
1638
1639 if(ret != KERN_SUCCESS)return KERN_FAILURE; /* Didn't find any, return no good... */
1640
1641 s = splhigh(); /* Don't bother me */
1642 continue; /* Go try for the map again... */
1643
1644 }
1645 if (mp->mpVAddr & mpI) { /* cache inhibited, so force the appropriate page to be flushed before */
1646 if (which & cppvPsrc) /* and after the copy to avoid cache paradoxes */
1647 which |= cppvFsnk;
1648 else
1649 which |= cppvFsrc;
1650 } else
1651 which = orig_which;
1652
1653 /* Note that the destination must be mapped writable. So, if we already have it writable, or we are mapping the source,
1654 we can just leave.
1655 */
1656 if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break; /* We got it mapped R/W or the source is not virtual, leave... */
1657
1658 mapping_drop_busy(mp); /* Go ahead and release the mapping for now */
1659 if(getPerProc()->istackptr == 0)
1660 panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr);
1661 splx(s); /* Restore the interrupt level */
1662
1663 ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0); /* check for a COW area */
1664 if (ret != KERN_SUCCESS) return KERN_FAILURE; /* We couldn't get it R/W, leave in disgrace... */
1665 s = splhigh(); /* Don't bother me */
1666 }
1667 paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL)); /* construct the physical address... this calculation works */
1668 /* properly on both single page and block mappings */
1669 if(which & cppvPsrc) sink = paddr; /* If source is physical, then the sink is virtual */
1670 else source = paddr; /* Otherwise the source is */
1671 }
1672
1673 lop = (unsigned int)(4096LL - (sink & 4095LL)); /* Assume sink smallest */
1674 if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL)); /* No, source is smaller */
1675
1676 csize = size; /* Assume we can copy it all */
1677 if(lop < size) csize = lop; /* Nope, we can't do it all */
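			/*
			 * Worked example (illustrative): with a sink page offset of 0x080 and a source
			 * page offset of 0xF00, lop is min(0xF80, 0x100) = 0x100, so at most 0x100 bytes
			 * move this pass even if size is larger; the next pass resumes at the source's
			 * page boundary.
			 */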
1678
1679 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source before move */
1680 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink before move */
1681
1682 bcopy_physvir_32(source, sink, csize); /* Do a physical copy, virtually */
1683
1684 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source after move */
1685 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink after move */
1686
1687 /*
1688 * Note that for certain ram disk flavors, we may be copying outside of known memory.
1689 * Therefore, before we try to mark it modified, we check if it exists.
1690 */
1691
1692 if( !(which & cppvNoModSnk)) {
1693 physent = mapping_phys_lookup(sink >> 12, &pindex); /* Get physical entry for sink */
1694 if(physent) mapping_set_mod((ppnum_t)(sink >> 12)); /* Make sure we know that it is modified */
1695 }
1696 if( !(which & cppvNoRefSrc)) {
1697 physent = mapping_phys_lookup(source >> 12, &pindex); /* Get physical entry for source */
1698 if(physent) mapping_set_ref((ppnum_t)(source >> 12)); /* Make sure we know that it has been referenced */
1699 }
1700 size = size - csize; /* Calculate what is left */
1701 vaddr = vaddr + csize; /* Move to next sink address */
1702 source = source + csize; /* Bump source to next physical address */
1703 sink = sink + csize; /* Bump sink to next physical address */
1704 }
1705
1706 if(!bothphys) mapping_drop_busy(mp); /* Go ahead and release the mapping of the virtual page if any */
1707 splx(s); /* Open up for interrupts */
1708
1709 return KERN_SUCCESS;
1710 }
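
/*
 * Illustrative sketch (not from the original source): copying a buffer from a physical
 * page to a user virtual address with hw_copypv_32().  cppvPsrc marks the source as
 * physical; the sink stays virtual, so it is translated (and faulted in if needed), and
 * cppvFsnk asks for the sink to be flushed.  The addresses and length are hypothetical.
 */
#if 0 /* example only */
static kern_return_t example_copy_phys_to_user(addr64_t src_phys, addr64_t user_va, unsigned int len) {

	return hw_copypv_32(src_phys, user_va, len, cppvPsrc | cppvFsnk);	/* Physical source, virtual sink, flush sink */
}
#endif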
1711
1712
1713 /*
1714 * Debug code
1715 */
1716
1717 void mapping_verify(void) {
1718
1719 spl_t s;
1720 mappingblok_t *mb, *mbn;
1721 unsigned int relncnt;
1722 unsigned int dumbodude;
1723
1724 dumbodude = 0;
1725
1726 s = splhigh(); /* Don't bother from now on */
1727
1728 mbn = 0; /* Start with none */
1729 for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) { /* Walk the free chain */
1730 if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) { /* Is tag ok? */
1731 panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
1732 }
1733 mbn = mb; /* Remember the last one */
1734 }
1735
1736 if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) { /* Do we point to the last one? */
1737 panic("mapping_verify: last pointer bad; mbn = %08X, mapclast = %08X\n", mbn, mapCtl.mapclast);
1738 }
1739
1740 relncnt = 0; /* Clear count */
1741 for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) { /* Walk the release chain */
1742 dumbodude |= mb->mapblokflags; /* Just touch it to make sure it is mapped */
1743 relncnt++; /* Count this one */
1744 }
1745
1746 if(mapCtl.mapcreln != relncnt) { /* Is the count on release queue ok? */
1747 panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
1748 }
1749
1750 splx(s); /* Restore 'rupts */
1751
1752 return;
1753 }
1754
1755 void mapping_phys_unused(ppnum_t pa) {
1756
1757 unsigned int pindex;
1758 phys_entry_t *physent;
1759
1760 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1761 if(!physent) return; /* Did we find the physical page? */
1762
1763 if(!(physent->ppLink & ~(ppLock | ppFlags))) return; /* No one else is here */
1764
1765 panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
1766
1767 }
1768
1769 void mapping_hibernate_flush(void)
1770 {
1771 int bank;
1772 unsigned int page;
1773 struct phys_entry * entry;
1774
1775 for (bank = 0; bank < pmap_mem_regions_count; bank++)
1776 {
1777 entry = (struct phys_entry *) pmap_mem_regions[bank].mrPhysTab;
1778 for (page = pmap_mem_regions[bank].mrStart; page <= pmap_mem_regions[bank].mrEnd; page++)
1779 {
1780 hw_walk_phys(entry, hwpNoop, hwpNoop, hwpNoop, 0, hwpPurgePTE);
1781 entry++;
1782 }
1783 }
1784 }
1785
1786
1787
1788
1789
1790