1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
27 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
28 * Currently, some of the functionality of this module is contained within pmap.c. We may want to move
29 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
30 *
31 * We also depend upon the structure of the phys_entry control block. We do put some processor
32 * specific stuff in there.
33 *
34 */
35
36 #include <cpus.h>
37 #include <debug.h>
38 #include <mach_kgdb.h>
39 #include <mach_vm_debug.h>
40 #include <db_machine_commands.h>
41
42 #include <kern/thread.h>
43 #include <kern/thread_act.h>
44 #include <mach/vm_attributes.h>
45 #include <mach/vm_param.h>
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49 #include <kern/spl.h>
50
51 #include <kern/misc_protos.h>
52 #include <ppc/exception.h>
53 #include <ppc/misc_protos.h>
54 #include <ppc/proc_reg.h>
55
56 #include <vm/pmap.h>
57 #include <ppc/pmap.h>
58 #include <ppc/mem.h>
59
60 #include <ppc/new_screen.h>
61 #include <ppc/Firmware.h>
62 #include <ppc/mappings.h>
63 #include <ddb/db_output.h>
64
65 #include <ppc/POWERMAC/video_console.h> /* (TEST/DEBUG) */
66
67 #define PERFTIMES 0
68
69 vm_map_t mapping_map = VM_MAP_NULL;
70
71 unsigned int incrVSID = 0; /* VSID increment value */
72 unsigned int mappingdeb0 = 0;
73 unsigned int mappingdeb1 = 0;
74 int ppc_max_adrsp; /* Maximum address spaces */
75
76 addr64_t *mapdebug; /* (BRINGUP) */
77 extern unsigned int DebugWork; /* (BRINGUP) */
78
79 extern unsigned int hash_table_size;
80
81 void mapping_verify(void);
82 void mapping_phys_unused(ppnum_t pa);
83
84 /*
85 * ppc_prot translates from the mach representation of protections to the PPC version.
86 * We also allow for a direct setting of the protection bits. This extends the mach
87 * concepts to allow the greater control we need for Virtual Machines (VMM).
88 * Calculation of it like this saves a memory reference - and maybe a couple of microseconds.
89 * It eliminates the use of this table.
90 * unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
91 */
92
93 #define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
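/*
 * Worked example (illustrative, not part of the original source): for p = 3
 * (VM_PROT_READ | VM_PROT_WRITE), the macro selects bit pair 3 of the constant:
 * (0xE4E4AFAC >> 6) & 3 = 2, matching ppc_prot[3] = 2 in the table above.
 */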
94
95 /*
96 * About PPC VSID generation:
97 *
98 * This function is called to generate an address space ID. This space ID must be unique within
99 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
100 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that we reserve the last
101 * 4 for the segment, we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
102 * to have 1M pmaps at a time, though in practice we'd run out of memory way before then. The
103 * problem is that only a certain number of pmaps are kept on a free list and, if that is full,
104 * they are released. This causes us to lose track of which space IDs are free to be reused.
105 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
106 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
107 *
108 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
109 * calculation for virtual address lookup. An improperly chosen value could potentially cause
110 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
111 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
112 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
113 * are reserved for the segment number, which means that we really have 2^(13-4) (512) space IDs
114 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
115 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
116 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
117 * with no overflow. I don't think that this is a problem.
118 *
119 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
120 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
121 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
122 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
123 * the same modulo 512. We can reduce this problem by having the segment number be bits
124 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
125 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
126 * I don't think that it is as significant as the other, so, I'll make the space ID
127 * with segment first.
128 *
129 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
130 * While this is a problem that should only happen in periods counted in weeks, it can and
131 * will happen. This is assuming a monotonically increasing space ID. If we were to search
132 * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
133 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
134 *
135 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
136 * locked by free_pmap_lock) that is sorted in VSID sequence order.
137 *
138 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
139 * the last that was freed. Then we allocate that.
140 *
141 * NOTE: We must be called with interruptions off and free_pmap_lock held.
142 *
143 */
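/*
 * Minimal illustration of the construction stated at the top of this note
 * (space ID << 4 | segment), using assumed example values: a pmap whose space ID
 * is 0x12345 referencing segment 7 would use VSID = (0x12345 << 4) | 7 = 0x123457,
 * i.e. the low 4 bits carry the segment and the remaining 20 bits carry the space ID.
 */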
144
145 /*
146 * mapping_init();
147 * Do anything that needs to be done before the mapping system can be used.
148 * Hash table must be initialized before we call this.
149 *
150 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
151 */
152
153 void mapping_init(void) {
154
155 unsigned int tmp, maxeff, rwidth;
156
157 ppc_max_adrsp = maxAdrSp; /* Set maximum address spaces */
158
159 maxeff = 32; /* Assume 32-bit */
160 if(per_proc_info[0].pf.Available & pf64Bit) maxeff = 64; /* Is this a 64-bit machine? */
161
162 rwidth = per_proc_info[0].pf.pfMaxVAddr - maxAdrSpb; /* Reduce address width by width of address space ID */
163 if(rwidth > maxeff) rwidth = maxeff; /* If we still have more virtual than effective, clamp at effective */
164
165 vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth); /* Get maximum effective address supported */
166 vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - per_proc_info[0].pf.pfMaxPAddr); /* Get maximum physical address supported */
167
168 if(per_proc_info[0].pf.Available & pf64Bit) { /* Are we 64 bit? */
169 tmp = 12; /* Size of hash space */
170 }
171 else {
172 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
173 tmp = 32 - tmp; /* Size of hash space */
174 }
175
176 incrVSID = 1 << ((tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
177 incrVSID |= 1 << ((tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
178 incrVSID |= 1; /* Set bit and add 1 */
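/*
 * Worked example (illustrative): on a 64-bit machine tmp = 12, so
 * incrVSID = (1 << 6) | (1 << 3) | 1 = 73, i.e. sqrt(4096) + quadroot(4096) + 1.
 */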
179
180 return;
181
182 }
183
184
185 /*
186 * mapping_remove(pmap_t pmap, addr64_t va);
187 * Given a pmap and virtual address, this routine finds the mapping and unmaps it.
188 * The mapping block will be added to
189 * the free list. If the free list threshold is reached, garbage collection will happen.
190 *
191 * We also pass back the next higher mapped address. This is done so that the higher level
192 * pmap_remove function can release a range of addresses simply by calling mapping_remove
193 * in a loop until it finishes the range or is returned a vaddr of 0.
194 *
195 * Note that if the mapping is not found, we return the next VA ORed with 1
196 *
197 */
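/*
 * Hedged usage sketch (illustrative only, not the actual pmap_remove code): a caller
 * releasing the range [start, end) could loop roughly like this:
 *
 *	va = start;
 *	while(va && (va < end)) va = mapping_remove(pmap, va) & ~1ULL;
 *
 * where clearing the low bit discards the "not found" indication described above.
 */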
198
199 addr64_t mapping_remove(pmap_t pmap, addr64_t va) { /* Remove a single mapping for this VADDR;
200 returns the next mapped VA, with the low bit set if no mapping was found */
201
202 mapping *mp;
203 addr64_t nextva;
204
205 disable_preemption(); /* Don't change threads */
206
207 while(1) { /* Keep trying until we truly fail */
208 mp = hw_rem_map(pmap, va, &nextva); /* Remove a mapping from this pmap */
209 if(((unsigned int)mp & mapRetCode) != mapRtRemove) break; /* If it is gone, we are done */
210 }
211
212 enable_preemption(); /* Thread change ok */
213
214 if(!mp) return (nextva | 1); /* Nothing found to unmap */
215
216 if((unsigned int)mp & mapRetCode) { /* Was there a failure? */
217
218 panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
219 pmap, va, mp);
220 }
221
222 mapping_free(mp); /* Add mapping to the free list */
223
224 return nextva; /* Tell them we did it */
225 }
226
227 /*
228 * mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
229 *
230 * This routine takes the given parameters, builds a mapping block, and queues it into the
231 * correct lists.
232 *
233 * pmap (virtual address) is the pmap to map into
234 * va (virtual address) is the 64-bit virtual address that is being mapped
235 pa (physical page number) is the physical page number (i.e., physical address >> 12). This is
236 * a 32-bit quantity.
237 * Flags:
238 * block if 1, mapping is a block, size parameter is used. Note: we do not keep
239 * reference and change information or allow protection changes of blocks.
240 * any changes must first unmap and then remap the area.
241 * use attribute Use specified attributes for map, not defaults for physical page
242 * perm Mapping is permanent
243 * cache inhibited Cache inhibited (used if use attribute or block set )
244 * guarded Guarded access (used if use attribute or block set )
245 * size size of block (not used if not block)
246 * prot VM protection bits
247 * attr Cachability/Guardedness
248 *
249 * Returns 0 if the mapping was successful, the vaddr that overlaps/collides if there was
250 * a collision, or 1 for any other failure.
251 *
252 * Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
253 * for I/O and default the cache attributes appropriately. The caller is free to set whatever they want, however.
254 *
255 * If there is any physical page that is not found in the physent table, the mapping is forced to be a
256 * block mapping of length 1. This keeps us from trying to update a physent during later mapping use,
257 * e.g., fault handling.
258 *
259 *
260 */
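/*
 * Hedged example (illustrative values): mapping a single, ordinary page read/write
 * could look like
 *
 *	colladdr = mapping_make(pmap, va, pa, 0, 1, VM_PROT_READ | VM_PROT_WRITE);
 *
 * where a return of 0 means success, 1 means the size was invalid, and any other
 * value is the vaddr of the mapping we collided with.
 */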
261
262 addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) { /* Make an address mapping */
263
264 register mapping *mp;
265 addr64_t colladdr;
266 unsigned int pindex, mflags, pattr, wimg;
267 phys_entry *physent;
268 int i, nlists;
269
270 disable_preemption(); /* Don't change threads */
271
272 pindex = 0;
273
274 mflags = 0x01000000; /* Start building mpFlags field (busy count = 1) */
275
276 if(!(flags & mmFlgBlock)) { /* Is this a block map? */
277
278 size = 1; /* Set size to 1 page if not block */
279
280 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
281 if(!physent) { /* Did we find the physical page? */
282 mflags |= mpBlock; /* Force this to a block if no physent */
283 size = 1; /* Force size to 1 page */
284 pattr = 0; /* Assume normal, non-I/O memory */
285 if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
286 }
287 else pattr = ((physent->ppLink & (ppI | ppG)) >> 4); /* Get the default attributes from physent */
288
289 if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
290 }
291 else { /* This is a block */
292
293 pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
294 mflags |= mpBlock; /* Show that this is a block */
295 }
296
297 wimg = 0x2; /* Set basic PPC wimg to 0b0010 - Coherent */
298 if(pattr & mmFlgCInhib) wimg |= 0x4; /* Add cache inhibited if we need to */
299 if(pattr & mmFlgGuarded) wimg |= 0x1; /* Add guarded if we need to */
300
301 mflags = mflags | (pindex << 16); /* Stick in the physical entry table index */
302
303 if(flags & mmFlgPerm) mflags |= mpPerm; /* Set permanent mapping */
304
305 size = size - 1; /* Change size to offset */
306 if(size > 0xFFFF) return 1; /* Leave if size is too big */
307
308 nlists = mapSetLists(pmap); /* Set number of lists this will be on */
309
310 mp = mapping_alloc(nlists); /* Get a spare mapping block with this many lists */
311
312 /* the mapping is zero except that the mpLists field is set */
313 mp->mpFlags |= mflags; /* Add in the rest of the flags to mpLists */
314 mp->mpSpace = pmap->space; /* Set the address space/pmap lookup ID */
315 mp->mpBSize = size; /* Set the size */
316 mp->mpPte = 0; /* Set the PTE invalid */
317 mp->mpPAddr = pa; /* Set the physical page number */
318 mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | ppc_prot(prot); /* Add the protection and attributes to the field */
319
320 while(1) { /* Keep trying... */
321 colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */
322 if(!colladdr) { /* All is ok... */
323 enable_preemption(); /* Ok to switch around here */
324 return 0; /* Return... */
325 }
326
327 if((colladdr & mapRetCode) == mapRtRemove) { /* Is our target being removed? */
328 (void)mapping_remove(pmap, colladdr); /* Yes, go help out */
329 continue; /* Try to add it now */
330 }
331
332 if((colladdr & mapRetCode) == mapRtMapDup) { /* Is our target already mapped (collision mapping must be identical)? */
333 mapping_free(mp); /* Return mapping to the free list */
334 enable_preemption(); /* Ok to switch around here */
335 return 0; /* Normal return */
336 }
337
338 if(colladdr != mapRtBadLk) { /* Did it collide? */
339 mapping_free(mp); /* Yeah, toss the pending mapping */
340 enable_preemption(); /* Ok to switch around here */
341 return colladdr; /* Pass back the overlapping address */
342 }
343
344 panic("mapping_make: hw_add_map failed - code = %08X, pmap = %08X, va = %016llX, mapping = %08X\n",
345 colladdr, pmap, va, mp); /* Die dead */
346 }
347
348 return 1; /* Leave... */
349 }
350
351
352 /*
353 * mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping
354 *
355 * Looks up the vaddr and returns the mapping and the next mapped va
356 * If full is true, it will descend through all nested pmaps to find actual mapping
357 *
358 * Must be called with interruptions disabled or we can hang trying to remove the found mapping.
359 *
360 * Returns 0 if not found, or the address of the mapping if it is found.
361 * Note that the mapping's busy count is bumped. It is the responsibility of the caller
362 * to drop the count. If this is not done, any attempt to remove the mapping will hang.
363 *
364 * NOTE: The nextva field is not valid when full is TRUE.
365 *
366 *
367 */
368
369 mapping *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) { /* Make an address mapping */
370
371 register mapping *mp;
372 addr64_t curva;
373 pmap_t curpmap;
374 int nestdepth;
375
376 curpmap = pmap; /* Remember entry */
377 nestdepth = 0; /* Set nest depth */
378 curva = (addr64_t)va; /* Set current va */
379
380 while(1) {
381
382 mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */
383 if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
384 panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap); /* Die... */
385 }
386
387 if(!mp || !(mp->mpFlags & mpNest) || !full) break; /* Are we a nest or are we only going one deep? */
388
389 if(mp->mpFlags & mpSpecial) { /* Don't chain through a special mapping */
390 mp = 0; /* Set not found */
391 break;
392 }
393
394 if(nestdepth++ > 64) { /* Have we nested too far down? */
395 panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
396 va, curva, pmap, curpmap);
397 }
398
399 curva = curva + mp->mpNestReloc; /* Relocate va to new pmap */
400 curpmap = pmapTrans[mp->mpSpace].pmapVAddr; /* Get the address of the nested pmap */
401 mapping_drop_busy(mp); /* We have everything we need from the mapping */
402
403 }
404
405 return mp; /* Return the mapping if we found one */
406 }
407
408 /*
409 * int mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
410 *
411 * This routine takes a pmap and virtual address and changes
412 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
413 * the protection is changed.
414 *
415 * We return success if we change the protection or if there is no page mapped at va. We return failure if
416 * the va corresponds to a block mapped area or the mapping is permanent.
417 *
418 *
419 */
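/*
 * Hedged example (illustrative): write-protecting one page and learning where the
 * next mapping starts could look like
 *
 *	ret = mapping_protect(pmap, va, VM_PROT_READ, &nextva);
 *
 * where mapRtOK means the change was made (or nothing was mapped there), and
 * mapRtBlock/mapRtNest mean the request was ignored.
 */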
420
421 int mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */
422
423 int ret;
424
425 ret = hw_protect(pmap, va, ppc_prot(prot), nextva); /* Try to change the protect here */
426
427 switch (ret) { /* Decode return code */
428
429 case mapRtOK: /* Changed */
430 case mapRtNotFnd: /* Didn't find it */
431 return mapRtOK; /* Ok, return... */
432 break;
433
434 case mapRtBlock: /* Block map, just ignore request */
435 case mapRtNest: /* Nested pmap, just ignore request */
436 return ret; /* Pass back return code */
437 break;
438
439 default:
440 panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
441
442 }
443
444 }
445
446 /*
447 * void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
448 *
449 * This routine takes a physical entry and runs through all mappings attached to it and changes
450 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
451 * the protection is changed. There is no limitation on changes, e.g.,
452 * higher to lower, lower to higher.
453 *
454 * Any mapping that is marked permanent is not changed
455 *
456 * Phys_entry is unlocked.
457 */
458
459 void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */
460
461 unsigned int pindex;
462 phys_entry *physent;
463
464 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
465 if(!physent) { /* Did we find the physical page? */
466 panic("mapping_protect_phys: invalid physical page %08X\n", pa);
467 }
468
469 hw_walk_phys(physent, hwpSPrtPhy, hwpSPrtMap, hwpNoop, ppc_prot(prot)); /* Set the new protection for page and mappings */
470
471 return; /* Leave... */
472 }
473
474
475 /*
476 * void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
477 *
478 * This routine takes a physical entry and runs through all mappings attached to it and turns
479 * off the change bit.
480 */
481
482 void mapping_clr_mod(ppnum_t pa) { /* Clears the change bit of a physical page */
483
484 unsigned int pindex;
485 phys_entry *physent;
486
487 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
488 if(!physent) { /* Did we find the physical page? */
489 panic("mapping_clr_mod: invalid physical page %08X\n", pa);
490 }
491
492 hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy, 0); /* Clear change for page and mappings */
493 return; /* Leave... */
494 }
495
496
497 /*
498 * void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
499 *
500 * This routine takes a physical entry and runs through all mappings attached to it and turns
501 * on the change bit.
502 */
503
504 void mapping_set_mod(ppnum_t pa) { /* Sets the change bit of a physical page */
505
506 unsigned int pindex;
507 phys_entry *physent;
508
509 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
510 if(!physent) { /* Did we find the physical page? */
511 panic("mapping_set_mod: invalid physical page %08X\n", pa);
512 }
513
514 hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy, 0); /* Set change for page and mappings */
515 return; /* Leave... */
516 }
517
518
519 /*
520 * void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
521 *
522 * This routine takes a physical entry and runs through all mappings attached to it and turns
523 * off the reference bit.
524 */
525
526 void mapping_clr_ref(ppnum_t pa) { /* Clears the reference bit of a physical page */
527
528 unsigned int pindex;
529 phys_entry *physent;
530
531 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
532 if(!physent) { /* Did we find the physical page? */
533 panic("mapping_clr_ref: invalid physical page %08X\n", pa);
534 }
535
536 hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy, 0); /* Clear reference for page and mappings */
537 return; /* Leave... */
538 }
539
540
541 /*
542 * void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
543 *
544 * This routine takes a physical entry and runs through all mappings attached to it and turns
545 * on the reference bit.
546 */
547
548 void mapping_set_ref(ppnum_t pa) { /* Sets the reference bit of a physical page */
549
550 unsigned int pindex;
551 phys_entry *physent;
552
553 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
554 if(!physent) { /* Did we find the physical page? */
555 panic("mapping_set_ref: invalid physical page %08X\n", pa);
556 }
557
558 hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy, 0); /* Set reference for page and mappings */
559 return; /* Leave... */
560 }
561
562
563 /*
564 * boolean_t mapping_tst_mod(ppnum_t pa) - tests the change bit of a physical page
565 *
566 * This routine takes a physical entry and runs through all mappings attached to it and tests
567 * the changed bit.
568 */
569
570 boolean_t mapping_tst_mod(ppnum_t pa) { /* Tests the change bit of a physical page */
571
572 unsigned int pindex, rc;
573 phys_entry *physent;
574
575 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
576 if(!physent) { /* Did we find the physical page? */
577 panic("mapping_tst_mod: invalid physical page %08X\n", pa);
578 }
579
580 rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop, 0); /* Test change for page and mappings */
581 return ((rc & (unsigned long)ppC) != 0); /* Leave with change bit */
582 }
583
584
585 /*
586 * boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
587 *
588 * This routine takes a physical entry and runs through all mappings attached to it and tests
589 * the reference bit.
590 */
591
592 boolean_t mapping_tst_ref(ppnum_t pa) { /* Tests the reference bit of a physical page */
593
594 unsigned int pindex, rc;
595 phys_entry *physent;
596
597 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
598 if(!physent) { /* Did we find the physical page? */
599 panic("mapping_tst_ref: invalid physical page %08X\n", pa);
600 }
601
602 rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop, 0); /* Test reference for page and mappings */
603 return ((rc & (unsigned long)ppR) != 0); /* Leave with reference bit */
604 }
605
606
607 /*
608 * phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the physical entry for a page
609 *
610 * This routine takes a physical page number and returns the phys_entry associated with it. It also
611 * calculates the word index of the bank (memory region) that contains the entry.
612 *
613 */
614
615 phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) { /* Finds the physical entry for the page */
616
617 phys_entry *physent;
618 int i;
619
620 for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */
621 if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */
622 if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue; /* This isn't ours */
623
624 *pindex = (i * sizeof(mem_region_t)) / 4; /* Make the word index to this list */
625
626 return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart]; /* Return the physent pointer */
627 }
628
629 return (phys_entry *)0; /* Shucks, can't find it... */
630
631 }
632
633
634
635
636 /*
637 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
638 *
639 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
640 * the number of free mappings remaining, and if below a threshold, replenishes them.
641 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
642 * a new one is allocated.
643 *
644 * This routine allocates and/or frees memory and must be called from a safe place.
645 * Currently, vm_pageout_scan is the safest place.
646 */
647
648 thread_call_t mapping_adjust_call;
649 static thread_call_data_t mapping_adjust_call_data;
650
651 void mapping_adjust(void) { /* Adjust free mappings */
652
653 kern_return_t retr;
654 mappingblok *mb, *mbn;
655 spl_t s;
656 int allocsize, i;
657 extern int vm_page_free_count;
658
659 if(mapCtl.mapcmin <= MAPPERBLOK) {
660 mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
661
662 #if DEBUG
663 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
664 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
665 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
666 #endif
667 }
668
669 s = splhigh(); /* Don't bother from now on */
670 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
671 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
672 }
673
674 if (mapping_adjust_call == NULL) {
675 thread_call_setup(&mapping_adjust_call_data,
676 (thread_call_func_t)mapping_adjust,
677 (thread_call_param_t)NULL);
678 mapping_adjust_call = &mapping_adjust_call_data;
679 }
680
681 while(1) { /* Keep going until we've got enough */
682
683 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
684 if(allocsize < 1) break; /* Leave if we have all we need */
685
686 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
687 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
688 mapCtl.mapcreln--; /* Back off the count */
689 allocsize = MAPPERBLOK; /* Show we allocated one block */
690 }
691 else { /* No free ones, try to get it */
692
693 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
694
695 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
696 splx(s); /* Restore 'rupts */
697
698 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
699 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
700 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
701 break;
702 }
703 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
704 }
705
706 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
707 s = splhigh(); /* Don't bother from now on */
708 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
709 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
710 }
711 }
712
713 if (retr != KERN_SUCCESS)
714 break; /* Failed to allocate, bail out... */
715 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
716 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
717 mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
718 }
719
720 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
721 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
722 }
723
724 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
725 mapCtl.mapcrecurse = 0; /* We are done now */
726 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
727 splx(s); /* Restore 'rupts */
728 return; /* Return... */
729 }
730
731 mbn = mapCtl.mapcrel; /* Get first pending release block */
732 mapCtl.mapcrel = 0; /* Dequeue them */
733 mapCtl.mapcreln = 0; /* Set count to 0 */
734
735 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
736 splx(s); /* Restore 'rupts */
737
738 while((unsigned int)mbn) { /* Toss 'em all */
739 mb = mbn->nextblok; /* Get the next */
740
741 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
742
743 mbn = mb; /* Chain to the next */
744 }
745
746 __asm__ volatile("eieio"); /* Make sure all is well */
747 mapCtl.mapcrecurse = 0; /* We are done now */
748 return;
749 }
750
751 /*
752 * mapping_free(mapping *mp) - release a mapping to the free list
753 *
754 * This routine takes a mapping and adds it to the free list.
755 * If this mapping makes the block non-empty, we queue it to the free block list.
756 * NOTE: we might want to queue it to the end to squelch the pathological
757 * case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
758 * If this release fills a block and we are above the threshold, we release the block
759 */
760
761 void mapping_free(struct mapping *mp) { /* Release a mapping */
762
763 mappingblok *mb, *mbn;
764 spl_t s;
765 unsigned int full, mindx, lists;
766
767 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6; /* Get index to mapping */
768 mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
769 lists = (mp->mpFlags & mpLists); /* get #lists */
770 if ((lists == 0) || (lists > kSkipListMaxLists)) /* panic if out of range */
771 panic("mapping_free: mpLists invalid\n");
772
773 #if 0
774 mp->mpFlags = 0x99999999; /* (BRINGUP) */
775 mp->mpSpace = 0x9999; /* (BRINGUP) */
776 mp->mpBSize = 0x9999; /* (BRINGUP) */
777 mp->mpPte = 0x99999998; /* (BRINGUP) */
778 mp->mpPAddr = 0x99999999; /* (BRINGUP) */
779 mp->mpVAddr = 0x9999999999999999ULL; /* (BRINGUP) */
780 mp->mpAlias = 0x9999999999999999ULL; /* (BRINGUP) */
781 mp->mpList0 = 0x9999999999999999ULL; /* (BRINGUP) */
782 mp->mpList[0] = 0x9999999999999999ULL; /* (BRINGUP) */
783 mp->mpList[1] = 0x9999999999999999ULL; /* (BRINGUP) */
784 mp->mpList[2] = 0x9999999999999999ULL; /* (BRINGUP) */
785
786 if(lists > mpBasicLists) { /* (BRINGUP) */
787 mp->mpList[3] = 0x9999999999999999ULL; /* (BRINGUP) */
788 mp->mpList[4] = 0x9999999999999999ULL; /* (BRINGUP) */
789 mp->mpList[5] = 0x9999999999999999ULL; /* (BRINGUP) */
790 mp->mpList[6] = 0x9999999999999999ULL; /* (BRINGUP) */
791 mp->mpList[7] = 0x9999999999999999ULL; /* (BRINGUP) */
792 mp->mpList[8] = 0x9999999999999999ULL; /* (BRINGUP) */
793 mp->mpList[9] = 0x9999999999999999ULL; /* (BRINGUP) */
794 mp->mpList[10] = 0x9999999999999999ULL; /* (BRINGUP) */
795 }
796 #endif
797
798
799 s = splhigh(); /* Don't bother from now on */
800 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
801 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
802 }
803
804 full = !(mb->mapblokfree[0] | mb->mapblokfree[1]); /* See if full now */
805 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
806 if ( lists > mpBasicLists ) { /* if big block, lite the 2nd bit too */
807 mindx++;
808 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
809 mapCtl.mapcfree++;
810 mapCtl.mapcinuse--;
811 }
812
813 if(full) { /* If it was full before this: */
814 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
815 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
816 if(!((unsigned int)mapCtl.mapclast))
817 mapCtl.mapclast = mb;
818 }
819
820 mapCtl.mapcfree++; /* Bump free count */
821 mapCtl.mapcinuse--; /* Decrement in use count */
822
823 mapCtl.mapcfreec++; /* Count total calls */
824
825 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
826 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) { /* See if empty now */
827
828 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
829 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
830 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
831 }
832 else { /* We're not first */
833 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
834 if(mbn->nextblok == mb) break; /* Is the next one our's? */
835 }
836 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
837 mbn->nextblok = mb->nextblok; /* Dequeue us */
838 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
839 }
840
841 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
842 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
843 mapCtl.mapcnext = mb; /* Chain us to the head */
844 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
845 }
846 else {
847 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
848 mapCtl.mapcreln++; /* Count on release list */
849 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
850 mapCtl.mapcrel = mb; /* Chain us in front */
851 }
852 }
853 }
854
855 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
856 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
857 thread_call_enter(mapping_adjust_call); /* Go toss some */
858 }
859 }
860 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
861 splx(s); /* Restore 'rupts */
862
863 return; /* Bye, dude... */
864 }
865
866
867 /*
868 * mapping_alloc(lists) - obtain a mapping from the free list
869 *
870 * This routine takes a mapping off of the free list and returns its address.
871 * The mapping is zeroed, and its mpLists count is set. The caller passes in
872 * the number of skiplists it would prefer; if this number is greater than
873 * mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
874 * just two consecutive free entries coalesced into one. If we cannot find
875 * two consecutive free entries, we clamp the list count down to mpBasicLists
876 * and return a basic 64-byte node. Our caller never knows the difference.
877 *
878 * If this allocation empties a block, we remove it from the free list.
879 * If this allocation drops the total number of free entries below a threshold,
880 * we allocate a new block.
881 *
882 */
883
884 mapping *mapping_alloc(int lists) { /* Obtain a mapping */
885
886 register mapping *mp;
887 mappingblok *mb, *mbn;
888 spl_t s;
889 int mindx;
890 kern_return_t retr;
891 int big = (lists > mpBasicLists); /* set flag if big block req'd */
892 pmap_t refpmap, ckpmap;
893 unsigned int space, i;
894 int ref_count;
895 addr64_t va, nextva;
896 extern pmap_t free_pmap_list;
897 extern int free_pmap_count;
898 decl_simple_lock_data(extern,free_pmap_lock)
899 boolean_t found_mapping;
900 boolean_t do_rescan;
901
902 s = splhigh(); /* Don't bother from now on */
903 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
904 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
905 }
906
907 if(!((unsigned int)mapCtl.mapcnext)) { /* Are there any free mappings? */
908
909 /*
910 * No free mappings. First, there may be some mapping blocks on the "to be released"
911 * list. If so, rescue one. Otherwise, try to steal a couple blocks worth.
912 */
913
914 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
915 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
916 mapCtl.mapcreln--; /* Back off the count */
917 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
918 goto rescued;
919 }
920
921 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
922
923 simple_lock(&free_pmap_lock);
924
925 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
926 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
927 }
928
929 if (!((unsigned int)mapCtl.mapcnext)) {
930
931 refpmap = (pmap_t)cursor_pmap->pmap_link.next;
932 space = mapCtl.mapcflush.spacenum;
933 while (refpmap != cursor_pmap) {
934 if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
935 refpmap = (pmap_t)refpmap->pmap_link.next;
936 }
937
938 ckpmap = refpmap;
939 va = mapCtl.mapcflush.addr;
940 found_mapping = FALSE;
941
942 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
943
944 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
945
946 ckpmap = (pmap_t)ckpmap->pmap_link.next;
947
948 if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)) {
949 do_rescan = TRUE;
950 for (i=0;i<8;i++) {
951 mp = hw_purge_map(ckpmap, va, &nextva);
952
953 if((unsigned int)mp & mapRetCode) {
954 panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
955 }
956
957 if(!mp) {
958 if (do_rescan)
959 do_rescan = FALSE;
960 else
961 break;
962 } else {
963 mapping_free(mp);
964 found_mapping = TRUE;
965 }
966
967 va = nextva;
968 }
969 }
970
971 if (ckpmap == refpmap) {
972 if (found_mapping == FALSE)
973 panic("no valid pmap to purge mappings\n");
974 else
975 found_mapping = FALSE;
976 }
977
978 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
979 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
980 }
981
982 }
983
984 mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
985 mapCtl.mapcflush.addr = nextva;
986 }
987
988 simple_unlock(&free_pmap_lock);
989 }
990
991 rescued:
992
993 mb = mapCtl.mapcnext;
994
995 if ( big ) { /* if we need a big (128-byte) mapping */
996 mapCtl.mapcbig++; /* count attempts to allocate a big mapping */
997 mbn = NULL; /* this will be prev ptr */
998 mindx = 0;
999 while( mb ) { /* loop over mapping blocks with free entries */
1000 mindx = mapalc2(mb); /* try for 2 consecutive free bits in this block */
1001
1002 if ( mindx ) break; /* exit loop if we found them */
1003 mbn = mb; /* remember previous block */
1004 mb = mb->nextblok; /* move on to next block */
1005 }
1006 if ( mindx == 0 ) { /* if we couldn't find 2 consecutive bits... */
1007 mapCtl.mapcbigfails++; /* count failures */
1008 big = 0; /* forget that we needed a big mapping */
1009 lists = mpBasicLists; /* clamp list count down to the max in a 64-byte mapping */
1010 mb = mapCtl.mapcnext; /* back to the first block with a free entry */
1011 }
1012 else { /* if we did find a big mapping */
1013 mapCtl.mapcfree--; /* Decrement free count twice */
1014 mapCtl.mapcinuse++; /* Bump in use count twice */
1015 if ( mindx < 0 ) { /* if we just used the last 2 free bits in this block */
1016 if (mbn) { /* if this wasn't the first block */
1017 mindx = -mindx; /* make positive */
1018 mbn->nextblok = mb->nextblok; /* unlink this one from the middle of block list */
1019 if (mb == mapCtl.mapclast) { /* if we emptied last block */
1020 mapCtl.mapclast = mbn; /* then prev block is now last */
1021 }
1022 }
1023 }
1024 }
1025 }
1026
1027 if ( !big ) { /* if we need a small (64-byte) mapping */
1028 if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */
1029 panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
1030 }
1031
1032 if(mindx < 0) { /* Did we just take the last one */
1033 mindx = -mindx; /* Make positive */
1034 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1035 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1036 }
1037
1038 mapCtl.mapcfree--; /* Decrement free count */
1039 mapCtl.mapcinuse++; /* Bump in use count */
1040
1041 mapCtl.mapcallocc++; /* Count total calls */
1042
1043 /*
1044 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1045 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1046 * rescuable, we will kick the misc scan, which will allocate some for us. We only do this
1047 * if we haven't already done it.
1048 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1049 * the release list with as much as we need until threads start.
1050 */
1051
1052 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
1053 if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */
1054 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1055 mapCtl.mapcreln--; /* Back off the count */
1056 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1057 }
1058 else { /* We need to replenish */
1059 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1060 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1061 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1062 }
1063 }
1064 }
1065 }
1066
1067 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1068 splx(s); /* Restore 'rupts */
1069
1070 mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */
1071 mp->mpFlags = lists; /* set the list count */
1072
1073
1074 return mp; /* Send it back... */
1075 }
1076
1077
1078 void
1079 consider_mapping_adjust()
1080 {
1081 spl_t s;
1082
1083 s = splhigh(); /* Don't bother from now on */
1084 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1085 panic("consider_mapping_adjust -- lock timeout\n");
1086 }
1087
1088 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1089 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1090 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1091 }
1092 }
1093
1094 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1095 splx(s); /* Restore 'rupts */
1096
1097 }
1098
1099
1100
1101 /*
1102 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1103 *
1104 * The mapping block is a page size area on a page boundary. It contains 1 header and 63
1105 * mappings. This call adds and initializes a block for use. Mappings come in two sizes,
1106 * 64 and 128 bytes (the only difference is the number of skip-lists.) When we allocate a
1107 * 128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
1108 * code only deals with "basic" 64-byte mappings. This works for two reasons:
1109 * - Only one in 256 mappings is big, so they are rare.
1110 * - If we cannot find two consecutive free mappings, we just return a small one.
1111 * There is no problem with doing this, except a minor performance degradation.
1112 * Therefore, all counts etc in the mapping control structure are in units of small blocks.
1113 *
1114 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1115 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1116 * corresponds to the header). The translation mask is the XOR of the virtual and real
1117 * addresses (needless to say, the block must be wired).
1118 *
1119 * We handle these mappings the same way as saveareas: the block is only on the chain so
1120 * long as there are free entries in it.
1121 *
1122 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1123 * mappings. Blocks marked PERM won't ever be released.
1124 *
1125 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1126 * list. We do this only at start up time. This is done because we only allocate blocks
1127 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1128 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1129 * them on the release queue, the allocate routine will rescue them. Then when the
1130 * pageout scan starts, all extra ones will be released.
1131 *
1132 */
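/*
 * Illustrative note (derived from mapping_free above and the initialization below):
 * a page holds 1 header plus 63 64-byte slots, so the free bit for slot mindx lives
 * in mapblokfree[mindx >> 5] under the mask (0x80000000 >> (mindx & 31)); the initial
 * values 0x7FFFFFFF and 0xFFFFFFFF mark slots 1-63 free and slot 0 (the header) in use.
 */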
1133
1134
1135 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1136 /* Sets start and end of a block of mappings
1137 perm indicates if the block can be released
1138 or goes straight to the release queue .
1139 locked indicates if the lock is held already */
1140
1141 mappingblok *mb;
1142 spl_t s;
1143 int i;
1144 addr64_t raddr;
1145 ppnum_t pp;
1146
1147 mb = (mappingblok *)mbl; /* Start of area */
1148
1149 if(perm >= 0) { /* See if we need to initialize the block */
1150 if(perm) {
1151 raddr = (addr64_t)((unsigned int)mbl); /* Perm means V=R */
1152 mb->mapblokflags = mbPerm; /* Set perm */
1153 // mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1154 }
1155 else {
1156 pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl); /* Get the physical page */
1157 if(!pp) { /* What gives? Where's the page? */
1158 panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
1159 }
1160
1161 raddr = (addr64_t)pp << 12; /* Convert physical page to physical address */
1162 mb->mapblokflags = 0; /* Set not perm */
1163 // mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1164 }
1165
1166 mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl); /* Form translation mask */
1167
1168 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1169 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1170 }
1171
1172 s = splhigh(); /* Don't bother from now on */
1173 if(!locked) { /* Do we need the lock? */
1174 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1175 panic("mapping_free_init: timeout getting control lock\n"); /* Tell all and die */
1176 }
1177 }
1178
1179 if(perm < 0) { /* Direct to release queue? */
1180 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1181 mapCtl.mapcrel = mb; /* Queue us on in */
1182 mapCtl.mapcreln++; /* Count the free block */
1183 }
1184 else { /* Add to the free list */
1185
1186 mb->nextblok = 0; /* We always add to the end */
1187 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1188
1189 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1190 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1191 }
1192 else { /* We are not the first */
1193 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1194 mapCtl.mapclast = mb; /* We are now last */
1195 }
1196 }
1197
1198 if(!locked) { /* Do we need to unlock? */
1199 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1200 }
1201
1202 splx(s); /* Restore 'rupts */
1203 return; /* All done, leave... */
1204 }
1205
1206
1207 /*
1208 * void mapping_prealloc(unsigned int) - Preallocates mappings for large request
1209 *
1210 * No locks can be held, because we allocate memory here.
1211 * This routine needs a corresponding mapping_relpre call to remove the
1212 * hold off flag so that the adjust routine will free the extra mapping
1213 * blocks on the release list. I don't like this, but I don't know
1214 * how else to do this for now...
1215 *
1216 */
1217
1218 void mapping_prealloc(unsigned int size) { /* Preallocates mappings for large request */
1219
1220 int nmapb, i;
1221 kern_return_t retr;
1222 mappingblok *mbn;
1223 spl_t s;
1224
1225 s = splhigh(); /* Don't bother from now on */
1226 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1227 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1228 }
1229
1230 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1231
1232 mapCtl.mapcholdoff++; /* Bump the hold off count */
1233
1234 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1235 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1236 splx(s); /* Restore 'rupts */
1237 return;
1238 }
1239 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1240 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1241 splx(s); /* Restore 'rupts */
1242 return;
1243 }
1244 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1245
1246 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1247 splx(s); /* Restore 'rupts */
1248
1249 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1250 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1251 if(retr != KERN_SUCCESS) /* Did we get some memory? */
1252 break;
1253 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
1254 }
1255 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1256 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1257
1258 mapCtl.mapcrecurse = 0; /* We are done now */
1259 }
1260
1261 /*
1262 * void mapping_relpre(void) - Releases preallocation release hold off
1263 *
1264 * This routine removes the
1265 * hold off flag so that the adjust routine will free the extra mapping
1266 * blocks on the release list. I don't like this, but I don't know
1267 * how else to do this for now...
1268 *
1269 */
1270
1271 void mapping_relpre(void) { /* Releases release hold off */
1272
1273 spl_t s;
1274
1275 s = splhigh(); /* Don't bother from now on */
1276 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1277 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1278 }
1279 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1280 panic("mapping_relpre: hold-off count went negative\n");
1281 }
1282
1283 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1284 splx(s); /* Restore 'rupts */
1285 }
1286
1287 /*
1288 * void mapping_free_prime(void) - Primes the mapping block release list
1289 *
1290 * See mapping_free_init.
1291 * No locks can be held, because we allocate memory here.
1292 * One processor running only.
1293 *
1294 */
1295
1296 void mapping_free_prime(void) { /* Primes the mapping block release list */
1297
1298 int nmapb, i;
1299 kern_return_t retr;
1300 mappingblok *mbn;
1301 vm_offset_t mapping_min;
1302
1303 retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
1304 FALSE, TRUE, &mapping_map);
1305
1306 if (retr != KERN_SUCCESS)
1307 panic("mapping_free_prime: kmem_suballoc failed");
1308
1309
1310 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1311 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1312
1313 #if DEBUG
1314 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1315 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1316 #endif
1317
1318 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1319 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1320 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1321 panic("Whoops... Not a bit of wired memory left for anyone\n");
1322 }
1323 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1324 }
1325 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1326 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1327 }
1328
1329
1330
1331 void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1332 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1333 {
1334 *count = mapCtl.mapcinuse;
1335 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1336 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1337 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1338 *alloc_size = PAGE_SIZE;
1339
1340 *collectable = 1;
1341 *exhaustable = 0;
1342 }
1343
1344
1345 /*
1346 * addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
1347 *
1348 * First looks up the physical entry associated with the physical page. Then searches the alias
1349 * list for a matching pmap. It grabs the virtual address from the mapping, drops busy, and returns
1350 * that.
1351 *
1352 */
1353
1354 addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) { /* Finds first virtual mapping of a physical page in a space */
1355
1356 spl_t s;
1357 mapping *mp;
1358 unsigned int pindex;
1359 phys_entry *physent;
1360 addr64_t va;
1361
1362 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1363 if(!physent) { /* Did we find the physical page? */
1364 panic("mapping_p2v: invalid physical page %08X\n", pa);
1365 }
1366
1367 s = splhigh(); /* Make sure interruptions are disabled */
1368
1369 mp = hw_find_space(physent, pmap->space); /* Go find the first mapping to the page from the requested pmap */
1370
1371 if(mp) { /* Did we find one? */
1372 va = mp->mpVAddr & -4096; /* If so, get the cleaned up vaddr */
1373 mapping_drop_busy(mp); /* Go ahead and release the mapping now */
1374 }
1375 else va = 0; /* Return failure */
1376
1377 splx(s); /* Restore 'rupts */
1378
1379 return va; /* Bye, bye... */
1380
1381 }
1382
1383 /*
1384 * phystokv(addr)
1385 *
1386 * Convert a physical address to a kernel virtual address if
1387 * there is a mapping, otherwise return NULL
1388 */
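/*
 * Example (illustrative): phystokv(0x12345678) looks up physical page 0x12345 and,
 * if the kernel has it mapped at va, returns va | 0x678.
 */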
1389
1390 vm_offset_t phystokv(vm_offset_t pa) {
1391
1392 addr64_t va;
1393 ppnum_t pp;
1394
1395 pp = pa >> 12; /* Convert to a page number */
1396
1397 if(!(va = mapping_p2v(kernel_pmap, pp))) {
1398 return 0; /* Can't find it, return 0... */
1399 }
1400
1401 return (va | (pa & (PAGE_SIZE - 1))); /* Build and return VADDR... */
1402
1403 }
1404
1405 /*
1406 * kvtophys(addr)
1407 *
1408 * Convert a kernel virtual address to a physical address
1409 */
1410 vm_offset_t kvtophys(vm_offset_t va) {
1411
1412 return pmap_extract(kernel_pmap, va); /* Find mapping and lock the physical entry for this mapping */
1413
1414 }
1415
1416 /*
1417 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1418 * page 0 access for the current thread.
1419 *
1420 * If parameter is TRUE, faults are ignored
1421 * If parameter is FALSE, faults are honored
1422 *
1423 */
1424
1425 void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1426
1427 if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1428 else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1429
1430 return; /* Return... */
1431 }
1432
1433
1434 /*
1435 * Copies data between a physical page and a virtual page, or 2 physical. This is used to
1436 * move data from the kernel to user state. Note that the "which" parm
1437 * says which of the parameters is physical and if we need to flush sink/source.
1438 * Note that both addresses may be physical, but only one may be virtual
1439 *
1440 * The rules are that the size can be anything. Either address can be on any boundary
1441 * and span pages. The physical data must be contiguous, as must the virtual.
1442 *
1443 * We can block when we try to resolve the virtual address at each page boundary.
1444 * We don't check protection on the physical page.
1445 *
1446  *	Note that we do not check the entire range up front; if a page translation fails,
1447  *	we stop with partial contents copied.
1448 *
1449 */
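/*
 *	A minimal usage sketch (illustrative only; "src_phys" and "user_va" are hypothetical).
 *	The flag bits are the ones tested below: cppvPsrc marks the source as physical, so
 *	the sink is a virtual address in the current map, and cppvFsnk asks for the sink to
 *	be flushed around the move.
 */
#if 0	/* hedged sketch, not compiled */
	kern_return_t kr;

	kr = copypv(src_phys, user_va, 4096, cppvPsrc | cppvFsnk);	/* Copy one page, physical source to user virtual sink */
	if(kr != KERN_SUCCESS) {
		/* A page of the virtual side could not be faulted in; the copy may be partial */
	}
#endif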
1450
1451 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) {
1452
1453 vm_map_t map;
1454 kern_return_t ret;
1455 addr64_t pa, nextva, vaddr, paddr;
1456 register mapping *mp;
1457 spl_t s;
1458 unsigned int sz, left, lop, csize;
1459 int needtran, bothphys;
1460 unsigned int pindex;
1461 phys_entry *physent;
1462 vm_prot_t prot;
1463
1464 map = (which & cppvKmap) ? kernel_map : current_map_fast();
1465
1466 	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {			/* Make sure that no more than one side is virtual */
1467 panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
1468 }
1469
1470 bothphys = 1; /* Assume both are physical */
1471
1472 if(!(which & cppvPsnk)) { /* Is there a virtual page here? */
1473 vaddr = sink; /* Sink side is virtual */
1474 bothphys = 0; /* Show both aren't physical */
1475 prot = VM_PROT_READ | VM_PROT_WRITE; /* Sink always must be read/write */
1476 } else if(!(which & cppvPsrc)) { /* Source side is virtual */
1477 vaddr = source; /* Source side is virtual */
1478 bothphys = 0; /* Show both aren't physical */
1479 prot = VM_PROT_READ; /* Virtual source is always read only */
1480 }
1481
1482 needtran = 1; /* Show we need to map the virtual the first time */
1483 s = splhigh(); /* Don't bother me */
1484
1485 while(size) {
1486
1487 if(!bothphys && (needtran || !(vaddr & 4095LL))) { /* If first time or we stepped onto a new page, we need to translate */
1488 if(!needtran) { /* If this is not the first translation, we need to drop the old busy */
1489 mapping_drop_busy(mp); /* Release the old mapping now */
1490 }
1491 needtran = 0;
1492
1493 while(1) {
1494 mp = mapping_find(map->pmap, vaddr, &nextva, 1); /* Find and busy the mapping */
1495 if(!mp) { /* Was it there? */
1496 if(per_proc_info[cpu_number()].istackptr == 0)
1497 panic("copypv: No vaild mapping on memory %s %x", "RD", vaddr);
1498
1499 splx(s); /* Restore the interrupt level */
1500 ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), prot, FALSE, NULL, 0); /* Didn't find it, try to fault it in... */
1501
1502 if(ret != KERN_SUCCESS)return KERN_FAILURE; /* Didn't find any, return no good... */
1503
1504 s = splhigh(); /* Don't bother me */
1505 continue; /* Go try for the map again... */
1506
1507 }
1508
1509 /* Note that we have to have the destination writable. So, if we already have it, or we are mapping the source,
1510 we can just leave.
1511 */
1512 if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break; /* We got it mapped R/W or the source is not virtual, leave... */
1513
1514 mapping_drop_busy(mp); /* Go ahead and release the mapping for now */
1515 if(per_proc_info[cpu_number()].istackptr == 0)
1516 panic("copypv: No vaild mapping on memory %s %x", "RDWR", vaddr);
1517 splx(s); /* Restore the interrupt level */
1518
1519 ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* check for a COW area */
1520 if (ret != KERN_SUCCESS) return KERN_FAILURE; /* We couldn't get it R/W, leave in disgrace... */
1521 s = splhigh(); /* Don't bother me */
1522 }
1523 paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL)); /* construct the physical address... this calculation works */
1524 /* properly on both single page and block mappings */
1525 if(which & cppvPsrc) sink = paddr; /* If source is physical, then the sink is virtual */
1526 			else source = paddr;					/* Otherwise the source is the virtual side */
1527 }
1528
1529 lop = (unsigned int)(4096LL - (sink & 4095LL)); /* Assume sink smallest */
1530 if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL)); /* No, source is smaller */
1531
1532 csize = size; /* Assume we can copy it all */
1533 if(lop < size) csize = lop; /* Nope, we can't do it all */
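 		/* Worked example (hedged, values hypothetical): with the sink at page offset 0xF00
 		   and the source at offset 0x800, lop = min(0x100, 0x800) = 0x100, so at most 0x100
 		   bytes move on this pass, keeping both sides within their current pages. */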
1534
1535 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source before move */
1536 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink before move */
1537
1538 bcopy_physvir(source, sink, csize); /* Do a physical copy, virtually */
1539
1540 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source after move */
1541 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink after move */
1542
1543 /*
1544 * Note that for certain ram disk flavors, we may be copying outside of known memory.
1545  *		Therefore, before we try to mark it modified, we check if it exists.
1546 */
1547
1548 if( !(which & cppvNoModSnk)) {
1549 physent = mapping_phys_lookup(sink >> 12, &pindex); /* Get physical entry for sink */
1550 if(physent) mapping_set_mod((ppnum_t)(sink >> 12)); /* Make sure we know that it is modified */
1551 }
1552 if( !(which & cppvNoRefSrc)) {
1553 physent = mapping_phys_lookup(source >> 12, &pindex); /* Get physical entry for source */
1554 			if(physent) mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it has been referenced */
1555 }
1556 size = size - csize; /* Calculate what is left */
1557 		vaddr = vaddr + csize;					/* Move to the next virtual address */
1558 source = source + csize; /* Bump source to next physical address */
1559 sink = sink + csize; /* Bump sink to next physical address */
1560 }
1561
1562 if(!bothphys) mapping_drop_busy(mp); /* Go ahead and release the mapping of the virtual page if any */
1563 splx(s); /* Open up for interrupts */
1564
1565 return KERN_SUCCESS;
1566 }
1567
1568
1569 /*
1570 * Debug code
1571 */
1572
1573 void mapping_verify(void) {
1574
1575 spl_t s;
1576 mappingblok *mb, *mbn;
1577 int relncnt;
1578 unsigned int dumbodude;
1579
1580 dumbodude = 0;
1581
1582 s = splhigh(); /* Don't bother from now on */
1583
1584 mbn = 0; /* Start with none */
1585 for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) { /* Walk the free chain */
1586 if((mb->mapblokflags & 0x7FFFFFFF) != mb) { /* Is tag ok? */
1587 panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
1588 }
1589 mbn = mb; /* Remember the last one */
1590 }
1591
1592 if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) { /* Do we point to the last one? */
1593 panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
1594 }
1595
1596 relncnt = 0; /* Clear count */
1597 for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) { /* Walk the release chain */
1598 dumbodude |= mb->mapblokflags; /* Just touch it to make sure it is mapped */
1599 relncnt++; /* Count this one */
1600 }
1601
1602 if(mapCtl.mapcreln != relncnt) { /* Is the count on release queue ok? */
1603 panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
1604 }
1605
1606 splx(s); /* Restore 'rupts */
1607
1608 return;
1609 }
1610
1611 void mapping_phys_unused(ppnum_t pa) {
1612
1613 unsigned int pindex;
1614 phys_entry *physent;
1615
1616 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1617 if(!physent) return; /* Did we find the physical page? */
1618
1619 if(!(physent->ppLink & ~(ppLock | ppN | ppFlags))) return; /* No one else is here */
1620
1621 panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
1622
1623 }
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633