1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
24 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
25 * Currently, some of the function of this module is contained within pmap.c. We may want to move
26 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
27 *
28 * We also depend upon the structure of the phys_entry control block. We do put some processor
29 * specific stuff in there.
30 *
31 */
32
33#include <debug.h>
34#include <mach_kgdb.h>
35#include <mach_vm_debug.h>
36#include <db_machine_commands.h>
37
91447636 38#include <mach/mach_types.h>
39#include <mach/vm_attributes.h>
40#include <mach/vm_param.h>
41
42#include <kern/kern_types.h>
43#include <kern/thread.h>
44#include <kern/spl.h>
45#include <kern/misc_protos.h>
46
55e303ae 47#include <vm/vm_fault.h>
48#include <vm/vm_kern.h>
49#include <vm/vm_map.h>
50#include <vm/vm_page.h>
91447636 51#include <vm/pmap.h>
1c79356b 52
55e303ae 53#include <ppc/exception.h>
54#include <ppc/misc_protos.h>
55#include <ppc/proc_reg.h>
1c79356b 56#include <ppc/pmap.h>
1c79356b 57#include <ppc/mem.h>
58#include <ppc/new_screen.h>
59#include <ppc/Firmware.h>
60#include <ppc/mappings.h>
61#include <ddb/db_output.h>
62
55e303ae 63#include <console/video_console.h> /* (TEST/DEBUG) */
64
65#define PERFTIMES 0
66
67vm_map_t mapping_map = VM_MAP_NULL;
68
55e303ae 69unsigned int incrVSID = 0; /* VSID increment value */
1c79356b 70unsigned int mappingdeb0 = 0;
71unsigned int mappingdeb1 = 0;
72int ppc_max_adrsp; /* Maximum address spaces */
73
74addr64_t *mapdebug; /* (BRINGUP) */
75extern unsigned int DebugWork; /* (BRINGUP) */
76
77void mapping_verify(void);
78void mapping_phys_unused(ppnum_t pa);
79
1c79356b 80/*
81 * ppc_prot translates Mach's representation of protections to that of the PPC hardware.
82 * For Virtual Machines (VMM), we also provide translation entries where the output is
83 * the same as the input, allowing direct specification of PPC protections. Mach's
84 * representations are always in the range 0..7, so they always fall into the first
85 * 8 table entries; direct translations are placed in the range 8..15, so they fall into
86 * the second half of the table.
87 *
88 * ***NOTE*** I've commented out the Mach->PPC translations that would set page-level
89 * no-execute, pending updates to the VM layer that will properly enable its
90 * use. Bob Abeles 08.02.04
1c79356b 91 */
92
93//unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2, /* Mach -> PPC translations */
94unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, /* Mach -> PPC translations */
95 0, 1, 2, 3, 4, 5, 6, 7 }; /* VMM direct translations */
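/*
 *	Illustrative sketch (added for clarity, not part of the original source): how a lookup
 *	through the table above turns a 4-bit key into PPC page-protection bits.  The helper name
 *	example_prot_lookup and the masking of the key are assumptions for illustration only; the
 *	kernel's own accessor, getProtPPC(), is the one used later in this file.
 */

static unsigned int example_prot_lookup(unsigned int key) {
	return (unsigned int)ppc_prot[key & 0xF];	/* Mach modes use entries 0..7, VMM direct modes use 8..15 */
}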
96
97/*
98 * About PPC VSID generation:
99 *
100 * This function is called to generate an address space ID. This space ID must be unique within
101 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
102 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
103 * 4, so, we can have 2^20 (2M) unique IDs. Each pmap has a unique space ID, so we should be able
104 * to have 2M pmaps at a time, though we couldn't; we'd run out of memory way before then. The
105 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
106 * they are released. This causes us to lose track of what space IDs are free to be reused.
107 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
108 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
109 *
110 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
111 * calculation for virtual address lookup. An improperly chosen value could potentially cause
112 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
113 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
114 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
115 * are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
116 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
117 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
118 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
119 * with no overflow. I don't think that this is a problem.
120 *
121 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
122 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
123 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
124 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
125 * the same modulo 512. We can reduce this problem by having the segment number be bits
126 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
127 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
128 * I don't think that it is as significant as the other, so, I'll make the space ID
129 * with segment first.
130 *
131 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
132 * While this is a problem that should only happen in periods counted in weeks, it can and
133 * will happen. This is assuming a monotonically increasing space ID. If we were to search
134 * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
135 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
136 *
137 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
138 * locked by free_pmap_lock) that is sorted in VSID sequence order.
139 *
140 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
141 * the last that was freed. Then we allocate that.
142 *
143 * NOTE: We must be called with interruptions off and free_pmap_lock held.
144 *
145 */
146
147/*
148 * mapping_init();
149 * Do anything that needs to be done before the mapping system can be used.
150 * Hash table must be initialized before we call this.
151 *
152 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
153 */
154
155void mapping_init(void) {
156
55e303ae 157 unsigned int tmp, maxeff, rwidth;
d7e50217 158
55e303ae 159 ppc_max_adrsp = maxAdrSp; /* Set maximum address spaces */
1c79356b 160
55e303ae 161 maxeff = 32; /* Assume 32-bit */
91447636 162 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64; /* Is this a 64-bit machine? */
d7e50217 163
91447636 164 rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb; /* Reduce address width by width of address space ID */
55e303ae 165 if(rwidth > maxeff) rwidth = maxeff; /* If we still have more virtual than effective, clamp at effective */
de355530 166
55e303ae 167 vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth); /* Get maximum effective address supported */
91447636 168 vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr); /* Get maximum physical address supported */
de355530 169
91447636 170 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) { /* Are we 64 bit? */
171 tmp = 12; /* Size of hash space */
172 }
173 else {
174 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
175 tmp = 32 - tmp; /* Size of hash space */
176 }
de355530 177
178 incrVSID = 1 << ((tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
179 incrVSID |= 1 << ((tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
180 incrVSID |= 1; /* Set bit and add 1 */
de355530 181
55e303ae 182 return;
1c79356b 183
de355530 184}
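/*
 *	Illustrative sketch (added for clarity, not part of the original source): the VSID increment
 *	computed by mapping_init() above, restated as a standalone function.  For a hash space of
 *	2^tmp entries it returns roughly size^(1/2) + size^(1/4) + 1, the increment described in the
 *	comment before mapping_init().  The function name is an assumption for illustration only.
 */

static unsigned int example_incrVSID(unsigned int tmp) {	/* tmp = log2 of the hash space size */
	unsigned int incr;
	incr  = 1u << ((tmp + 1) >> 1);				/* ~ceiling of the square root */
	incr |= 1u << ((tmp + 1) >> 2);				/* ~ceiling of the fourth root */
	incr |= 1;						/* OR in the low bit - the "+ 1" */
	return incr;
}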
1c79356b 185
55e303ae 186
1c79356b 187/*
188 * mapping_remove(pmap_t pmap, addr64_t va);
189 * Given a pmap and virtual address, this routine finds the mapping and unmaps it.
190 * The mapping block will be added to
191 * the free list. If the free list threshold is reached, garbage collection will happen.
0b4e3aa0 192 *
193 * We also pass back the next higher mapped address. This is done so that the higher level
194 * pmap_remove function can release a range of addresses simply by calling mapping_remove
195 * in a loop until it finishes the range or is returned a vaddr of 0.
0b4e3aa0 196 *
55e303ae 197 * Note that if the mapping is not found, we return the next VA ORed with 1
198 *
199 */
0b4e3aa0 200
201addr64_t mapping_remove(pmap_t pmap, addr64_t va) {		/* Remove a single mapping for this VADDR
202 							   Returns the next mapped VA, with the low bit set if no mapping was found */
0b4e3aa0 203
91447636 204 mapping_t *mp;
55e303ae 205 addr64_t nextva;
91447636 206 ppnum_t pgaddr;
de355530 207
208 va &= ~PAGE_MASK; /* Scrub noise bits */
209
210 	do {									/* Keep trying until we truly fail */
55e303ae 211 mp = hw_rem_map(pmap, va, &nextva); /* Remove a mapping from this pmap */
91447636 212 } while (mapRtRemove == ((unsigned int)mp & mapRetCode));
de355530 213
214 switch ((unsigned int)mp & mapRetCode) {
215 case mapRtOK:
216 break; /* Mapping removed */
217 case mapRtNotFnd:
218 return (nextva | 1); /* Nothing found to unmap */
219 default:
220 panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
221 pmap, va, mp);
222 break;
de355530 223 }
224
225 pgaddr = mp->mpPAddr; /* Get page number from mapping */
de355530 226
55e303ae 227 mapping_free(mp); /* Add mapping to the free list */
228
229 if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
230 /* If this is an assisted host, scrub any guest mappings */
231 unsigned int idx;
232 phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
233 /* Get physent for our physical page */
234 if (!physent) { /* No physent, could be in I/O area, so exit */
235 return (nextva);
236 }
237
238 do { /* Iterate 'till all guest mappings are gone */
239 mp = hw_scrub_guest(physent, pmap); /* Attempt to scrub a guest mapping */
240 switch ((unsigned int)mp & mapRetCode) {
241 case mapRtGuest: /* Found a guest mapping */
242 case mapRtNotFnd: /* Mapping was there, but disappeared, must retry */
243 case mapRtEmpty: /* No guest mappings left to scrub */
244 break;
245 default:
246 panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
247 physent, mp); /* Cry havoc, cry wrack,
248 at least we die with harness on our backs */
249 break;
250 }
251 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
252 }
1c79356b 253
254 return nextva; /* Tell them we did it */
255}
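/*
 *	Illustrative sketch (added for clarity, not part of the original source): the calling pattern
 *	described in the mapping_remove() header comment - a higher-level remove walks a range by
 *	calling mapping_remove() until it runs past the end of the range or gets back 0.  The low bit
 *	of the returned address only means "nothing was mapped at the requested page", so it is
 *	stripped before the address is reused.  The function name is an assumption for illustration.
 */

static void example_remove_range(pmap_t pmap, addr64_t start, addr64_t end) {
	addr64_t next = start;
	while (next && (next < end)) {				/* 0 means no higher mapped address exists */
		next = mapping_remove(pmap, next);		/* unmap this page, get next mapped VA */
		next &= ~1ULL;					/* drop the "not found" marker bit */
	}
}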
de355530 256
1c79356b 257/*
55e303ae 258 * mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
259 *
260 * This routine takes the given parameters, builds a mapping block, and queues it into the
261 * correct lists.
262 *
263 * pmap (virtual address) is the pmap to map into
264 * va (virtual address) is the 64-bit virtual address that is being mapped
265 * pa (physical page number) is the physical page number (i.e., physical address >> 12). This is
266 * a 32-bit quantity.
267 * Flags:
268 * block if 1, mapping is a block, size parameter is used. Note: we do not keep
269 * reference and change information or allow protection changes of blocks.
270 * any changes must first unmap and then remap the area.
271 * use attribute Use specified attributes for map, not defaults for physical page
272 * perm Mapping is permanent
273 * cache inhibited Cache inhibited (used if use attribute or block set )
274 * guarded Guarded access (used if use attribute or block set )
275 * size size of block (not used if not block)
276 * prot VM protection bits
277 * attr Cacheability/Guardedness
278 *
279 * Returns 0 if the mapping was successful. Returns the colliding vaddr (with its low bit set) if it overlaps an existing, non-matching mapping.
280 * Returns 1 for any other failure.
281 *
282 * Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
283 * for I/O and default the cache attributes appropriately. The caller is free to set whatever they want, however.
284 *
285 * If there is any physical page that is not found in the physent table, the mapping is forced to be a
286 * block mapping of length 1. This keeps us from trying to update a physent during later mapping use,
287 * e.g., fault handling.
288 *
1c79356b 289 *
290 */
291
55e303ae 292addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) { /* Make an address mapping */
1c79356b 293
294 register mapping_t *mp;
295 addr64_t colladdr, psmask;
296 unsigned int pindex, mflags, pattr, wimg, rc;
297 phys_entry_t *physent;
298 int nlists, pcf;
de355530 299
300 pindex = 0;
301
302 mflags = 0x01000000; /* Start building mpFlags field (busy count = 1) */
303
304 pcf = (flags & mmFlgPcfg) >> 24; /* Get the physical page config index */
305 if(!(pPcfg[pcf].pcfFlags)) { /* Validate requested physical page configuration */
306 panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
307 pmap, va, pcf);
308 }
309
310 psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1; /* Mask to isolate any offset into a page */
311 if(va & psmask) { /* Make sure we are page aligned on virtual */
312 panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
313 pmap, va, pcf);
314 }
315 if(((addr64_t)pa << 12) & psmask) { /* Make sure we are page aligned on physical */
316 panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
317 pmap, pa, pcf);
318 }
1c79356b 319
320 mflags |= (pcf << (31-mpPcfgb)); /* Insert physical page configuration index */
321
55e303ae 322 if(!(flags & mmFlgBlock)) { /* Is this a block map? */
1c79356b 323
324 size = 1; /* Set size to 1 page if not block */
325
326 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
327 if(!physent) { /* Did we find the physical page? */
328 mflags |= mpBlock; /* Force this to a block if no physent */
329 pattr = 0; /* Assume normal, non-I/O memory */
330 if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
1c79356b 331 }
91447636 332 else pattr = ((physent->ppLink & (ppI | ppG)) >> 60); /* Get the default attributes from physent */
de355530 333
55e303ae 334 if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
1c79356b 335 }
336 else { /* This is a block */
337
338 pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
339 mflags |= mpBlock; /* Show that this is a block */
de355530 340 }
1c79356b 341
342 wimg = 0x2; /* Set basic PPC wimg to 0b0010 - Coherent */
343 if(pattr & mmFlgCInhib) wimg |= 0x4; /* Add cache inhibited if we need to */
344 if(pattr & mmFlgGuarded) wimg |= 0x1; /* Add guarded if we need to */
1c79356b 345
55e303ae 346 mflags = mflags | (pindex << 16); /* Stick in the physical entry table index */
1c79356b 347
55e303ae 348 if(flags & mmFlgPerm) mflags |= mpPerm; /* Set permanent mapping */
1c79356b 349
350 size = size - 1; /* Change size to offset */
351 if(size > 0xFFFF) return 1; /* Leave if size is too big */
1c79356b 352
55e303ae 353 nlists = mapSetLists(pmap); /* Set number of lists this will be on */
de355530 354
355 mp = mapping_alloc(nlists); /* Get a spare mapping block with this many lists */
356
357 /* the mapping is zero except that the mpLists field is set */
358 mp->mpFlags |= mflags; /* Add in the rest of the flags to mpLists */
359 mp->mpSpace = pmap->space; /* Set the address space/pmap lookup ID */
91447636 360 mp->u.mpBSize = size; /* Set the size */
361 mp->mpPte = 0; /* Set the PTE invalid */
362 mp->mpPAddr = pa; /* Set the physical page number */
363 mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) /* Add the protection and attributes to the field */
364 | ((PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit)?
365 getProtPPC(prot) : (getProtPPC(prot) & 0x3)); /* Mask off no-execute control for 32-bit machines */
de355530 366
367 while(1) { /* Keep trying... */
368 colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */
369 rc = colladdr & mapRetCode; /* Separate return code */
370 colladdr &= ~mapRetCode; /* Clean up collision effective address */
55e303ae 371
372 switch (rc) {
373 case mapRtOK:
374 return 0; /* Mapping added successfully */
375
376 case mapRtRemove: /* Remove in progress */
377 (void)mapping_remove(pmap, colladdr); /* Lend a helping hand to another CPU doing block removal */
378 continue; /* Retry mapping add */
379
380 case mapRtMapDup: /* Identical mapping already present */
381 mapping_free(mp); /* Free duplicate mapping */
382 return 0; /* Return success */
383
384 case mapRtSmash: /* Mapping already present but does not match new mapping */
385 mapping_free(mp); /* Free duplicate mapping */
386 return (colladdr | 1); /* Return colliding address, with some dirt added to avoid
387 confusion if effective address is 0 */
388 default:
389 panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
390 colladdr, rc, pmap, va, mp); /* Die dead */
1c79356b 391 }
1c79356b 392
393 }
394
91447636 395 return 1; /* Unreachable, but pleases compiler */
396}
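/*
 *	Illustrative sketch (added for clarity, not part of the original source): the WIMG encoding
 *	built inside mapping_make() above, restated as a helper.  M (coherent, 0x2) is always set;
 *	cache inhibited adds I (0x4) and guarded adds G (0x1).  The helper name and the two boolean
 *	parameters are assumptions for illustration only.
 */

static unsigned int example_wimg(int cache_inhibit, int guarded) {
	unsigned int wimg = 0x2;				/* M - memory coherency required */
	if (cache_inhibit) wimg |= 0x4;				/* I - caching inhibited */
	if (guarded)       wimg |= 0x1;				/* G - guarded, no speculative access */
	return wimg;
}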
397
398
399/*
55e303ae 400 * mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping
1c79356b 401 *
402 * Looks up the vaddr and returns the mapping and the next mapped va
403 * If full is true, it will descend through all nested pmaps to find actual mapping
1c79356b 404 *
55e303ae 405 * Must be called with interruptions disabled or we can hang trying to remove found mapping.
1c79356b 406 *
407 * Returns 0 if not found and the virtual address of the mapping if it is
408 * Note that the mapping's busy count is bumped. It is the responsibility of the caller
409 * to drop the count. If this is not done, any attempt to remove the mapping will hang.
1c79356b 410 *
55e303ae 411 * NOTE: The nextva field is not valid when full is TRUE.
1c79356b 412 *
413 *
414 */
415
91447636 416mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) {	/* Find a mapping for this address */
de355530 417
91447636 418 register mapping_t *mp;
419 addr64_t curva;
420 pmap_t curpmap;
421 int nestdepth;
de355530 422
423 curpmap = pmap; /* Remember entry */
424 nestdepth = 0; /* Set nest depth */
91447636 425 curva = (addr64_t)va; /* Set current va */
de355530 426
55e303ae 427 while(1) {
1c79356b 428
429 mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */
430 if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
431 panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap); /* Die... */
1c79356b 432 }
55e303ae 433
91447636 434 if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break; /* Are we done looking? */
1c79356b 435
436 if((mp->mpFlags & mpType) != mpNest) { /* Don't chain through anything other than a nested pmap */
437 mapping_drop_busy(mp); /* We have everything we need from the mapping */
438 mp = 0; /* Set not found */
439 break;
1c79356b 440 }
1c79356b 441
442 if(nestdepth++ > 64) { /* Have we nested too far down? */
443 panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
444 va, curva, pmap, curpmap);
1c79356b 445 }
446
447 curva = curva + mp->mpNestReloc; /* Relocate va to new pmap */
448 curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr; /* Get the address of the nested pmap */
449 mapping_drop_busy(mp); /* We have everything we need from the mapping */
450
451 }
452
55e303ae 453 return mp; /* Return the mapping if we found one */
454}
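/*
 *	Illustrative sketch (added for clarity, not part of the original source): the caller
 *	obligation described in the mapping_find() header comment - a successful find bumps the
 *	mapping's busy count, so the caller must drop it with mapping_drop_busy() when done or a
 *	later removal of that mapping will hang.  The function name and the use of mpPAddr are
 *	assumptions for illustration only.
 */

static ppnum_t example_lookup_page(pmap_t pmap, addr64_t va) {
	addr64_t nextva;
	ppnum_t pa = 0;
	mapping_t *mp = mapping_find(pmap, va, &nextva, 0);	/* interruptions must already be disabled */
	if (mp) {
		pa = mp->mpPAddr;				/* read what we need while the mapping is busy */
		mapping_drop_busy(mp);				/* then release the busy count */
	}
	return pa;
}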
455
1c79356b 456/*
91447636 457 * void mapping_protect(pmap_t pmap, addt_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
1c79356b 458 *
459 * This routine takes a pmap and virtual address and changes
460 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
461 * the protection is changed.
1c79356b 462 *
463 * We return success if we change the protection or if there is no page mapped at va. We return failure if
464 * the va corresponds to a block mapped area or the mapping is permanent.
de355530 465 *
466 *
467 */
1c79356b 468
469void
470mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */
1c79356b 471
55e303ae 472 int ret;
de355530 473
91447636 474 ret = hw_protect(pmap, va, getProtPPC(prot), nextva); /* Try to change the protect here */
1c79356b 475
476 switch (ret) { /* Decode return code */
477
478 case mapRtOK: /* Changed */
479 case mapRtNotFnd: /* Didn't find it */
480 case mapRtBlock: /* Block map, just ignore request */
481 case mapRtNest: /* Nested pmap, just ignore request */
482 break;
483
484 default:
485 panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
486
487 }
488
1c79356b 489}
490
491/*
55e303ae 492 * void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
493 *
494 * This routine takes a physical entry and runs through all mappings attached to it and changes
495 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
55e303ae 496 * the protection is changed. There is no limitation on changes, e.g.,
497 * higher to lower, lower to higher.
498 *
499 * Any mapping that is marked permanent is not changed
500 *
501 * Phys_entry is unlocked.
502 */
503
55e303ae 504void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */
1c79356b 505
55e303ae 506 unsigned int pindex;
91447636 507 phys_entry_t *physent;
de355530 508
509 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
510 if(!physent) { /* Did we find the physical page? */
511 panic("mapping_protect_phys: invalid physical page %08X\n", pa);
de355530 512 }
1c79356b 513
514 hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
515 getProtPPC(prot), hwpPurgePTE); /* Set the new protection for page and mappings */
de355530 516
517 return; /* Leave... */
518}
519
520
521/*
55e303ae 522 * void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
523 *
524 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 525 * off the change bit.
526 */
527
528void mapping_clr_mod(ppnum_t pa) { /* Clears the change bit of a physical page */
529
530 unsigned int pindex;
91447636 531 phys_entry_t *physent;
532
533 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
534 if(!physent) { /* Did we find the physical page? */
535 panic("mapping_clr_mod: invalid physical page %08X\n", pa);
536 }
1c79356b 537
538 hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
539 0, hwpPurgePTE); /* Clear change for page and mappings */
540 return; /* Leave... */
541}
542
543
544/*
55e303ae 545 * void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
546 *
547 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 548 * on the change bit.
549 */
550
551void mapping_set_mod(ppnum_t pa) { /* Sets the change bit of a physical page */
552
553 unsigned int pindex;
91447636 554 phys_entry_t *physent;
555
556 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
557 if(!physent) { /* Did we find the physical page? */
558 panic("mapping_set_mod: invalid physical page %08X\n", pa);
559 }
d7e50217 560
561 hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
562 0, hwpNoopPTE); /* Set change for page and mappings */
563 return; /* Leave... */
564}
565
566
567/*
55e303ae 568 * void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
1c79356b 569 *
de355530 570 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 571 * off the reference bit.
572 */
573
55e303ae 574void mapping_clr_ref(ppnum_t pa) { /* Clears the reference bit of a physical page */
de355530 575
55e303ae 576 unsigned int pindex;
91447636 577 phys_entry_t *physent;
578
579 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
580 if(!physent) { /* Did we find the physical page? */
581 panic("mapping_clr_ref: invalid physical page %08X\n", pa);
d7e50217 582 }
55e303ae 583
584 hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
585 0, hwpPurgePTE); /* Clear reference for page and mappings */
586 return; /* Leave... */
587}
588
589
590/*
55e303ae 591 * void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
592 *
593 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 594 * on the reference bit.
595 */
596
597void mapping_set_ref(ppnum_t pa) { /* Sets the reference bit of a physical page */
598
599 unsigned int pindex;
91447636 600 phys_entry_t *physent;
601
602 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
603 if(!physent) { /* Did we find the physical page? */
604 panic("mapping_set_ref: invalid physical page %08X\n", pa);
605 }
d7e50217 606
607 hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
608 0, hwpNoopPTE); /* Set reference for page and mappings */
de355530 609 return; /* Leave... */
610}
611
612
613/*
91447636 614 * boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
615 *
616 * This routine takes a physical entry and runs through all mappings attached to it and tests
55e303ae 617 * the changed bit.
618 */
619
620boolean_t mapping_tst_mod(ppnum_t pa) { /* Tests the change bit of a physical page */
621
622 unsigned int pindex, rc;
91447636 623 phys_entry_t *physent;
624
625 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
626 if(!physent) { /* Did we find the physical page? */
627 panic("mapping_tst_mod: invalid physical page %08X\n", pa);
628 }
d7e50217 629
630 rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
631 		0, hwpMergePTE);					/* Test change for page and mappings */
55e303ae 632 return ((rc & (unsigned long)ppC) != 0); /* Leave with change bit */
633}
634
635
636/*
91447636 637 * boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
638 *
639 * This routine takes a physical entry and runs through all mappings attached to it and tests
55e303ae 640 * the reference bit.
641 */
642
643boolean_t mapping_tst_ref(ppnum_t pa) { /* Tests the reference bit of a physical page */
644
645 unsigned int pindex, rc;
91447636 646 phys_entry_t *physent;
647
648 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
649 if(!physent) { /* Did we find the physical page? */
650 panic("mapping_tst_ref: invalid physical page %08X\n", pa);
651 }
1c79356b 652
653 rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
654 0, hwpMergePTE); /* Test reference for page and mappings */
55e303ae 655 return ((rc & (unsigned long)ppR) != 0); /* Leave with reference bit */
656}
657
658
659/*
660 * unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
661 *
662 * This routine takes a physical entry and runs through all mappings attached to it and tests
663 * their reference and changed bits.
664 */
665
666unsigned int mapping_tst_refmod(ppnum_t pa) { /* Tests the reference and change bits of a physical page */
667
668 unsigned int pindex, rc;
669 phys_entry_t *physent;
670
671 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
672 if (!physent) { /* Did we find the physical page? */
673 panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
674 }
675
676 rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
677 0, hwpMergePTE); /* Test reference and change bits in page and mappings */
678 return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
679 /* Convert bits to generic format and return */
680
681}
682
683
684/*
685 * void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
686 * by mask of a physical page
687 *
688 * This routine takes a physical entry and runs through all mappings attached to it and turns
689 * off all the reference and change bits.
690 */
691
692void mapping_clr_refmod(ppnum_t pa, unsigned int mask) { /* Clears the reference and change bits of a physical page */
693
694 unsigned int pindex;
695 phys_entry_t *physent;
696 unsigned int ppcMask;
697
698 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
699 if(!physent) { /* Did we find the physical page? */
700 panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
701 }
702
703 ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
704 /* Convert mask bits to PPC-specific format */
705 hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
706 ppcMask, hwpPurgePTE); /* Clear reference and change bits for page and mappings */
707 return; /* Leave... */
708}
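/*
 *	Illustrative sketch (added for clarity, not part of the original source): how the generic
 *	bits returned by mapping_tst_refmod() above would be consumed, and how the same mask shape
 *	is handed back to mapping_clr_refmod().  The function name is an assumption for illustration.
 */

static void example_clean_if_dirty(ppnum_t pa) {
	unsigned int refmod = mapping_tst_refmod(pa);		/* VM_MEM_REFERENCED | VM_MEM_MODIFIED */
	if (refmod & VM_MEM_MODIFIED) {
		/* ...a real caller would write the page out here... */
		mapping_clr_refmod(pa, VM_MEM_MODIFIED);	/* then clear only the change bit */
	}
}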
709
710
711
de355530 712/*
55e303ae 713 * phys_ent *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the physical entry for a page
de355530 714 *
715 * This routine takes a physical page number and returns the phys_entry associated with it. It also
716 * calculates the bank index associated with the entry, which is returned through pindex.
718 */
719
91447636 720phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) { /* Finds the physical entry for the page */
de355530 721
722 int i;
723
724 for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */
725 if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */
726 if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue; /* This isn't ours */
727
728 *pindex = (i * sizeof(mem_region_t)) / 4; /* Make the word index to this list */
729
730 return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart]; /* Return the physent pointer */
731 }
732
91447636 733 return (phys_entry_t *)0; /* Shucks, can't find it... */
55e303ae 734
de355530 735}
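/*
 *	Illustrative sketch (added for clarity, not part of the original source): the value handed
 *	back through *pindex above is a word index - the byte offset of bank i within
 *	pmap_mem_regions[], divided by 4 - which mapping_make() later shifts into mpFlags.  The
 *	helper name is an assumption for illustration only.
 */

static unsigned int example_bank_word_index(unsigned int bank) {
	return (unsigned int)((bank * sizeof(mem_region_t)) / 4);	/* byte offset of the bank, in 32-bit words */
}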
736
737
738
739
740/*
741 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
742 *
743 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
744 * the number of free mappings remaining, and if below a threshold, replenishes them.
745 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
746 * a new one is allocated.
747 *
748 * This routine allocates and/or frees memory and must be called from a safe place.
749 * Currently, vm_pageout_scan is the safest place.
750 */
751
752thread_call_t mapping_adjust_call;
753static thread_call_data_t mapping_adjust_call_data;
754
755void mapping_adjust(void) { /* Adjust free mappings */
756
55e303ae 757 kern_return_t retr = KERN_SUCCESS;
91447636 758 mappingblok_t *mb, *mbn;
1c79356b 759 spl_t s;
91447636 760 int allocsize;
761
762 if(mapCtl.mapcmin <= MAPPERBLOK) {
55e303ae 763 mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
764
765#if DEBUG
766 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
767 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
768 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
769#endif
770 }
771
772 s = splhigh(); /* Don't bother from now on */
773 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
774 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
775 }
776
777 if (mapping_adjust_call == NULL) {
778 thread_call_setup(&mapping_adjust_call_data,
779 (thread_call_func_t)mapping_adjust,
780 (thread_call_param_t)NULL);
781 mapping_adjust_call = &mapping_adjust_call_data;
782 }
783
784 while(1) { /* Keep going until we've got enough */
785
786 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
787 if(allocsize < 1) break; /* Leave if we have all we need */
788
789 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
790 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
791 mapCtl.mapcreln--; /* Back off the count */
792 allocsize = MAPPERBLOK; /* Show we allocated one block */
793 }
55e303ae 794 else { /* No free ones, try to get it */
795
796 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
55e303ae 797
798 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
799 splx(s); /* Restore 'rupts */
800
801 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
802 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
803 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
9bccf70c 804 break;
805 }
806 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
807 }
55e303ae 808
809 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
810 s = splhigh(); /* Don't bother from now on */
811 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
812 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
813 }
814 }
55e303ae 815
816 if (retr != KERN_SUCCESS)
817 			break;						/* Failed to allocate, bail out... */
818 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
819 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
91447636 820 mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1c79356b 821 }
55e303ae 822
823 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
824 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
825 }
826
827 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
828 mapCtl.mapcrecurse = 0; /* We are done now */
829 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
830 splx(s); /* Restore 'rupts */
831 return; /* Return... */
832 }
833
834 mbn = mapCtl.mapcrel; /* Get first pending release block */
835 mapCtl.mapcrel = 0; /* Dequeue them */
836 mapCtl.mapcreln = 0; /* Set count to 0 */
837
838 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
839 splx(s); /* Restore 'rupts */
840
841 while((unsigned int)mbn) { /* Toss 'em all */
842 mb = mbn->nextblok; /* Get the next */
55e303ae 843
1c79356b 844 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
55e303ae 845
846 mbn = mb; /* Chain to the next */
847 }
848
55e303ae 849 __asm__ volatile("eieio"); /* Make sure all is well */
850 mapCtl.mapcrecurse = 0; /* We are done now */
851 return;
852}
853
854/*
855 * mapping_free(mapping *mp) - release a mapping to the free list
856 *
857 * This routine takes a mapping and adds it to the free list.
858 * If this mapping makes the block non-empty, we queue it to the free block list.
859 * NOTE: we might want to queue it to the end to quelch the pathological
860 * case when we get a mapping and free it repeatedly causing the block to chain and unchain.
861 * If this release fills a block and we are above the threshold, we release the block
862 */
863
864void mapping_free(struct mapping *mp) { /* Release a mapping */
865
91447636 866 mappingblok_t *mb, *mbn;
1c79356b 867 spl_t s;
55e303ae 868 unsigned int full, mindx, lists;
1c79356b 869
55e303ae 870 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6; /* Get index to mapping */
91447636 871 mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
872 lists = (mp->mpFlags & mpLists); /* get #lists */
873 if ((lists == 0) || (lists > kSkipListMaxLists)) /* panic if out of range */
874 panic("mapping_free: mpLists invalid\n");
875
876#if 0
877 mp->mpFlags = 0x99999999; /* (BRINGUP) */
878 mp->mpSpace = 0x9999; /* (BRINGUP) */
91447636 879 mp->u.mpBSize = 0x9999; /* (BRINGUP) */
880 mp->mpPte = 0x99999998; /* (BRINGUP) */
881 mp->mpPAddr = 0x99999999; /* (BRINGUP) */
882 mp->mpVAddr = 0x9999999999999999ULL; /* (BRINGUP) */
883 mp->mpAlias = 0x9999999999999999ULL; /* (BRINGUP) */
884 mp->mpList0 = 0x9999999999999999ULL; /* (BRINGUP) */
885 mp->mpList[0] = 0x9999999999999999ULL; /* (BRINGUP) */
886 mp->mpList[1] = 0x9999999999999999ULL; /* (BRINGUP) */
887 mp->mpList[2] = 0x9999999999999999ULL; /* (BRINGUP) */
888
889 if(lists > mpBasicLists) { /* (BRINGUP) */
890 mp->mpList[3] = 0x9999999999999999ULL; /* (BRINGUP) */
891 mp->mpList[4] = 0x9999999999999999ULL; /* (BRINGUP) */
892 mp->mpList[5] = 0x9999999999999999ULL; /* (BRINGUP) */
893 mp->mpList[6] = 0x9999999999999999ULL; /* (BRINGUP) */
894 mp->mpList[7] = 0x9999999999999999ULL; /* (BRINGUP) */
895 mp->mpList[8] = 0x9999999999999999ULL; /* (BRINGUP) */
896 mp->mpList[9] = 0x9999999999999999ULL; /* (BRINGUP) */
897 mp->mpList[10] = 0x9999999999999999ULL; /* (BRINGUP) */
898 }
899#endif
900
901
902 s = splhigh(); /* Don't bother from now on */
903 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
904 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
905 }
906
55e303ae 907 full = !(mb->mapblokfree[0] | mb->mapblokfree[1]); /* See if full now */
1c79356b 908 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
909 if ( lists > mpBasicLists ) { /* if big block, lite the 2nd bit too */
910 mindx++;
911 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
912 mapCtl.mapcfree++;
913 mapCtl.mapcinuse--;
914 }
915
916 if(full) { /* If it was full before this: */
917 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
918 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
919 if(!((unsigned int)mapCtl.mapclast))
920 mapCtl.mapclast = mb;
921 }
922
923 mapCtl.mapcfree++; /* Bump free count */
924 	mapCtl.mapcinuse--;						/* Decrement in use count */
925
926 mapCtl.mapcfreec++; /* Count total calls */
927
928 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
55e303ae 929 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) { /* See if empty now */
930
931 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
932 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
933 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
934 }
935 else { /* We're not first */
936 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
937 if(mbn->nextblok == mb) break; /* Is the next one our's? */
938 }
939 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
940 mbn->nextblok = mb->nextblok; /* Dequeue us */
941 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
942 }
943
944 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
945 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
946 mapCtl.mapcnext = mb; /* Chain us to the head */
947 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
948 }
949 else {
950 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
951 mapCtl.mapcreln++; /* Count on release list */
952 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
953 mapCtl.mapcrel = mb; /* Chain us in front */
954 }
955 }
956 }
957
958 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
959 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
960 thread_call_enter(mapping_adjust_call); /* Go toss some */
961 }
962 }
963 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
964 splx(s); /* Restore 'rupts */
965
966 return; /* Bye, dude... */
967}
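/*
 *	Illustrative sketch (added for clarity, not part of the original source): the slot
 *	arithmetic used by mapping_free() above.  A mapping block is one page; a mapping's slot
 *	index is its offset within the page divided by 64, and slot N is tracked by bit N
 *	(counting from the most significant bit) of the two-word mapblokfree bitmap.  The function
 *	names are assumptions for illustration only.
 */

static unsigned int example_slot_index(unsigned int mp_addr, unsigned int page_size) {
	return (mp_addr & (page_size - 1)) >> 6;			/* offset within the page / 64 bytes per slot */
}

static void example_mark_slot_free(unsigned int *mapblokfree, unsigned int mindx) {
	mapblokfree[mindx >> 5] |= (0x80000000u >> (mindx & 31));	/* set bit mindx, numbered from the MSB */
}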
968
969
970/*
55e303ae 971 * mapping_alloc(lists) - obtain a mapping from the free list
1c79356b 972 *
973 * This routine takes a mapping off of the free list and returns its address.
974 * The mapping is zeroed, and its mpLists count is set. The caller passes in
975 * the number of skiplists it would prefer; if this number is greater than
976 * mpBasicLists (ie, 4) then we need to allocate a 128-byte mapping, which is
977 * just two consecutive free entries coalesced into one. If we cannot find
978 * two consecutive free entries, we clamp the list count down to mpBasicLists
979 * and return a basic 64-byte node. Our caller never knows the difference.
1c79356b 980 *
55e303ae 981 * If this allocation empties a block, we remove it from the free list.
982 * If this allocation drops the total number of free entries below a threshold,
983 * we allocate a new block.
984 *
985 */
91447636 986decl_simple_lock_data(extern,free_pmap_lock)
1c79356b 987
988mapping_t *
989mapping_alloc(int lists) { /* Obtain a mapping */
1c79356b 990
991 register mapping_t *mp;
992 mappingblok_t *mb, *mbn;
993 spl_t s;
994 int mindx;
995 int big = (lists > mpBasicLists); /* set flag if big block req'd */
996 pmap_t refpmap, ckpmap;
997 unsigned int space, i;
55e303ae 998 addr64_t va, nextva;
999 boolean_t found_mapping;
1000 boolean_t do_rescan;
1001
1002 s = splhigh(); /* Don't bother from now on */
1003 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1004 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1005 }
1006
1007 if(!((unsigned int)mapCtl.mapcnext)) { /* Are there any free mappings? */
1008
1009/*
1010 * No free mappings. First, there may be some mapping blocks on the "to be released"
1011 * list. If so, rescue one. Otherwise, try to steal a couple blocks worth.
1012 */
1013
91447636 1014 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
1015 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1016 mapCtl.mapcreln--; /* Back off the count */
1017 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1018 goto rescued;
1019 }
1020
1021 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1022
1023 simple_lock(&free_pmap_lock);
1024
1025 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1026 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1027 }
1028
1029 if (!((unsigned int)mapCtl.mapcnext)) {
1030
1031 refpmap = (pmap_t)cursor_pmap->pmap_link.next;
1032 space = mapCtl.mapcflush.spacenum;
1033 while (refpmap != cursor_pmap) {
1034 if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
1035 refpmap = (pmap_t)refpmap->pmap_link.next;
de355530 1036 }
1037
1038 ckpmap = refpmap;
1039 va = mapCtl.mapcflush.addr;
1040 found_mapping = FALSE;
1041
1042 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1043
1044 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1045
1046 ckpmap = (pmap_t)ckpmap->pmap_link.next;
1047
1048 /* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
1049 shadow assist active.
1050 */
1051 if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
1052 && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {
1053 do_rescan = TRUE;
1054 for (i=0;i<8;i++) {
1055 mp = hw_purge_map(ckpmap, va, &nextva);
1056
1057 switch ((unsigned int)mp & mapRetCode) {
1058 case mapRtOK:
1059 mapping_free(mp);
1060 found_mapping = TRUE;
1061 break;
1062 case mapRtNotFnd:
1063 break;
1064 default:
1065 panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
1066 break;
1067 }
1068
91447636 1069 if (mapRtNotFnd == ((unsigned int)mp & mapRetCode))
1070 if (do_rescan)
1071 do_rescan = FALSE;
1072 else
1073 break;
1074
1075 va = nextva;
1076 }
1077 }
1078
1079 if (ckpmap == refpmap) {
1080 if (found_mapping == FALSE)
1081 panic("no valid pmap to purge mappings\n");
1082 else
1083 found_mapping = FALSE;
1084 }
1085
1086 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1087 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1088 }
1089
9bccf70c 1090 }
1091
1092 mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
1093 mapCtl.mapcflush.addr = nextva;
9bccf70c 1094 }
1095
1096 simple_unlock(&free_pmap_lock);
1097 }
1098
1099rescued:
1100
1101 mb = mapCtl.mapcnext;
1102
1103 if ( big ) { /* if we need a big (128-byte) mapping */
1104 mapCtl.mapcbig++; /* count attempts to allocate a big mapping */
1105 mbn = NULL; /* this will be prev ptr */
1106 mindx = 0;
1107 while( mb ) { /* loop over mapping blocks with free entries */
1108 			mindx = mapalc2(mb);				/* try for 2 consecutive free bits in this block */
1109
1110 if ( mindx ) break; /* exit loop if we found them */
1111 mbn = mb; /* remember previous block */
1112 mb = mb->nextblok; /* move on to next block */
1113 }
1114 		if ( mindx == 0 ) {					/* if we couldn't find 2 consecutive bits... */
1115 mapCtl.mapcbigfails++; /* count failures */
1116 big = 0; /* forget that we needed a big mapping */
1117 lists = mpBasicLists; /* clamp list count down to the max in a 64-byte mapping */
1118 mb = mapCtl.mapcnext; /* back to the first block with a free entry */
1119 }
1120 else { /* if we did find a big mapping */
1121 mapCtl.mapcfree--; /* Decrement free count twice */
1122 mapCtl.mapcinuse++; /* Bump in use count twice */
1123 if ( mindx < 0 ) { /* if we just used the last 2 free bits in this block */
1124 if (mbn) { /* if this wasn't the first block */
1125 mindx = -mindx; /* make positive */
1126 mbn->nextblok = mb->nextblok; /* unlink this one from the middle of block list */
1127 if (mb == mapCtl.mapclast) { /* if we emptied last block */
1128 mapCtl.mapclast = mbn; /* then prev block is now last */
1129 }
1130 }
1131 }
1132 }
1133 }
1134
1135 if ( !big ) { /* if we need a small (64-byte) mapping */
1136 if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */
1137 panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
1138 }
1139
1140 if(mindx < 0) { /* Did we just take the last one */
1141 mindx = -mindx; /* Make positive */
1142 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
1143 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
1144 }
1145
1146 mapCtl.mapcfree--; /* Decrement free count */
1147 mapCtl.mapcinuse++; /* Bump in use count */
1148
1149 mapCtl.mapcallocc++; /* Count total calls */
1150
1151/*
1152 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1153 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1154 * rescuable, we will kick off the misc scan, which will allocate some for us. We only do this
1155 * if we haven't already done it.
1156 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1157 * the release list with as much as we need until threads start.
1158 */
55e303ae 1159
1c79356b 1160 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
91447636 1161 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
1162 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1163 mapCtl.mapcreln--; /* Back off the count */
1164 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1165 }
1166 else { /* We need to replenish */
1167 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1168 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1169 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1170 }
1171 }
1172 }
1173 }
1174
1175 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1176 splx(s); /* Restore 'rupts */
1177
91447636 1178 mp = &((mapping_t *)mb)[mindx]; /* Point to the allocated mapping */
1179 mp->mpFlags = lists; /* set the list count */
1180
1181
1182 return mp; /* Send it back... */
1183}
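/*
 *	Illustrative sketch (added for clarity, not part of the original source): the core of a
 *	mapalc2()-style search - finding two adjacent free slots in one 32-bit word of a free
 *	bitmap, where a set bit means "free" as in mapblokfree above.  The real allocator also
 *	pairs across word boundaries and signals "block now empty" through the sign of its result;
 *	this shows only the adjacency test.  The function name is an assumption for illustration.
 */

static int example_find_free_pair(unsigned int free_bits) {
	unsigned int pair = free_bits & (free_bits << 1);	/* bit n set here => slots n and n+1 both free */
	int i;
	if (!pair) return -1;					/* no two adjacent free slots in this word */
	for (i = 0; i < 32; i++) {				/* find the first pair, MSB first */
		if (pair & (0x80000000u >> i)) return i;	/* slots i and i+1 are both free */
	}
	return -1;
}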
1184
1185
1186void
91447636 1187consider_mapping_adjust(void)
1188{
1189 spl_t s;
1190
1191 s = splhigh(); /* Don't bother from now on */
1192 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
55e303ae 1193 panic("consider_mapping_adjust -- lock timeout\n");
1194 }
1195
1196 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1197 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1198 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1199 }
1200 }
1201
1202 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1203 splx(s); /* Restore 'rupts */
1204
1205}
1206
1207
1208
1209/*
1210 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1211 *
1212 * The mapping block is a page size area on a page boundary. It contains 1 header and 63
1213 * mappings. This call adds and initializes a block for use. Mappings come in two sizes,
1214 * 64 and 128 bytes (the only difference is the number of skip-lists.) When we allocate a
1215 * 128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
1216 * code only deals with "basic" 64-byte mappings. This works for two reasons:
1217 * - Only one in 256 mappings is big, so they are rare.
1218 * - If we cannot find two consecutive free mappings, we just return a small one.
1219 * There is no problem with doing this, except a minor performance degradation.
1220 * Therefore, all counts etc in the mapping control structure are in units of small blocks.
1221 *
1222 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1223 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1224 * corresponds to the header). The translation mask is the XOR of the virtual and real
1225 * addresses (needless to say, the block must be wired).
1226 *
1227 * We handle these mappings the same way as saveareas: the block is only on the chain so
1228 * long as there are free entries in it.
1229 *
1230 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1231 * mappings. Blocks marked PERM won't ever be released.
1232 *
1233 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1234 * list. We do this only at start up time. This is done because we only allocate blocks
1235 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1236 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1237 * them on the release queue, the allocate routine will rescue them. Then when the
1238 * pageout scan starts, all extra ones will be released.
1239 *
1240 */
1241
1242
1243void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1244 						/* Sets start and end of a block of mappings
1245 perm indicates if the block can be released
1246 or goes straight to the release queue .
1247 locked indicates if the lock is held already */
1248
91447636 1249 mappingblok_t *mb;
1c79356b 1250 spl_t s;
1251 addr64_t raddr;
1252 ppnum_t pp;
1c79356b 1253
91447636 1254 mb = (mappingblok_t *)mbl; /* Start of area */
1255
1256 if(perm >= 0) { /* See if we need to initialize the block */
1257 if(perm) {
55e303ae 1258 raddr = (addr64_t)((unsigned int)mbl); /* Perm means V=R */
1c79356b 1259 mb->mapblokflags = mbPerm; /* Set perm */
55e303ae 1260// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1261 }
1262 else {
1263 pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl); /* Get the physical page */
1264 if(!pp) { /* What gives? Where's the page? */
1265 panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
1266 }
1267
1268 raddr = (addr64_t)pp << 12; /* Convert physical page to physical address */
1c79356b 1269 mb->mapblokflags = 0; /* Set not perm */
55e303ae 1270// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1271 }
1272
55e303ae 1273 mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl); /* Form translation mask */
1274
1275 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1276 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1277 }
1278
1279 s = splhigh(); /* Don't bother from now on */
1280 if(!locked) { /* Do we need the lock? */
1281 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
55e303ae 1282 panic("mapping_free_init: timeout getting control lock\n"); /* Tell all and die */
1283 }
1284 }
1285
1286 if(perm < 0) { /* Direct to release queue? */
1287 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1288 mapCtl.mapcrel = mb; /* Queue us on in */
1289 mapCtl.mapcreln++; /* Count the free block */
1290 }
1291 else { /* Add to the free list */
1292
1293 mb->nextblok = 0; /* We always add to the end */
1294 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1295
1296 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1297 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1298 }
1299 else { /* We are not the first */
1300 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1301 mapCtl.mapclast = mb; /* We are now last */
1302 }
1303 }
1304
1305 if(!locked) { /* Do we need to unlock? */
1306 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1307 }
1308
1309 splx(s); /* Restore 'rupts */
1310 return; /* All done, leave... */
1311}
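/*
 *	Illustrative sketch (added for clarity, not part of the original source): the use of the
 *	mapblokvrswap mask set up above.  Because the mask is the XOR of the block's virtual and
 *	real addresses and the block is page aligned and wired, XORing the mask with either
 *	address of a slot in the block yields the other.  The function names are assumptions for
 *	illustration only.
 */

static addr64_t example_block_virt_to_real(addr64_t vaddr, addr64_t vrswap) {
	return vaddr ^ vrswap;					/* virtual -> real */
}

static addr64_t example_block_real_to_virt(addr64_t raddr, addr64_t vrswap) {
	return raddr ^ vrswap;					/* real -> virtual, same mask */
}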
1312
1313
1314/*
1315 * void mapping_prealloc(unsigned int) - Preallocates mappings for large request
1316 *
1317 * No locks can be held, because we allocate memory here.
1318 * This routine needs a corresponding mapping_relpre call to remove the
1319 * hold off flag so that the adjust routine will free the extra mapping
1320 * blocks on the release list. I don't like this, but I don't know
1321 * how else to do this for now...
1322 *
1323 */
1324
 1325 void mapping_prealloc(unsigned int size) { /* Preallocates mappings for a large request */
1326
1327 int nmapb, i;
1328 kern_return_t retr;
91447636 1329 mappingblok_t *mbn;
1c79356b
A
1330 spl_t s;
1331
1332 s = splhigh(); /* Don't bother from now on */
1333 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1334 panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */
1335 }
1336
1337 nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */
1338
1339 mapCtl.mapcholdoff++; /* Bump the hold off count */
1340
1341 if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */
1342 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1343 splx(s); /* Restore 'rupts */
1344 return;
1345 }
55e303ae 1346 if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1c79356b 1347 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
55e303ae 1348 splx(s); /* Restore 'rupts */
1c79356b
A
1349 return;
1350 }
1351 nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */
1352
1353 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1354 splx(s); /* Restore 'rupts */
1355
1356 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1357 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
55e303ae 1358 if(retr != KERN_SUCCESS) /* Did we get some memory? */
d7e50217 1359 break;
1c79356b
A
1360 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
1361 }
1362 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1363 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1364
1365 mapCtl.mapcrecurse = 0; /* We are done now */
1366}
1367
1368/*
1369 * void mapping_relpre(void) - Releases preallocation release hold off
1370 *
1371 * This routine removes the
1372 * hold off flag so that the adjust routine will free the extra mapping
1373 * blocks on the release list. I don't like this, but I don't know
1374 * how else to do this for now...
1375 *
1376 */
1377
1378void mapping_relpre(void) { /* Releases release hold off */
1379
1380 spl_t s;
1381
1382 s = splhigh(); /* Don't bother from now on */
1383 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1384 panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */
1385 }
1386 if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */
1387 panic("mapping_relpre: hold-off count went negative\n");
1388 }
1389
1390 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1391 splx(s); /* Restore 'rupts */
1392}
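/*
 *	A minimal sketch of the pairing described above: preallocate before
 *	entering many mappings, then drop the hold off so the adjust routine can
 *	trim the extra blocks again. region_size and the work in the middle are
 *	hypothetical.
 */
#if 0	/* illustrative only */
static void mapping_prealloc_example(unsigned int region_size) {

	mapping_prealloc(region_size);		/* Stock the release list; bumps the hold off count */
	/* ... enter the mappings for the large region here ... */
	mapping_relpre();			/* Drop the hold off; extra blocks can be freed later */
}
#endif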
1393
1394/*
1395 * void mapping_free_prime(void) - Primes the mapping block release list
1396 *
1397 * See mapping_free_init.
1398 * No locks can be held, because we allocate memory here.
1399 * One processor running only.
1400 *
1401 */
1402
1403void mapping_free_prime(void) { /* Primes the mapping block release list */
1404
1405 int nmapb, i;
1406 kern_return_t retr;
91447636 1407 mappingblok_t *mbn;
1c79356b
A
1408 vm_offset_t mapping_min;
1409
55e303ae 1410 retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
91447636 1411 FALSE, VM_FLAGS_ANYWHERE, &mapping_map);
1c79356b
A
1412
1413 if (retr != KERN_SUCCESS)
1414 panic("mapping_free_prime: kmem_suballoc failed");
1415
1416
1417 nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */
1418 nmapb = nmapb * 4; /* Get 4 times our initial allocation */
1419
1420#if DEBUG
1421 kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
1422 mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
1423#endif
1424
1425 for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
1426 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
1427 if(retr != KERN_SUCCESS) { /* Did we get some memory? */
1428 panic("Whoops... Not a bit of wired memory left for anyone\n");
1429 }
1430 mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */
1431 }
1432 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
1433 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
1434}
1435
1436
91447636 1437void
1c79356b
A
1438mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
1439 vm_size_t *alloc_size, int *collectable, int *exhaustable)
1440{
1441 *count = mapCtl.mapcinuse;
1442 *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
1443 *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
1444 *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1));
1445 *alloc_size = PAGE_SIZE;
1446
1447 *collectable = 1;
1448 *exhaustable = 0;
1449}
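/*
 *	Worked numbers for the sizes reported above, assuming MAPPERBLOK is 63
 *	(which matches the 31 + 32 free slots set up in mapping_free_init): each
 *	page holds one block header plus 63 mappings, so elem_size comes out to
 *	PAGE_SIZE / 64 = 64 bytes, and cur_size charges 64 bytes per free or
 *	in-use mapping plus a whole page for each block on the release queue.
 */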
1450
1451
1452/*
55e303ae 1453 * addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
1c79356b 1454 *
55e303ae
A
 1455 * First looks up the physical entry associated with the physical page. Then searches the alias
1456 * list for a matching pmap. It grabs the virtual address from the mapping, drops busy, and returns
1457 * that.
1c79356b 1458 *
1c79356b
A
1459 */
1460
55e303ae 1461addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) { /* Finds first virtual mapping of a physical page in a space */
1c79356b 1462
55e303ae 1463 spl_t s;
91447636 1464 mapping_t *mp;
55e303ae 1465 unsigned int pindex;
91447636 1466 phys_entry_t *physent;
55e303ae 1467 addr64_t va;
de355530 1468
55e303ae
A
1469 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1470 if(!physent) { /* Did we find the physical page? */
1471 panic("mapping_p2v: invalid physical page %08X\n", pa);
1c79356b 1472 }
1c79356b 1473
55e303ae 1474 s = splhigh(); /* Make sure interruptions are disabled */
1c79356b 1475
91447636 1476 mp = hw_find_space(physent, pmap->space); /* Go find the first mapping to the page from the requested pmap */
1c79356b 1477
55e303ae
A
1478 if(mp) { /* Did we find one? */
1479 va = mp->mpVAddr & -4096; /* If so, get the cleaned up vaddr */
 1480 mapping_drop_busy(mp); /* Go ahead and release the mapping now */
1c79356b 1481 }
55e303ae 1482 else va = 0; /* Return failure */
1c79356b 1483
55e303ae 1484 splx(s); /* Restore 'rupts */
1c79356b 1485
55e303ae 1486 return va; /* Bye, bye... */
1c79356b 1487
1c79356b
A
1488}
1489
1490/*
1491 * phystokv(addr)
1492 *
1493 * Convert a physical address to a kernel virtual address if
1494 * there is a mapping, otherwise return NULL
1495 */
1496
1497vm_offset_t phystokv(vm_offset_t pa) {
1498
55e303ae
A
1499 addr64_t va;
1500 ppnum_t pp;
1c79356b 1501
55e303ae
A
1502 pp = pa >> 12; /* Convert to a page number */
1503
1504 if(!(va = mapping_p2v(kernel_pmap, pp))) {
1c79356b
A
1505 return 0; /* Can't find it, return 0... */
1506 }
55e303ae
A
1507
1508 return (va | (pa & (PAGE_SIZE - 1))); /* Build and return VADDR... */
1509
1510}
1511
1512/*
1513 * kvtophys(addr)
1514 *
1515 * Convert a kernel virtual address to a physical address
1516 */
1517vm_offset_t kvtophys(vm_offset_t va) {
1518
1519 return pmap_extract(kernel_pmap, va); /* Find mapping and lock the physical entry for this mapping */
1c79356b
A
1520
1521}
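/*
 *	A minimal sketch of a round trip through the two helpers above; kva is a
 *	hypothetical wired kernel virtual address. phystokv may hand back a
 *	different alias than the one we started with, or 0 if the page has no
 *	kernel mapping at all.
 */
#if 0	/* illustrative only */
static vm_offset_t phystokv_roundtrip_example(vm_offset_t kva) {

	vm_offset_t pa, kva2;

	pa = kvtophys(kva);		/* Virtual to physical via pmap_extract */
	kva2 = phystokv(pa);		/* Physical back to the first kernel virtual mapping, or 0 */
	if(!kva2) panic("phystokv_roundtrip_example: no kernel mapping for %08X\n", pa);
	return kva2;
}
#endif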
1522
1523/*
1524 * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
1525 * page 0 access for the current thread.
1526 *
1527 * If parameter is TRUE, faults are ignored
1528 * If parameter is FALSE, faults are honored
1529 *
1530 */
1531
1532void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */
1533
91447636
A
1534 if(type) current_thread()->machine.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */
1535 else current_thread()->machine.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */
1c79356b
A
1536
 1537 return; /* All done, leave... */
1538}
1539
1540
55e303ae
A
1541/*
1542 * Copies data between a physical page and a virtual page, or 2 physical. This is used to
1543 * move data from the kernel to user state. Note that the "which" parm
1544 * says which of the parameters is physical and if we need to flush sink/source.
91447636 1545 * Note that both addresses may be physical, but only one may be virtual.
1c79356b 1546 *
55e303ae 1547 * The rules are that the size can be anything. Either address can be on any boundary
91447636 1548 * and span pages. The physical data must be contiguous as must the virtual.
1c79356b 1549 *
55e303ae
A
1550 * We can block when we try to resolve the virtual address at each page boundary.
1551 * We don't check protection on the physical page.
1c79356b 1552 *
55e303ae
A
1553 * Note that we will not check the entire range and if a page translation fails,
1554 * we will stop with partial contents copied.
1c79356b
A
1555 *
1556 */
1557
91447636 1558kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {
1c79356b
A
1559
1560 vm_map_t map;
1561 kern_return_t ret;
91447636
A
1562 addr64_t nextva, vaddr, paddr;
1563 register mapping_t *mp;
1c79356b 1564 spl_t s;
91447636 1565 unsigned int lop, csize;
55e303ae
A
1566 int needtran, bothphys;
1567 unsigned int pindex;
91447636 1568 phys_entry_t *physent;
55e303ae
A
1569 vm_prot_t prot;
1570 int orig_which;
1c79356b 1571
55e303ae 1572 orig_which = which;
1c79356b 1573
55e303ae 1574 map = (which & cppvKmap) ? kernel_map : current_map_fast();
1c79356b 1575
55e303ae
A
1576 if((which & (cppvPsrc | cppvPsnk)) == 0 ) { /* Make sure that only one is virtual */
1577 panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
1578 }
1579
1580 bothphys = 1; /* Assume both are physical */
1581
91447636 1582 if(!(which & cppvPsnk)) { /* Is sink page virtual? */
55e303ae
A
1583 vaddr = sink; /* Sink side is virtual */
1584 bothphys = 0; /* Show both aren't physical */
1585 prot = VM_PROT_READ | VM_PROT_WRITE; /* Sink always must be read/write */
91447636 1586 } else if (!(which & cppvPsrc)) { /* Is source page virtual? */
55e303ae
A
1587 vaddr = source; /* Source side is virtual */
1588 bothphys = 0; /* Show both aren't physical */
1589 prot = VM_PROT_READ; /* Virtual source is always read only */
1590 }
1c79356b 1591
55e303ae
A
1592 needtran = 1; /* Show we need to map the virtual the first time */
1593 s = splhigh(); /* Don't bother me */
1c79356b 1594
55e303ae 1595 while(size) {
de355530 1596
55e303ae
A
1597 if(!bothphys && (needtran || !(vaddr & 4095LL))) { /* If first time or we stepped onto a new page, we need to translate */
1598 if(!needtran) { /* If this is not the first translation, we need to drop the old busy */
1599 mapping_drop_busy(mp); /* Release the old mapping now */
1600 }
1601 needtran = 0;
1602
1603 while(1) {
1604 mp = mapping_find(map->pmap, vaddr, &nextva, 1); /* Find and busy the mapping */
1605 if(!mp) { /* Was it there? */
91447636 1606 if(getPerProc()->istackptr == 0)
55e303ae
A
 1607 panic("copypv: No valid mapping on memory %s %016llX", "RD", vaddr);
1608
1609 splx(s); /* Restore the interrupt level */
91447636 1610 ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0); /* Didn't find it, try to fault it in... */
55e303ae
A
1611
1612 if(ret != KERN_SUCCESS)return KERN_FAILURE; /* Didn't find any, return no good... */
1613
1614 s = splhigh(); /* Don't bother me */
1615 continue; /* Go try for the map again... */
1616
1617 }
1618 if (mp->mpVAddr & mpI) { /* cache inhibited, so force the appropriate page to be flushed before */
1619 if (which & cppvPsrc) /* and after the copy to avoid cache paradoxes */
1620 which |= cppvFsnk;
1621 else
1622 which |= cppvFsrc;
1623 } else
1624 which = orig_which;
1625
1626 /* Note that we have to have the destination writable. So, if we already have it, or we are mapping the source,
1627 we can just leave.
1628 */
1629 if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break; /* We got it mapped R/W or the source is not virtual, leave... */
1630
1631 mapping_drop_busy(mp); /* Go ahead and release the mapping for now */
91447636 1632 if(getPerProc()->istackptr == 0)
55e303ae
A
 1633 panic("copypv: No valid mapping on memory %s %016llX", "RDWR", vaddr);
1634 splx(s); /* Restore the interrupt level */
1635
91447636 1636 ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0); /* check for a COW area */
55e303ae
A
1637 if (ret != KERN_SUCCESS) return KERN_FAILURE; /* We couldn't get it R/W, leave in disgrace... */
1638 s = splhigh(); /* Don't bother me */
1639 }
1640 paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL)); /* construct the physical address... this calculation works */
1641 /* properly on both single page and block mappings */
1642 if(which & cppvPsrc) sink = paddr; /* If source is physical, then the sink is virtual */
 1643 else source = paddr; /* Otherwise the source is the virtual one */
1c79356b 1644 }
55e303ae
A
1645
1646 lop = (unsigned int)(4096LL - (sink & 4095LL)); /* Assume sink smallest */
1647 if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL)); /* No, source is smaller */
1648
1649 csize = size; /* Assume we can copy it all */
1650 if(lop < size) csize = lop; /* Nope, we can't do it all */
1651
1652 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source before move */
1653 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink before move */
1c79356b 1654
91447636 1655 bcopy_physvir_32(source, sink, csize); /* Do a physical copy, virtually */
55e303ae
A
1656
1657 if(which & cppvFsrc) flush_dcache64(source, csize, 1); /* If requested, flush source after move */
1658 if(which & cppvFsnk) flush_dcache64(sink, csize, 1); /* If requested, flush sink after move */
1c79356b 1659
b4c24cb9 1660/*
55e303ae
A
1661 * Note that for certain ram disk flavors, we may be copying outside of known memory.
 1662 * Therefore, before we try to mark it modified, we check if it exists.
b4c24cb9
A
1663 */
1664
55e303ae
A
1665 if( !(which & cppvNoModSnk)) {
1666 physent = mapping_phys_lookup(sink >> 12, &pindex); /* Get physical entry for sink */
1667 if(physent) mapping_set_mod((ppnum_t)(sink >> 12)); /* Make sure we know that it is modified */
1668 }
1669 if( !(which & cppvNoRefSrc)) {
1670 physent = mapping_phys_lookup(source >> 12, &pindex); /* Get physical entry for source */
 1671 if(physent) mapping_set_ref((ppnum_t)(source >> 12)); /* Make sure we know that it has been referenced */
1672 }
1673 size = size - csize; /* Calculate what is left */
1674 vaddr = vaddr + csize; /* Move to next sink address */
1675 source = source + csize; /* Bump source to next physical address */
1676 sink = sink + csize; /* Bump sink to next physical address */
b4c24cb9 1677 }
55e303ae
A
1678
1679 if(!bothphys) mapping_drop_busy(mp); /* Go ahead and release the mapping of the virtual page if any */
1680 splx(s); /* Open up for interrupts */
b4c24cb9 1681
55e303ae 1682 return KERN_SUCCESS;
b4c24cb9
A
1683}
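/*
 *	A minimal sketch of one way to call hw_copypv_32, based on the flag tests
 *	above: a physical source (cppvPsrc) copied to a kernel virtual sink, with
 *	cppvKmap selecting kernel_map for the virtual side. The addresses and
 *	length are hypothetical; on failure the copy may be partial, per the note
 *	above.
 */
#if 0	/* illustrative only */
static kern_return_t copy_phys_to_kva_example(addr64_t src_phys, vm_offset_t dst_kva, unsigned int nbytes) {

	kern_return_t ret;

	ret = hw_copypv_32(src_phys, (addr64_t)dst_kva, nbytes, cppvPsrc | cppvKmap);	/* Copy, resolving the sink through kernel_map */
	if(ret != KERN_SUCCESS) return ret;		/* A sink page could not be faulted in */
	return KERN_SUCCESS;
}
#endif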
1684
1685
1c79356b 1686/*
55e303ae 1687 * Debug code
1c79356b 1688 */
1c79356b 1689
55e303ae 1690void mapping_verify(void) {
1c79356b 1691
55e303ae 1692 spl_t s;
91447636
A
1693 mappingblok_t *mb, *mbn;
1694 unsigned int relncnt;
55e303ae 1695 unsigned int dumbodude;
de355530 1696
55e303ae
A
1697 dumbodude = 0;
1698
1699 s = splhigh(); /* Don't bother from now on */
de355530 1700
55e303ae
A
1701 mbn = 0; /* Start with none */
1702 for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) { /* Walk the free chain */
91447636 1703 if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) { /* Is tag ok? */
55e303ae
A
1704 panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
1705 }
1706 mbn = mb; /* Remember the last one */
1c79356b 1707 }
55e303ae
A
1708
1709 if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) { /* Do we point to the last one? */
1710 panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
1c79356b
A
1711 }
1712
55e303ae
A
1713 relncnt = 0; /* Clear count */
1714 for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) { /* Walk the release chain */
1715 dumbodude |= mb->mapblokflags; /* Just touch it to make sure it is mapped */
1716 relncnt++; /* Count this one */
1717 }
1c79356b 1718
55e303ae
A
1719 if(mapCtl.mapcreln != relncnt) { /* Is the count on release queue ok? */
1720 panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
1721 }
1c79356b 1722
55e303ae 1723 splx(s); /* Restore 'rupts */
1c79356b 1724
1c79356b
A
1725 return;
1726}
1727
55e303ae 1728void mapping_phys_unused(ppnum_t pa) {
1c79356b 1729
55e303ae 1730 unsigned int pindex;
91447636 1731 phys_entry_t *physent;
1c79356b 1732
55e303ae
A
1733 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
1734 if(!physent) return; /* Did we find the physical page? */
1c79356b 1735
91447636 1736 if(!(physent->ppLink & ~(ppLock | ppFlags))) return; /* No one else is here */
1c79356b 1737
55e303ae 1738 panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
1c79356b 1739
de355530 1740}
d7e50217 1741
91447636
A
1742
1743
1744
1745
1746