[apple/xnu.git] / osfmk / ppc / mappings.c
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * This file is used to maintain the virtual to real mappings for a PowerPC machine.
30 * The code herein is primarily used to bridge between the pmap layer and the hardware layer.
31 * Currently, some of the functionality of this module is contained within pmap.c. We may want to move
32 * all of this into it (or most anyway) for the sake of performance. We shall see as we write it.
33 *
34 * We also depend upon the structure of the phys_entry control block. We do put some processor
35 * specific stuff in there.
36 *
37 */
38
1c79356b
A
39#include <debug.h>
40#include <mach_kgdb.h>
41#include <mach_vm_debug.h>
42#include <db_machine_commands.h>
43
91447636 44#include <mach/mach_types.h>
1c79356b
A
45#include <mach/vm_attributes.h>
46#include <mach/vm_param.h>
91447636
A
47
48#include <kern/kern_types.h>
49#include <kern/thread.h>
50#include <kern/spl.h>
51#include <kern/misc_protos.h>
52
55e303ae 53#include <vm/vm_fault.h>
1c79356b
A
54#include <vm/vm_kern.h>
55#include <vm/vm_map.h>
56#include <vm/vm_page.h>
91447636 57#include <vm/pmap.h>
1c79356b 58
55e303ae 59#include <ppc/exception.h>
1c79356b
A
60#include <ppc/misc_protos.h>
61#include <ppc/proc_reg.h>
1c79356b 62#include <ppc/pmap.h>
1c79356b 63#include <ppc/mem.h>
1c79356b
A
64#include <ppc/new_screen.h>
65#include <ppc/Firmware.h>
66#include <ppc/mappings.h>
67#include <ddb/db_output.h>
68
55e303ae 69#include <console/video_console.h> /* (TEST/DEBUG) */
1c79356b
A
70
71#define PERFTIMES 0
72
1c79356b
A
73vm_map_t mapping_map = VM_MAP_NULL;
74
55e303ae 75unsigned int incrVSID = 0; /* VSID increment value */
1c79356b 76unsigned int mappingdeb0 = 0;
55e303ae
A
77unsigned int mappingdeb1 = 0;
78int ppc_max_adrsp; /* Maximum address spaces */
79
80addr64_t *mapdebug; /* (BRINGUP) */
81extern unsigned int DebugWork; /* (BRINGUP) */
82
55e303ae
A
83void mapping_verify(void);
84void mapping_phys_unused(ppnum_t pa);
85
2d21ac55
A
86int nx_enabled = 1; /* enable no-execute protection */
87int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
88int allow_stack_exec = VM_ABI_32; /* 32-bit apps may execute from the stack by default, 64-bit apps may not */
0c530ab8 89
1c79356b 90/*
91447636
A
91 * ppc_prot translates Mach's representation of protections to that of the PPC hardware.
92 * For Virtual Machines (VMM), we also provide translation entries where the output is
93 * the same as the input, allowing direct specification of PPC protections. Mach's
94 * representations are always in the range 0..7, so they always fall into the first
95 * 8 table entries; direct translations are placed in the range 8..15, so they fall into
96 * the second half of the table.
97 *
1c79356b 98 */
91447636 99
0c530ab8 100unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2, /* Mach -> PPC translations */
91447636 101 0, 1, 2, 3, 4, 5, 6, 7 }; /* VMM direct translations */
1c79356b 102
0c530ab8
A
103
104
105vm_prot_t getProtPPC(int key, boolean_t disable_NX) {
106 vm_prot_t prot;
107
108 prot = ppc_prot[key & 0xF];
109
110 if (key <= 7 && disable_NX == TRUE)
111 prot &= ~mpN;
112
113 return (prot);
114}
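/*
 * Illustrative sketch (not part of the original source): two representative
 * translations through getProtPPC(). Mach keys 0..7 go through the first half
 * of ppc_prot and may have the no-execute bit (mpN) stripped when NX is
 * disabled; keys 8..15 are the VMM pass-through entries and come back as-is.
 */
#if 0	/* example only */
	vm_prot_t hwprot;

	hwprot = getProtPPC(VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Mach key 3 -> ppc_prot[3] = 6 */
	hwprot = getProtPPC(8 | 2, FALSE);				/* VMM direct: entry 10 returns 2 unchanged */
#endif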
115
116
1c79356b
A
117/*
118 * About PPC VSID generation:
119 *
120 * This function is called to generate an address space ID. This space ID must be unique within
121 * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following
122 * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last
123 * 4, so, we can have 2^20 (1M) unique IDs. Each pmap has a unique space ID, so we should be able
124 * to have 1M pmaps at a time, though we couldn't really; we'd run out of memory way before then. The
125 * problem is that only a certain number of pmaps are kept in a free list and if that is full,
126 * they are released. This causes us to lose track of what space IDs are free to be reused.
127 * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
128 * when the space ID wraps, or 4) scan the list of pmaps and find a free one.
129 *
130 * Yet another consideration is the hardware use of the VSID. It is used as part of the hash
131 * calculation for virtual address lookup. An improperly chosen value could potentially cause
132 * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function
133 * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
134 * hash table size, there are 2^13 (8192) PTEGs. Remember, though, that the bottom 4 bits
135 * are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
136 * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
137 * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions
138 * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but
139 * with no overflow. I don't think that this is a problem.
140 *
141 * There may be a problem with the space ID, though. A new space ID is generated (mainly)
142 * whenever there is a fork. There shouldn't really be any problem because (for a 32MB
143 * machine) we can have 512 pmaps and still not have hash collisions for the same address.
144 * The potential problem, though, is if we get long-term pmaps that have space IDs that are
145 * the same modulo 512. We can reduce this problem by having the segment number be bits
146 * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding
147 * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
148 * I don't think that it is as significant as the other, so, I'll make the space ID
149 * with segment first.
150 *
151 * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
152 * While this is a problem that should only happen in periods counted in weeks, it can and
153 * will happen. This is assuming a monotonically increasing space ID. If we were to search
154 * for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
155 * That's pretty unlikely to happen. There couldn't be enough storage to support a million tasks.
156 *
157 * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
158 * locked by free_pmap_lock) that is sorted in VSID sequence order.
159 *
160 * Whenever we need a VSID, we walk the list looking for the next in the sequence from
161 * the last that was freed. Then we allocate that one.
162 *
163 * NOTE: We must be called with interruptions off and free_pmap_lock held.
164 *
165 */
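/*
 * Worked example (illustrative, not part of the original source): using the
 * "space ID << 4 | segment" construction described above, space ID 0x00123
 * mapping segment 5 would use VSID = (0x00123 << 4) | 5 = 0x001235, leaving
 * 2^20 distinct space IDs within the 24-bit VSID.
 */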
166
167/*
168 * mapping_init();
169 * Do anything that needs to be done before the mapping system can be used.
170 * Hash table must be initialized before we call this.
171 *
172 * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1;
173 */
174
175void mapping_init(void) {
176
55e303ae 177 unsigned int tmp, maxeff, rwidth;
d7e50217 178
55e303ae 179 ppc_max_adrsp = maxAdrSp; /* Set maximum address spaces */
1c79356b 180
55e303ae 181 maxeff = 32; /* Assume 32-bit */
91447636 182 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64; /* Is this a 64-bit machine? */
d7e50217 183
91447636 184 rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb; /* Reduce address width by width of address space ID */
55e303ae 185 if(rwidth > maxeff) rwidth = maxeff; /* If we still have more virtual than effective, clamp at effective */
de355530 186
55e303ae 187 vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth); /* Get maximum effective address supported */
91447636 188 vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr); /* Get maximum physical address supported */
de355530 189
91447636 190 if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) { /* Are we 64 bit? */
55e303ae
A
191 tmp = 12; /* Size of hash space */
192 }
193 else {
194 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
195 tmp = 32 - tmp; /* Size of hash space */
196 }
de355530 197
55e303ae
A
198 incrVSID = 1 << ((tmp + 1) >> 1); /* Get ceiling of sqrt of table size */
199 incrVSID |= 1 << ((tmp + 1) >> 2); /* Get ceiling of quadroot of table size */
200 incrVSID |= 1; /* Set bit and add 1 */
de355530 201
55e303ae 202 return;
1c79356b 203
de355530 204}
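/*
 * Worked example (illustrative, not part of the original source): on a 64-bit
 * machine tmp is 12, so
 *     incrVSID = (1 << ((12 + 1) >> 1)) | (1 << ((12 + 1) >> 2)) | 1
 *              = 64 + 8 + 1 = 73,
 * which matches the stated size^(1/2) + size^(1/4) + 1 for size = 2^12.
 */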
1c79356b 205
55e303ae 206
1c79356b 207/*
55e303ae
A
208 * mapping_remove(pmap_t pmap, addr64_t va);
209 * Given a pmap and virtual address, this routine finds the mapping and unmaps it.
210 * The mapping block will be added to
211 * the free list. If the free list threshold is reached, garbage collection will happen.
0b4e3aa0 212 *
55e303ae
A
213 * We also pass back the next higher mapped address. This is done so that the higher level
214 * pmap_remove function can release a range of addresses simply by calling mapping_remove
215 * in a loop until it finishes the range or is returned a vaddr of 0.
0b4e3aa0 216 *
55e303ae 217 * Note that if the mapping is not found, we return the next VA ORed with 1
0b4e3aa0
A
218 *
219 */
0b4e3aa0 220
55e303ae
A
221addr64_t mapping_remove(pmap_t pmap, addr64_t va) { /* Remove a single mapping for this VADDR
222 Returns the next mapped VA; the low bit is set if no mapping was found */
0b4e3aa0 223
91447636 224 mapping_t *mp;
55e303ae 225 addr64_t nextva;
91447636 226 ppnum_t pgaddr;
de355530 227
91447636
A
228 va &= ~PAGE_MASK; /* Scrub noise bits */
229
230 do { /* Keep trying until we truly fail */
55e303ae 231 mp = hw_rem_map(pmap, va, &nextva); /* Remove a mapping from this pmap */
91447636 232 } while (mapRtRemove == ((unsigned int)mp & mapRetCode));
de355530 233
91447636
A
234 switch ((unsigned int)mp & mapRetCode) {
235 case mapRtOK:
236 break; /* Mapping removed */
237 case mapRtNotFnd:
238 return (nextva | 1); /* Nothing found to unmap */
239 default:
2d21ac55 240 panic("mapping_remove: hw_rem_map failed - pmap = %p, va = %016llX, code = %p\n",
91447636
A
241 pmap, va, mp);
242 break;
de355530 243 }
91447636
A
244
245 pgaddr = mp->mpPAddr; /* Get page number from mapping */
de355530 246
55e303ae 247 mapping_free(mp); /* Add mapping to the free list */
91447636
A
248
249 if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
250 /* If this is an assisted host, scrub any guest mappings */
251 unsigned int idx;
252 phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
253 /* Get physent for our physical page */
254 if (!physent) { /* No physent, could be in I/O area, so exit */
255 return (nextva);
256 }
257
258 do { /* Iterate 'till all guest mappings are gone */
259 mp = hw_scrub_guest(physent, pmap); /* Attempt to scrub a guest mapping */
260 switch ((unsigned int)mp & mapRetCode) {
261 case mapRtGuest: /* Found a guest mapping */
262 case mapRtNotFnd: /* Mapping was there, but disappeared, must retry */
263 case mapRtEmpty: /* No guest mappings left to scrub */
264 break;
265 default:
2d21ac55 266 panic("mapping_remove: hw_scrub_guest failed - physent = %p, code = %p\n",
91447636
A
267 physent, mp); /* Cry havoc, cry wrack,
268 at least we die with harness on our backs */
269 break;
270 }
271 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
272 }
1c79356b 273
55e303ae
A
274 return nextva; /* Tell them we did it */
275}
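/*
 * Illustrative sketch (assumption, not part of the original source): how a
 * higher-level remove could walk a range using the next mapped address that
 * mapping_remove hands back, as the header comment above describes. The low
 * bit flags "nothing mapped at this va" and is stripped each iteration;
 * "start" and "end" are assumed to come from the caller.
 */
#if 0	/* example only */
	addr64_t next = start;

	while (next && (next < end)) {
		next = mapping_remove(pmap, next) & ~1ULL;	/* Remove one mapping, step to the next VA */
	}
#endif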
de355530 276
1c79356b 277/*
55e303ae 278 * mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one
1c79356b
A
279 *
280 * This routine takes the given parameters, builds a mapping block, and queues it into the
281 * correct lists.
282 *
55e303ae
A
283 * pmap (virtual address) is the pmap to map into
284 * va (virtual address) is the 64-bit virtual address that is being mapped
285 * pa (physical page number) is the physical page number (i.e., physical address >> 12). This is
286 * a 32-bit quantity.
287 * Flags:
288 * block if 1, mapping is a block, size parameter is used. Note: we do not keep
289 * reference and change information or allow protection changes of blocks.
290 * any changes must first unmap and then remap the area.
291 * use attribute Use specified attributes for map, not defaults for physical page
292 * perm Mapping is permanent
293 * cache inhibited Cache inhibited (used if use attribute or block set )
294 * guarded Guarded access (used if use attribute or block set )
3a60a9f5 295 * size size of block in pages - 1 (not used if not block)
55e303ae
A
296 * prot VM protection bits
297 * attr Cachability/Guardedness
298 *
299 * Returns 0 if mapping was successful. Returns vaddr that overlaps/collides.
300 * Returns 1 for any other failure.
301 *
302 * Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
303 * for I/O and default the cache attributes appropriately. The caller is free to set whatever they want, however.
304 *
305 * If there is any physical page that is not found in the physent table, the mapping is forced to be a
306 * block mapping of length 1. This keeps us from trying to update a physent during later mapping use,
307 * e.g., fault handling.
308 *
1c79356b 309 *
1c79356b
A
310 */
311
55e303ae 312addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) { /* Make an address mapping */
1c79356b 313
91447636
A
314 register mapping_t *mp;
315 addr64_t colladdr, psmask;
316 unsigned int pindex, mflags, pattr, wimg, rc;
317 phys_entry_t *physent;
318 int nlists, pcf;
0c530ab8 319 boolean_t disable_NX = FALSE;
de355530 320
55e303ae
A
321 pindex = 0;
322
323 mflags = 0x01000000; /* Start building mpFlags field (busy count = 1) */
91447636
A
324
325 pcf = (flags & mmFlgPcfg) >> 24; /* Get the physical page config index */
326 if(!(pPcfg[pcf].pcfFlags)) { /* Validate requested physical page configuration */
2d21ac55 327 panic("mapping_make: invalid physical page configuration request - pmap = %p, va = %016llX, cfg = %d\n",
91447636
A
328 pmap, va, pcf);
329 }
330
331 psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1; /* Mask to isolate any offset into a page */
332 if(va & psmask) { /* Make sure we are page aligned on virtual */
2d21ac55 333 panic("mapping_make: attempt to map unaligned vaddr - pmap = %p, va = %016llX, cfg = %d\n",
91447636
A
334 pmap, va, pcf);
335 }
336 if(((addr64_t)pa << 12) & psmask) { /* Make sure we are page aligned on physical */
2d21ac55 337 panic("mapping_make: attempt to map unaligned paddr - pmap = %p, pa = %08X, cfg = %d\n",
91447636
A
338 pmap, pa, pcf);
339 }
1c79356b 340
91447636
A
341 mflags |= (pcf << (31-mpPcfgb)); /* Insert physical page configuration index */
342
55e303ae 343 if(!(flags & mmFlgBlock)) { /* Is this a block map? */
1c79356b 344
55e303ae
A
345 size = 1; /* Set size to 1 page if not block */
346
347 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
348 if(!physent) { /* Did we find the physical page? */
349 mflags |= mpBlock; /* Force this to a block if no physent */
55e303ae
A
350 pattr = 0; /* Assume normal, non-I/O memory */
351 if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
1c79356b 352 }
91447636 353 else pattr = ((physent->ppLink & (ppI | ppG)) >> 60); /* Get the default attributes from physent */
de355530 354
55e303ae 355 if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
1c79356b 356 }
55e303ae
A
357 else { /* This is a block */
358
359 pattr = flags & (mmFlgCInhib | mmFlgGuarded); /* Use requested attributes */
360 mflags |= mpBlock; /* Show that this is a block */
3a60a9f5
A
361
362 if(size > pmapSmallBlock) { /* Is it one? */
363 if(size & 0x00001FFF) return mapRtBadSz; /* Fail if bigger than 256MB and not a 32MB multiple */
364 size = size >> 13; /* Convert to 32MB chunks */
365 mflags = mflags | mpBSu; /* Show 32MB basic size unit */
366 }
de355530 367 }
1c79356b 368
55e303ae
A
369 wimg = 0x2; /* Set basic PPC wimg to 0b0010 - Coherent */
370 if(pattr & mmFlgCInhib) wimg |= 0x4; /* Add cache inhibited if we need to */
371 if(pattr & mmFlgGuarded) wimg |= 0x1; /* Add guarded if we need to */
1c79356b 372
55e303ae 373 mflags = mflags | (pindex << 16); /* Stick in the physical entry table index */
1c79356b 374
55e303ae 375 if(flags & mmFlgPerm) mflags |= mpPerm; /* Set permanent mapping */
1c79356b 376
55e303ae 377 size = size - 1; /* Change size to offset */
3a60a9f5 378 if(size > 0xFFFF) return mapRtBadSz; /* Leave if size is too big */
1c79356b 379
55e303ae 380 nlists = mapSetLists(pmap); /* Set number of lists this will be on */
de355530 381
55e303ae
A
382 mp = mapping_alloc(nlists); /* Get a spare mapping block with this many lists */
383
384 /* the mapping is zero except that the mpLists field is set */
385 mp->mpFlags |= mflags; /* Add in the rest of the flags to mpLists */
386 mp->mpSpace = pmap->space; /* Set the address space/pmap lookup ID */
91447636 387 mp->u.mpBSize = size; /* Set the size */
55e303ae
A
388 mp->mpPte = 0; /* Set the PTE invalid */
389 mp->mpPAddr = pa; /* Set the physical page number */
0c530ab8
A
390
391 if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
392 disable_NX = TRUE;
393
394 mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, disable_NX); /* Add the protection and attributes to the field */
395
55e303ae
A
396 while(1) { /* Keep trying... */
397 colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */
91447636
A
398 rc = colladdr & mapRetCode; /* Separate return code */
399 colladdr &= ~mapRetCode; /* Clean up collision effective address */
55e303ae 400
91447636
A
401 switch (rc) {
402 case mapRtOK:
3a60a9f5 403 return mapRtOK; /* Mapping added successfully */
91447636
A
404
405 case mapRtRemove: /* Remove in progress */
406 (void)mapping_remove(pmap, colladdr); /* Lend a helping hand to another CPU doing block removal */
407 continue; /* Retry mapping add */
408
409 case mapRtMapDup: /* Identical mapping already present */
410 mapping_free(mp); /* Free duplicate mapping */
3a60a9f5 411 return mapRtOK; /* Return success */
91447636
A
412
413 case mapRtSmash: /* Mapping already present but does not match new mapping */
414 mapping_free(mp); /* Free duplicate mapping */
3a60a9f5
A
415 return (colladdr | mapRtSmash); /* Return colliding address, with some dirt added to avoid
416 confusion if effective address is 0 */
91447636 417 default:
2d21ac55 418 panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %p, va = %016llX, mapping = %p\n",
91447636 419 colladdr, rc, pmap, va, mp); /* Die dead */
1c79356b 420 }
1c79356b 421
1c79356b
A
422 }
423
91447636 424 return 1; /* Unreachable, but pleases compiler */
1c79356b
A
425}
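/*
 * Illustrative sketch (assumption, not part of the original source): mapping a
 * single physical page read/write into the kernel pmap with default attributes
 * and backing it out again. "va" and "pp" are assumed to come from the caller;
 * a non-zero return means a collision or failure as described above.
 */
#if 0	/* example only */
	addr64_t rc;

	rc = mapping_make(kernel_pmap, va, pp, 0, 1, VM_PROT_READ | VM_PROT_WRITE);
	if (rc) panic("example: mapping_make failed - rc = %016llX\n", rc);

	(void)mapping_remove(kernel_pmap, va);			/* Tear the example mapping down */
#endif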
426
427
428/*
55e303ae 429 * mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping
1c79356b 430 *
55e303ae
A
431 * Looks up the vaddr and returns the mapping and the next mapped va
432 * If full is true, it will descend through all nested pmaps to find actual mapping
1c79356b 433 *
55e303ae 434 * Must be called with interruptions disabled or we can hang trying to remove found mapping.
1c79356b 435 *
55e303ae
A
436 * Returns 0 if no mapping is found, or the address of the mapping block if one is.
437 * Note that the mapping's busy count is bumped. It is the responsibility of the caller
438 * to drop the count. If this is not done, any attempt to remove the mapping will hang.
1c79356b 439 *
55e303ae 440 * NOTE: The nextva field is not valid when full is TRUE.
1c79356b 441 *
1c79356b
A
442 *
443 */
444
91447636 445mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) { /* Find an address mapping */
de355530 446
91447636 447 register mapping_t *mp;
55e303ae
A
448 addr64_t curva;
449 pmap_t curpmap;
450 int nestdepth;
de355530 451
55e303ae
A
452 curpmap = pmap; /* Remember entry */
453 nestdepth = 0; /* Set nest depth */
91447636 454 curva = (addr64_t)va; /* Set current va */
de355530 455
55e303ae 456 while(1) {
1c79356b 457
55e303ae
A
458 mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */
459 if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
2d21ac55 460 panic("mapping_find: pmap lock failure - rc = %p, pmap = %p\n", mp, curpmap); /* Die... */
1c79356b 461 }
55e303ae 462
91447636 463 if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break; /* Are we done looking? */
1c79356b 464
91447636
A
465 if((mp->mpFlags & mpType) != mpNest) { /* Don't chain through anything other than a nested pmap */
466 mapping_drop_busy(mp); /* We have everything we need from the mapping */
2d21ac55 467 mp = NULL; /* Set not found */
55e303ae 468 break;
1c79356b 469 }
1c79356b 470
55e303ae 471 if(nestdepth++ > 64) { /* Have we nested too far down? */
2d21ac55 472 panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %p, curpmap = %p\n",
55e303ae 473 va, curva, pmap, curpmap);
1c79356b 474 }
55e303ae
A
475
476 curva = curva + mp->mpNestReloc; /* Relocate va to new pmap */
477 curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr; /* Get the address of the nested pmap */
478 mapping_drop_busy(mp); /* We have everything we need from the mapping */
479
1c79356b
A
480 }
481
55e303ae 482 return mp; /* Return the mapping if we found one */
1c79356b
A
483}
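/*
 * Illustrative sketch (assumption, not part of the original source): a full
 * (nest-following) lookup. The busy count taken by mapping_find must be
 * dropped by the caller, and interruptions are assumed to be disabled.
 */
#if 0	/* example only */
	addr64_t nextva;
	mapping_t *mp;

	mp = mapping_find(pmap, va, &nextva, 1);	/* Chase down through nested pmaps */
	if (mp) {
		/* ... examine mp->mpPAddr, mp->mpVAddr, etc. ... */
		mapping_drop_busy(mp);			/* Caller must drop the busy count */
	}
#endif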
484
1c79356b 485/*
91447636 486 * void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
1c79356b 487 *
55e303ae
A
488 * This routine takes a pmap and virtual address and changes
489 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
490 * the protection is changed.
1c79356b 491 *
55e303ae
A
492 * We return success if we change the protection or if there is no page mapped at va. We return failure if
493 * the va corresponds to a block mapped area or the mapping is permanent.
de355530 494 *
1c79356b
A
495 *
496 */
1c79356b 497
91447636
A
498void
499mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */
1c79356b 500
55e303ae 501 int ret;
0c530ab8
A
502 boolean_t disable_NX = FALSE;
503
504 if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
505 disable_NX = TRUE;
506
507 ret = hw_protect(pmap, va, getProtPPC(prot, disable_NX), nextva); /* Try to change the protect here */
1c79356b 508
55e303ae
A
509 switch (ret) { /* Decode return code */
510
511 case mapRtOK: /* Changed */
512 case mapRtNotFnd: /* Didn't find it */
55e303ae
A
513 case mapRtBlock: /* Block map, just ignore request */
514 case mapRtNest: /* Nested pmap, just ignore request */
55e303ae
A
515 break;
516
517 default:
2d21ac55 518 panic("mapping_protect: hw_protect failed - rc = %d, pmap = %p, va = %016llX\n", ret, pmap, va);
55e303ae 519
1c79356b
A
520 }
521
1c79356b 522}
1c79356b
A
523
524/*
55e303ae 525 * void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
1c79356b
A
526 *
527 * This routine takes a physical entry and runs through all mappings attached to it and changes
528 * the protection. If there are PTEs associated with the mappings, they will be invalidated before
0c530ab8
A
529 * the protection is changed. There is no limitation on changes, e.g., higher to lower, lower to
530 * higher; however, changes to execute protection are ignored.
1c79356b 531 *
55e303ae
A
532 * Any mapping that is marked permanent is not changed
533 *
1c79356b
A
534 * Phys_entry is unlocked.
535 */
536
55e303ae 537void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */
1c79356b 538
55e303ae 539 unsigned int pindex;
91447636 540 phys_entry_t *physent;
0c530ab8 541
55e303ae
A
542 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
543 if(!physent) { /* Did we find the physical page? */
544 panic("mapping_protect_phys: invalid physical page %08X\n", pa);
de355530 545 }
1c79356b 546
91447636 547 hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
0c530ab8 548 getProtPPC(prot, FALSE), hwpPurgePTE); /* Set the new protection for page and mappings */
de355530 549
0c530ab8 550 return; /* Leave... */
1c79356b
A
551}
552
553
554/*
55e303ae 555 * void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
1c79356b
A
556 *
557 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 558 * off the change bit.
1c79356b
A
559 */
560
55e303ae
A
561void mapping_clr_mod(ppnum_t pa) { /* Clears the change bit of a physical page */
562
563 unsigned int pindex;
91447636 564 phys_entry_t *physent;
55e303ae
A
565
566 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
567 if(!physent) { /* Did we find the physical page? */
568 panic("mapping_clr_mod: invalid physical page %08X\n", pa);
569 }
1c79356b 570
91447636
A
571 hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
572 0, hwpPurgePTE); /* Clear change for page and mappings */
1c79356b
A
573 return; /* Leave... */
574}
575
576
577/*
55e303ae 578 * void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
1c79356b
A
579 *
580 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 581 * on the change bit.
1c79356b
A
582 */
583
55e303ae
A
584void mapping_set_mod(ppnum_t pa) { /* Sets the change bit of a physical page */
585
586 unsigned int pindex;
91447636 587 phys_entry_t *physent;
55e303ae
A
588
589 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
590 if(!physent) { /* Did we find the physical page? */
591 panic("mapping_set_mod: invalid physical page %08X\n", pa);
592 }
d7e50217 593
91447636
A
594 hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
595 0, hwpNoopPTE); /* Set change for page and mappings */
1c79356b
A
596 return; /* Leave... */
597}
598
599
600/*
55e303ae 601 * void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
1c79356b 602 *
de355530 603 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 604 * off the reference bit.
1c79356b
A
605 */
606
55e303ae 607void mapping_clr_ref(ppnum_t pa) { /* Clears the reference bit of a physical page */
de355530 608
55e303ae 609 unsigned int pindex;
91447636 610 phys_entry_t *physent;
55e303ae
A
611
612 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
613 if(!physent) { /* Did we find the physical page? */
614 panic("mapping_clr_ref: invalid physical page %08X\n", pa);
d7e50217 615 }
55e303ae 616
91447636
A
617 hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
618 0, hwpPurgePTE); /* Clear reference for page and mappings */
de355530
A
619 return; /* Leave... */
620}
621
622
623/*
55e303ae 624 * void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
de355530
A
625 *
626 * This routine takes a physical entry and runs through all mappings attached to it and turns
55e303ae 627 * on the reference bit.
de355530
A
628 */
629
55e303ae
A
630void mapping_set_ref(ppnum_t pa) { /* Sets the reference bit of a physical page */
631
632 unsigned int pindex;
91447636 633 phys_entry_t *physent;
55e303ae
A
634
635 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
636 if(!physent) { /* Did we find the physical page? */
637 panic("mapping_set_ref: invalid physical page %08X\n", pa);
638 }
d7e50217 639
91447636
A
640 hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
641 0, hwpNoopPTE); /* Set reference for page and mappings */
de355530 642 return; /* Leave... */
1c79356b
A
643}
644
645
646/*
91447636 647 * boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
1c79356b
A
648 *
649 * This routine takes a physical entry and runs through all mappings attached to it and tests
55e303ae 650 * the changed bit.
1c79356b
A
651 */
652
55e303ae
A
653boolean_t mapping_tst_mod(ppnum_t pa) { /* Tests the change bit of a physical page */
654
655 unsigned int pindex, rc;
91447636 656 phys_entry_t *physent;
55e303ae
A
657
658 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
659 if(!physent) { /* Did we find the physical page? */
660 panic("mapping_tst_mod: invalid physical page %08X\n", pa);
661 }
d7e50217 662
91447636
A
663 rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
664 0, hwpMergePTE); /* Test change for page and mappings */
55e303ae 665 return ((rc & (unsigned long)ppC) != 0); /* Leave with change bit */
1c79356b
A
666}
667
668
669/*
91447636 670 * boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
de355530
A
671 *
672 * This routine takes a physical entry and runs through all mappings attached to it and tests
55e303ae 673 * the reference bit.
1c79356b
A
674 */
675
55e303ae
A
676boolean_t mapping_tst_ref(ppnum_t pa) { /* Tests the reference bit of a physical page */
677
678 unsigned int pindex, rc;
91447636 679 phys_entry_t *physent;
55e303ae
A
680
681 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
682 if(!physent) { /* Did we find the physical page? */
683 panic("mapping_tst_ref: invalid physical page %08X\n", pa);
684 }
1c79356b 685
91447636
A
686 rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
687 0, hwpMergePTE); /* Test reference for page and mappings */
55e303ae 688 return ((rc & (unsigned long)ppR) != 0); /* Leave with reference bit */
1c79356b
A
689}
690
691
91447636
A
692/*
693 * unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
694 *
695 * This routine takes a physical entry and runs through all mappings attached to it and tests
696 * their reference and changed bits.
697 */
698
699unsigned int mapping_tst_refmod(ppnum_t pa) { /* Tests the reference and change bits of a physical page */
700
701 unsigned int pindex, rc;
702 phys_entry_t *physent;
703
704 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
705 if (!physent) { /* Did we find the physical page? */
706 panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
707 }
708
709 rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
710 0, hwpMergePTE); /* Test reference and change bits in page and mappings */
711 return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
712 /* Convert bits to generic format and return */
713
714}
715
716
717/*
718 * void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
719 * by mask of a physical page
720 *
721 * This routine takes a physical entry and runs through all mappings attached to it and turns
722 * off the reference and change bits selected by mask.
723 */
724
725void mapping_clr_refmod(ppnum_t pa, unsigned int mask) { /* Clears the reference and change bits of a physical page */
726
727 unsigned int pindex;
728 phys_entry_t *physent;
729 unsigned int ppcMask;
730
731 physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
732 if(!physent) { /* Did we find the physical page? */
733 panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
734 }
735
736 ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
737 /* Convert mask bits to PPC-specific format */
738 hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
739 ppcMask, hwpPurgePTE); /* Clear reference and change bits for page and mappings */
740 return; /* Leave... */
741}
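/*
 * Illustrative sketch (assumption, not part of the original source): harvesting
 * and then clearing the referenced/modified state of a physical page in one
 * pass, using the generic VM_MEM_* bits these routines trade in.
 */
#if 0	/* example only */
	unsigned int bits;

	bits = mapping_tst_refmod(pp);			/* Gather VM_MEM_REFERENCED / VM_MEM_MODIFIED */
	if (bits & VM_MEM_MODIFIED) {
		/* ... page is dirty; a pager would schedule a clean here ... */
	}
	mapping_clr_refmod(pp, bits);			/* Clear exactly the bits just sampled */
#endif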
742
743
744
de355530 745/*
55e303ae 746 * phys_ent *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the phys_entry for a physical page
de355530 747 *
55e303ae
A
748 * This routine takes a physical page number and returns the phys_entry associated with it. It also
749 * calculates the index of the bank (memory region) that the entry belongs to.
de355530
A
751 */
752
2d21ac55
A
753phys_entry_t *
754mapping_phys_lookup(ppnum_t pp, unsigned int *pindex)
755{ /* Finds the physical entry for the page */
756 unsigned int i;
55e303ae
A
757
758 for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */
759 if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */
760 if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue; /* This isn't ours */
761
762 *pindex = (i * sizeof(mem_region_t)) / 4; /* Make the word index to this list */
763
764 return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart]; /* Return the physent pointer */
765 }
766
91447636 767 return (phys_entry_t *)0; /* Shucks, can't find it... */
55e303ae 768
de355530 769}
d7e50217
A
770
771
55e303ae
A
772
773
1c79356b
A
774/*
775 * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
776 *
777 * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
778 * the number of free mappings remaining, and if below a threshold, replenishes them.
779 * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
780 * a new one is allocated.
781 *
55e303ae
A
782 * This routine allocates and/or frees memory and must be called from a safe place.
783 * Currently, vm_pageout_scan is the safest place.
1c79356b
A
784 */
785
786thread_call_t mapping_adjust_call;
787static thread_call_data_t mapping_adjust_call_data;
788
789void mapping_adjust(void) { /* Adjust free mappings */
790
55e303ae 791 kern_return_t retr = KERN_SUCCESS;
91447636 792 mappingblok_t *mb, *mbn;
1c79356b 793 spl_t s;
91447636 794 int allocsize;
1c79356b
A
795
796 if(mapCtl.mapcmin <= MAPPERBLOK) {
55e303ae 797 mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
1c79356b
A
798
799#if DEBUG
800 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
801 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
802 mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
803#endif
804 }
805
806 s = splhigh(); /* Don't bother from now on */
807 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
808 panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */
809 }
810
811 if (mapping_adjust_call == NULL) {
9bccf70c
A
812 thread_call_setup(&mapping_adjust_call_data,
813 (thread_call_func_t)mapping_adjust,
814 (thread_call_param_t)NULL);
1c79356b
A
815 mapping_adjust_call = &mapping_adjust_call_data;
816 }
817
818 while(1) { /* Keep going until we've got enough */
819
820 allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */
821 if(allocsize < 1) break; /* Leave if we have all we need */
822
823 if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? */
824 mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */
825 mapCtl.mapcreln--; /* Back off the count */
826 allocsize = MAPPERBLOK; /* Show we allocated one block */
827 }
55e303ae 828 else { /* No free ones, try to get it */
1c79356b
A
829
830 allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */
55e303ae 831
1c79356b
A
832 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
833 splx(s); /* Restore 'rupts */
834
835 for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
836 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
837 if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
9bccf70c 838 break;
1c79356b
A
839 }
840 if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */
841 }
55e303ae 842
1c79356b
A
843 allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */
844 s = splhigh(); /* Don't bother from now on */
845 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
846 panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */
847 }
848 }
55e303ae 849
9bccf70c
A
850 if (retr != KERN_SUCCESS)
851 break; /* Failed to allocate, bail out... */
1c79356b
A
852 for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */
853 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
91447636 854 mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
1c79356b 855 }
55e303ae 856
1c79356b
A
857 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
858 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
859 }
860
861 if(mapCtl.mapcholdoff) { /* Should we hold off this release? */
862 mapCtl.mapcrecurse = 0; /* We are done now */
863 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
864 splx(s); /* Restore 'rupts */
865 return; /* Return... */
866 }
867
868 mbn = mapCtl.mapcrel; /* Get first pending release block */
2d21ac55 869 mapCtl.mapcrel = NULL; /* Dequeue them */
1c79356b
A
870 mapCtl.mapcreln = 0; /* Set count to 0 */
871
872 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
873 splx(s); /* Restore 'rupts */
874
875 while((unsigned int)mbn) { /* Toss 'em all */
876 mb = mbn->nextblok; /* Get the next */
55e303ae 877
1c79356b 878 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */
55e303ae 879
1c79356b
A
880 mbn = mb; /* Chain to the next */
881 }
882
55e303ae 883 __asm__ volatile("eieio"); /* Make sure all is well */
1c79356b
A
884 mapCtl.mapcrecurse = 0; /* We are done now */
885 return;
886}
887
888/*
889 * mapping_free(mapping *mp) - release a mapping to the free list
890 *
891 * This routine takes a mapping and adds it to the free list.
892 * If this mapping makes the block non-empty, we queue it to the free block list.
893 * NOTE: we might want to queue it to the end to squelch the pathological
894 * case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
895 * If this release fills a block and we are above the threshold, we release the block
896 */
897
898void mapping_free(struct mapping *mp) { /* Release a mapping */
899
91447636 900 mappingblok_t *mb, *mbn;
1c79356b 901 spl_t s;
55e303ae 902 unsigned int full, mindx, lists;
1c79356b 903
55e303ae 904 mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6; /* Get index to mapping */
91447636 905 mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */
55e303ae
A
906 lists = (mp->mpFlags & mpLists); /* get #lists */
907 if ((lists == 0) || (lists > kSkipListMaxLists)) /* panic if out of range */
908 panic("mapping_free: mpLists invalid\n");
909
910#if 0
911 mp->mpFlags = 0x99999999; /* (BRINGUP) */
912 mp->mpSpace = 0x9999; /* (BRINGUP) */
91447636 913 mp->u.mpBSize = 0x9999; /* (BRINGUP) */
55e303ae
A
914 mp->mpPte = 0x99999998; /* (BRINGUP) */
915 mp->mpPAddr = 0x99999999; /* (BRINGUP) */
916 mp->mpVAddr = 0x9999999999999999ULL; /* (BRINGUP) */
917 mp->mpAlias = 0x9999999999999999ULL; /* (BRINGUP) */
918 mp->mpList0 = 0x9999999999999999ULL; /* (BRINGUP) */
919 mp->mpList[0] = 0x9999999999999999ULL; /* (BRINGUP) */
920 mp->mpList[1] = 0x9999999999999999ULL; /* (BRINGUP) */
921 mp->mpList[2] = 0x9999999999999999ULL; /* (BRINGUP) */
922
923 if(lists > mpBasicLists) { /* (BRINGUP) */
924 mp->mpList[3] = 0x9999999999999999ULL; /* (BRINGUP) */
925 mp->mpList[4] = 0x9999999999999999ULL; /* (BRINGUP) */
926 mp->mpList[5] = 0x9999999999999999ULL; /* (BRINGUP) */
927 mp->mpList[6] = 0x9999999999999999ULL; /* (BRINGUP) */
928 mp->mpList[7] = 0x9999999999999999ULL; /* (BRINGUP) */
929 mp->mpList[8] = 0x9999999999999999ULL; /* (BRINGUP) */
930 mp->mpList[9] = 0x9999999999999999ULL; /* (BRINGUP) */
931 mp->mpList[10] = 0x9999999999999999ULL; /* (BRINGUP) */
932 }
933#endif
934
1c79356b
A
935
936 s = splhigh(); /* Don't bother from now on */
937 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
938 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
939 }
940
55e303ae 941 full = !(mb->mapblokfree[0] | mb->mapblokfree[1]); /* See if full now */
1c79356b 942 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */
55e303ae
A
943 if ( lists > mpBasicLists ) { /* if big block, lite the 2nd bit too */
944 mindx++;
945 mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
946 mapCtl.mapcfree++;
947 mapCtl.mapcinuse--;
948 }
1c79356b
A
949
950 if(full) { /* If it was full before this: */
951 mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */
952 mapCtl.mapcnext = mb; /* Chain us to the head of the list */
9bccf70c
A
953 if(!((unsigned int)mapCtl.mapclast))
954 mapCtl.mapclast = mb;
1c79356b
A
955 }
956
957 mapCtl.mapcfree++; /* Bump free count */
958 mapCtl.mapcinuse--; /* Decrement in use count */
959
960 mapCtl.mapcfreec++; /* Count total calls */
961
962 if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */
55e303ae 963 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) { /* See if empty now */
1c79356b
A
964
965 if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
966 mapCtl.mapcnext = mb->nextblok; /* Unchain us */
2d21ac55 967 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = NULL; /* If last, remove last */
1c79356b
A
968 }
969 else { /* We're not first */
970 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
971 if(mbn->nextblok == mb) break; /* Is the next one our's? */
972 }
2d21ac55 973 if(!mbn) panic("mapping_free: attempt to release mapping block (%p) not on list\n", mp);
1c79356b
A
974 mbn->nextblok = mb->nextblok; /* Dequeue us */
975 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
976 }
977
978 if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */
979 mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */
980 mapCtl.mapcnext = mb; /* Chain us to the head */
981 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
982 }
983 else {
984 mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */
985 mapCtl.mapcreln++; /* Count on release list */
986 mb->nextblok = mapCtl.mapcrel; /* Move pointer */
987 mapCtl.mapcrel = mb; /* Chain us in front */
988 }
989 }
990 }
991
992 if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */
993 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
994 thread_call_enter(mapping_adjust_call); /* Go toss some */
995 }
996 }
997 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
998 splx(s); /* Restore 'rupts */
999
1000 return; /* Bye, dude... */
1001}
1002
1003
1004/*
55e303ae 1005 * mapping_alloc(lists) - obtain a mapping from the free list
1c79356b 1006 *
55e303ae
A
1007 * This routine takes a mapping off of the free list and returns its address.
1008 * The mapping is zeroed, and its mpLists count is set. The caller passes in
1009 * the number of skiplists it would prefer; if this number is greater than
1010 * mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
1011 * just two consecutive free entries coalesced into one. If we cannot find
1012 * two consecutive free entries, we clamp the list count down to mpBasicLists
1013 * and return a basic 64-byte node. Our caller never knows the difference.
1c79356b 1014 *
55e303ae 1015 * If this allocation empties a block, we remove it from the free list.
1c79356b
A
1016 * If this allocation drops the total number of free entries below a threshold,
1017 * we allocate a new block.
1018 *
1019 */
91447636 1020decl_simple_lock_data(extern,free_pmap_lock)
1c79356b 1021
91447636
A
1022mapping_t *
1023mapping_alloc(int lists) { /* Obtain a mapping */
1c79356b 1024
91447636
A
1025 register mapping_t *mp;
1026 mappingblok_t *mb, *mbn;
1c79356b
A
1027 spl_t s;
1028 int mindx;
55e303ae
A
1029 int big = (lists > mpBasicLists); /* set flag if big block req'd */
1030 pmap_t refpmap, ckpmap;
1031 unsigned int space, i;
55e303ae 1032 addr64_t va, nextva;
55e303ae
A
1033 boolean_t found_mapping;
1034 boolean_t do_rescan;
1035
1c79356b
A
1036 s = splhigh(); /* Don't bother from now on */
1037 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1038 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1039 }
1040
55e303ae
A
1041 if(!((unsigned int)mapCtl.mapcnext)) { /* Are there any free mappings? */
1042
1043/*
1044 * No free mappings. First, there may be some mapping blocks on the "to be released"
1045 * list. If so, rescue one. Otherwise, try to steal a couple blocks worth.
1046 */
1047
91447636 1048 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
55e303ae
A
1049 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1050 mapCtl.mapcreln--; /* Back off the count */
1051 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1052 goto rescued;
1053 }
1054
1055 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1056
1057 simple_lock(&free_pmap_lock);
1058
1059 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1060 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1061 }
1062
1063 if (!((unsigned int)mapCtl.mapcnext)) {
1064
1065 refpmap = (pmap_t)cursor_pmap->pmap_link.next;
1066 space = mapCtl.mapcflush.spacenum;
1067 while (refpmap != cursor_pmap) {
1068 if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
1069 refpmap = (pmap_t)refpmap->pmap_link.next;
de355530 1070 }
55e303ae
A
1071
1072 ckpmap = refpmap;
1073 va = mapCtl.mapcflush.addr;
1074 found_mapping = FALSE;
1075
1076 while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
1077
1078 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
1079
1080 ckpmap = (pmap_t)ckpmap->pmap_link.next;
1081
91447636
A
1082 /* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
1083 shadow assist active.
1084 */
1085 if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
1086 && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {
55e303ae
A
1087 do_rescan = TRUE;
1088 for (i=0;i<8;i++) {
1089 mp = hw_purge_map(ckpmap, va, &nextva);
1090
91447636
A
1091 switch ((unsigned int)mp & mapRetCode) {
1092 case mapRtOK:
1093 mapping_free(mp);
1094 found_mapping = TRUE;
1095 break;
1096 case mapRtNotFnd:
1097 break;
1098 default:
2d21ac55 1099 panic("mapping_alloc: hw_purge_map failed - pmap = %p, va = %16llX, code = %p\n", ckpmap, va, mp);
91447636 1100 break;
55e303ae
A
1101 }
1102
2d21ac55 1103 if (mapRtNotFnd == ((unsigned int)mp & mapRetCode)) {
55e303ae
A
1104 if (do_rescan)
1105 do_rescan = FALSE;
1106 else
1107 break;
2d21ac55 1108 }
55e303ae
A
1109
1110 va = nextva;
1111 }
1112 }
1113
1114 if (ckpmap == refpmap) {
1115 if (found_mapping == FALSE)
1116 panic("no valid pmap to purge mappings\n");
1117 else
1118 found_mapping = FALSE;
1119 }
1120
1121 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
1122 panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */
1123 }
1124
9bccf70c 1125 }
55e303ae
A
1126
1127 mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
1128 mapCtl.mapcflush.addr = nextva;
9bccf70c 1129 }
55e303ae
A
1130
1131 simple_unlock(&free_pmap_lock);
1132 }
1133
1134rescued:
1135
1136 mb = mapCtl.mapcnext;
1137
1138 if ( big ) { /* if we need a big (128-byte) mapping */
1139 mapCtl.mapcbig++; /* count attempts to allocate a big mapping */
1140 mbn = NULL; /* this will be prev ptr */
1141 mindx = 0;
1142 while( mb ) { /* loop over mapping blocks with free entries */
1143 mindx = mapalc2(mb); /* try for 2 consecutive free bits in this block */
1144
1145 if ( mindx ) break; /* exit loop if we found them */
1146 mbn = mb; /* remember previous block */
1147 mb = mb->nextblok; /* move on to next block */
1148 }
1149 if ( mindx == 0 ) { /* if we couldn't find 2 consecutive bits... */
1150 mapCtl.mapcbigfails++; /* count failures */
1151 big = 0; /* forget that we needed a big mapping */
1152 lists = mpBasicLists; /* clamp list count down to the max in a 64-byte mapping */
1153 mb = mapCtl.mapcnext; /* back to the first block with a free entry */
1154 }
1155 else { /* if we did find a big mapping */
1156 mapCtl.mapcfree--; /* Decrement free count twice */
1157 mapCtl.mapcinuse++; /* Bump in use count twice */
1158 if ( mindx < 0 ) { /* if we just used the last 2 free bits in this block */
1159 if (mbn) { /* if this wasn't the first block */
1160 mindx = -mindx; /* make positive */
1161 mbn->nextblok = mb->nextblok; /* unlink this one from the middle of block list */
1162 if (mb == mapCtl.mapclast) { /* if we emptied last block */
1163 mapCtl.mapclast = mbn; /* then prev block is now last */
1164 }
1165 }
1166 }
1167 }
1168 }
1169
1170 if ( !big ) { /* if we need a small (64-byte) mapping */
1171 if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */
2d21ac55 1172 panic("mapping_alloc - empty mapping block detected at %p\n", mb);
55e303ae 1173 }
1c79356b
A
1174
1175 if(mindx < 0) { /* Did we just take the last one */
1176 mindx = -mindx; /* Make positive */
1177 mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
2d21ac55 1178 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = NULL; /* Removed the last one */
1c79356b
A
1179 }
1180
1181 mapCtl.mapcfree--; /* Decrement free count */
1182 mapCtl.mapcinuse++; /* Bump in use count */
1183
1184 mapCtl.mapcallocc++; /* Count total calls */
1185
1186/*
1187 * Note: in the following code, we will attempt to rescue blocks only one at a time.
1188 * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none
1189 * rescueable, we will kick the misc scan who will allocate some for us. We only do this
1190 * if we haven't already done it.
1191 * For early boot, we are set up to only rescue one block at a time. This is because we prime
1192 * the release list with as much as we need until threads start.
1193 */
55e303ae 1194
1c79356b 1195 if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */
91447636 1196 if((mbn = mapCtl.mapcrel) != 0) { /* Try to rescue a block from impending doom */
1c79356b
A
1197 mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */
1198 mapCtl.mapcreln--; /* Back off the count */
1199 mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */
1200 }
1201 else { /* We need to replenish */
1202 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1203 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1204 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1205 }
1206 }
1207 }
1208 }
1209
1210 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1211 splx(s); /* Restore 'rupts */
1212
91447636 1213 mp = &((mapping_t *)mb)[mindx]; /* Point to the allocated mapping */
55e303ae
A
1214 mp->mpFlags = lists; /* set the list count */
1215
1216
1c79356b
A
1217 return mp; /* Send it back... */
1218}
1219
1220
1221void
91447636 1222consider_mapping_adjust(void)
1c79356b
A
1223{
1224 spl_t s;
1225
1226 s = splhigh(); /* Don't bother from now on */
1227 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
55e303ae 1228 panic("consider_mapping_adjust -- lock timeout\n");
1c79356b
A
1229 }
1230
1231 if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
1232 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */
1233 thread_call_enter(mapping_adjust_call); /* Go allocate some more */
1234 }
1235 }
1236
1237 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1238 splx(s); /* Restore 'rupts */
1239
1240}
1241
1242
1243
1244/*
1245 * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
1246 *
55e303ae
A
1247 * The mapping block is a page size area on a page boundary. It contains 1 header and 63
1248 * mappings. This call adds and initializes a block for use. Mappings come in two sizes,
1249 * 64 and 128 bytes (the only difference is the number of skip-lists.) When we allocate a
1250 * 128-byte mapping we just look for two consequtive free 64-byte mappings, so most of the
1251 * code only deals with "basic" 64-byte mappings. This works for two reasons:
1252 * - Only one in 256 mappings is big, so they are rare.
1253 * - If we cannot find two consequtive free mappings, we just return a small one.
1254 * There is no problem with doing this, except a minor performance degredation.
1255 * Therefore, all counts etc in the mapping control structure are in units of small blocks.
1c79356b
A
1256 *
1257 * The header contains a chain link, bit maps, a virtual to real translation mask, and
1258 * some statistics. Bit maps map each slot on the page (bit 0 is not used because it
1259 * corresponds to the header). The translation mask is the XOR of the virtual and real
1260 * addresses (needless to say, the block must be wired).
1261 *
1262 * We handle these mappings the same way as saveareas: the block is only on the chain so
1263 * long as there are free entries in it.
1264 *
1265 * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free
1266 * mappings. Blocks marked PERM won't ever be released.
1267 *
1268 * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
1269 * list. We do this only at start up time. This is done because we only allocate blocks
1270 * in the pageout scan and it doesn't start up until after we run out of the initial mappings.
1271 * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put
1272 * them on the release queue, the allocate routine will rescue them. Then when the
1273 * pageout scan starts, all extra ones will be released.
1274 *
1275 */
1276
1277
1278void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
1279 /* Sets start and end of a block of mappings.
1280 perm indicates if the block can be released
1281 or goes straight to the release queue.
1282 locked indicates if the lock is held already */
1283
91447636 1284 mappingblok_t *mb;
1c79356b 1285 spl_t s;
55e303ae
A
1286 addr64_t raddr;
1287 ppnum_t pp;
1c79356b 1288
91447636 1289 mb = (mappingblok_t *)mbl; /* Start of area */
1c79356b
A
1290
1291 if(perm >= 0) { /* See if we need to initialize the block */
1292 if(perm) {
55e303ae 1293 raddr = (addr64_t)((unsigned int)mbl); /* Perm means V=R */
1c79356b 1294 mb->mapblokflags = mbPerm; /* Set perm */
55e303ae 1295// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1c79356b
A
1296 }
1297 else {
55e303ae
A
1298 pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl); /* Get the physical page */
1299 if(!pp) { /* What gives? Where's the page? */
1300 panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
1301 }
1302
1303 raddr = (addr64_t)pp << 12; /* Convert physical page to physical address */
1c79356b 1304 mb->mapblokflags = 0; /* Set not perm */
55e303ae 1305// mb->mapblokflags |= (unsigned int)mb; /* (BRINGUP) */
1c79356b
A
1306 }
1307
55e303ae 1308 mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl); /* Form translation mask */
1c79356b
A
1309
1310 mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */
1311 mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */
1c79356b
A
1312 }
1313
1314 s = splhigh(); /* Don't bother from now on */
1315 if(!locked) { /* Do we need the lock? */
1316 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */
55e303ae 1317 panic("mapping_free_init: timeout getting control lock\n"); /* Tell all and die */
1c79356b
A
1318 }
1319 }
1320
1321 if(perm < 0) { /* Direct to release queue? */
1322 mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */
1323 mapCtl.mapcrel = mb; /* Queue us on in */
1324 mapCtl.mapcreln++; /* Count the free block */
1325 }
1326 else { /* Add to the free list */
1327
2d21ac55 1328 mb->nextblok = NULL; /* We always add to the end */
1c79356b
A
1329 mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
1330
1331 if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
1332 mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */
1333 }
1334 else { /* We are not the first */
1335 mapCtl.mapclast->nextblok = mb; /* Point the last to us */
1336 mapCtl.mapclast = mb; /* We are now last */
1337 }
1338 }
1339
1340 if(!locked) { /* Do we need to unlock? */
1341 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
1342 }
55e303ae
A
1343
1344 splx(s); /* Restore 'rupts */
1c79356b
A
1345 return; /* All done, leave... */
1346}
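
/*
 *	Illustrative sketch (not part of the build):  one way the mapblokvrswap
 *	translation mask described above might be used to find the physical
 *	address of a slot within a wired mapping block.  The helper name and the
 *	64-byte slot arithmetic are assumptions for illustration only, not code
 *	that exists elsewhere in this file.
 */
#if 0
static addr64_t map_slot_to_phys(mappingblok_t *mb, int slot) {

	addr64_t va;

	va = (addr64_t)((unsigned int)mb + (slot << 6));	/* Slot 0 is the header; each basic mapping is 64 bytes */
	return va ^ mb->mapblokvrswap;				/* XOR with the virtual-to-real mask yields the physical address */
}
#endif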


/*
 *	void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
 *
 *	No locks can be held, because we allocate memory here.
 *	This routine needs a corresponding mapping_relpre call to remove the
 *	hold-off flag so that the adjust routine will free the extra mapping
 *	blocks on the release list.  I don't like this, but I don't know
 *	how else to do this for now...
 *
 */

void mapping_prealloc(unsigned int size) {			/* Preallocates mappings for a large request */

	int nmapb, i;
	kern_return_t retr;
	mappingblok_t *mbn;
	spl_t s;

	s = splhigh();						/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_prealloc - timeout getting control lock\n");	/* Tell all and die */
	}

	nmapb = (size >> 12) + mapCtl.mapcmin;			/* Get number of entries needed for this and the minimum */

	mapCtl.mapcholdoff++;					/* Bump the hold-off count */

	if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {		/* Do we already have enough? */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);					/* Restore 'rupts */
		return;
	}
	if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);					/* Restore 'rupts */
		return;
	}
	nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;		/* Get number of blocks to get */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);						/* Restore 'rupts */

	for(i = 0; i < nmapb; i++) {				/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS)			/* Did we get some memory? */
			break;
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto the release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));

	mapCtl.mapcrecurse = 0;					/* We are done now */
}

/*
 *	void mapping_relpre(void) - Releases the preallocation hold-off
 *
 *	This routine removes the hold-off flag so that the adjust routine will
 *	free the extra mapping blocks on the release list.  I don't like this,
 *	but I don't know how else to do this for now...
 *
 */

void mapping_relpre(void) {					/* Releases the release hold-off */

	spl_t s;

	s = splhigh();						/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_relpre - timeout getting control lock\n");	/* Tell all and die */
	}
	if(--mapCtl.mapcholdoff < 0) {				/* Back down the hold-off count */
		panic("mapping_relpre: hold-off count went negative\n");
	}

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);		/* Unlock our stuff */
	splx(s);						/* Restore 'rupts */
}
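
/*
 *	Illustrative sketch (not part of the build):  the intended pairing of
 *	mapping_prealloc() and mapping_relpre() around a large request, per the
 *	comments above.  The helper name and the request size are made up for
 *	illustration.
 */
#if 0
static void example_large_request(void) {

	unsigned int size = 256 * PAGE_SIZE;		/* Hypothetical size of the large request, in bytes */

	mapping_prealloc(size);				/* Top off the free list and bump the hold-off count */
	/* ... enter the mappings for the request here ... */
	mapping_relpre();				/* Drop the hold-off so the adjust routine may trim the release list again */
}
#endif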

/*
 *	void mapping_free_prime(void) - Primes the mapping block release list
 *
 *	See mapping_free_init.
 *	No locks can be held, because we allocate memory here.
 *	One processor running only.
 *
 */

void mapping_free_prime(void) {					/* Primes the mapping block release list */

	int nmapb, i;
	kern_return_t retr;
	mappingblok_t *mbn;
	vm_offset_t mapping_min;

	retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
			     FALSE, VM_FLAGS_ANYWHERE, &mapping_map);

	if (retr != KERN_SUCCESS)
		panic("mapping_free_prime: kmem_suballoc failed");


	nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get permanent allocation */
	nmapb = nmapb * 4;					/* Get 4 times our initial allocation */

#if DEBUG
	kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n",
		mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
#endif

	for(i = 0; i < nmapb; i++) {				/* Allocate 'em all */
		retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);	/* Find a virtual address to use */
		if(retr != KERN_SUCCESS) {			/* Did we get some memory? */
			panic("Whoops...  Not a bit of wired memory left for anyone\n");
		}
		mapping_free_init((vm_offset_t)mbn, -1, 0);	/* Initialize onto the release queue */
	}
	if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
		mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
}


void
mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		       vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = mapCtl.mapcinuse;
	*cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
	*max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
	*elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));
	*alloc_size = PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
}


/*
 *	addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds the first virtual mapping of a physical page in a space
 *
 *	First looks up the physical entry associated with the physical page.  Then searches the alias
 *	list for a matching pmap.  It grabs the virtual address from the mapping, drops busy, and returns
 *	that.
 *
 */

addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa) {			/* Finds first virtual mapping of a physical page in a space */

	spl_t s;
	mapping_t *mp;
	unsigned int pindex;
	phys_entry_t *physent;
	addr64_t va;

	physent = mapping_phys_lookup(pa, &pindex);		/* Get physical entry */
	if(!physent) {						/* Did we find the physical page? */
		panic("mapping_p2v: invalid physical page %08X\n", pa);
	}

	s = splhigh();						/* Make sure interruptions are disabled */

	mp = hw_find_space(physent, pmap->space);		/* Go find the first mapping to the page from the requested pmap */

	if(mp) {						/* Did we find one? */
		va = mp->mpVAddr & -4096;			/* If so, get the cleaned up vaddr */
		mapping_drop_busy(mp);				/* Go ahead and release the mapping now */
	}
	else va = 0;						/* Return failure */

	splx(s);						/* Restore 'rupts */

	return va;						/* Bye, bye... */

}
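
/*
 *	Illustrative sketch (not part of the build):  how a caller might use
 *	mapping_p2v() to recover a virtual address for a physical page, per the
 *	comment above.  The helper name is made up for illustration.
 */
#if 0
static void example_p2v(pmap_t pmap, ppnum_t pp) {

	addr64_t va;

	va = mapping_p2v(pmap, pp);		/* First virtual mapping of the page in this space, or 0 */
	if(va == 0) {
		return;				/* The page is not mapped in this pmap */
	}
	/* va is page aligned; add the byte offset within the page if one is needed */
}
#endif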


/*
 *	kvtophys(addr)
 *
 *	Convert a kernel virtual address to a physical address
 */
addr64_t kvtophys(vm_offset_t va) {

	return pmap_extract(kernel_pmap, va);			/* Find mapping and lock the physical entry for this mapping */

}

/*
 *	kvtophys64(addr)
 *
 *	Convert a kernel virtual address to a 64-bit physical address
 */
vm_map_offset_t kvtophys64(vm_map_offset_t va) {

	ppnum_t pa = pmap_find_phys(kernel_pmap, (addr64_t)va);

	if (!pa)
		return 0;
	return (((vm_map_offset_t)pa) << 12) | (va & 0xfff);

}

/*
 *	void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on
 *	page 0 access for the current thread.
 *
 *	If the parameter is TRUE, faults are ignored.
 *	If the parameter is FALSE, faults are honored.
 *
 */

void ignore_zero_fault(boolean_t type) {			/* Sets up to ignore or honor any fault on page 0 access for the current thread */

	if(type) current_thread()->machine.specFlags |= ignoreZeroFault;	/* Ignore faults on page 0 */
	else     current_thread()->machine.specFlags &= ~ignoreZeroFault;	/* Honor faults on page 0 */

	return;							/* Return the result or 0... */
}
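
/*
 *	Illustrative sketch (not part of the build):  bracketing a deliberate
 *	page-0 access with ignore_zero_fault(), per the comment above.  The
 *	helper name is made up for illustration.
 */
#if 0
static void example_touch_page_zero(void) {

	ignore_zero_fault(TRUE);		/* Tolerate a page-0 access in the bracketed region */
	/* ... code that may legitimately touch page 0 goes here ... */
	ignore_zero_fault(FALSE);		/* Back to honoring page-0 faults */
}
#endif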

/*
 * no-op in current ppc implementation
 */
void inval_copy_windows(__unused thread_t th)
{
}

1c79356b 1576
55e303ae
A
1577/*
1578 * Copies data between a physical page and a virtual page, or 2 physical. This is used to
1579 * move data from the kernel to user state. Note that the "which" parm
1580 * says which of the parameters is physical and if we need to flush sink/source.
91447636 1581 * Note that both addresses may be physical, but only one may be virtual.
1c79356b 1582 *
55e303ae 1583 * The rules are that the size can be anything. Either address can be on any boundary
91447636 1584 * and span pages. The physical data must be contiguous as must the virtual.
1c79356b 1585 *
55e303ae
A
1586 * We can block when we try to resolve the virtual address at each page boundary.
1587 * We don't check protection on the physical page.
1c79356b 1588 *
55e303ae
A
1589 * Note that we will not check the entire range and if a page translation fails,
1590 * we will stop with partial contents copied.
1c79356b
A
1591 *
1592 */

kern_return_t
hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	vm_map_t map;
	kern_return_t ret;
	addr64_t nextva, vaddr = 0, paddr;
	mapping_t *mp = NULL;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	unsigned int pindex;
	phys_entry_t *physent;
	vm_prot_t prot = 0;
	int orig_which;

	orig_which = which;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {		/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;						/* Assume both are physical */

	if(!(which & cppvPsnk)) {				/* Is sink page virtual? */
		vaddr = sink;					/* Sink side is virtual */
		bothphys = 0;					/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;		/* Sink always must be read/write */
	} else if (!(which & cppvPsrc)) {			/* Is source page virtual? */
		vaddr = source;					/* Source side is virtual */
		bothphys = 0;					/* Show both aren't physical */
		prot = VM_PROT_READ;				/* Virtual source is always read only */
	}

	needtran = 1;						/* Show we need to map the virtual the first time */
	s = splhigh();						/* Don't bother me */

	while(size) {

		if(!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			if(!needtran) {				/* If this is not the first translation, we need to drop the old busy */
				mapping_drop_busy(mp);		/* Release the old mapping now */
			}
			needtran = 0;

			while(1) {
				mp = mapping_find(map->pmap, vaddr, &nextva, 1);	/* Find and busy the mapping */
				if(!mp) {			/* Was it there? */
					if(getPerProc()->istackptr == 0)
						panic("copypv: No valid mapping on memory %s %16llx", "RD", vaddr);

					splx(s);		/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();		/* Don't bother me */
					continue;		/* Go try for the map again... */

				}
				if (mp->mpVAddr & mpI) {	/* cache inhibited, so force the appropriate page to be flushed before */
					if (which & cppvPsrc)	/* and after the copy to avoid cache paradoxes */
						which |= cppvFsnk;
					else
						which |= cppvFsrc;
				} else
					which = orig_which;

				/* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
				   we can just leave.
				*/
				if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break;	/* We got it mapped R/W or the source is not virtual, leave... */

				mapping_drop_busy(mp);		/* Go ahead and release the mapping for now */
				if(getPerProc()->istackptr == 0)
					panic("copypv: No valid mapping on memory %s %16llx", "RDWR", vaddr);
				splx(s);			/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();			/* Don't bother me */
			}
			paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL));	/* construct the physical address... this calculation works */
												/* properly on both single page and block mappings */
			if(which & cppvPsrc) sink = paddr;	/* If source is physical, then the sink is virtual */
			else source = paddr;			/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));		/* Assume sink smallest */
		if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;						/* Assume we can copy it all */
		if(lop < size) csize = lop;				/* Nope, we can't do it all */

		if(which & cppvFsrc) flush_dcache64(source, csize, 1);	/* If requested, flush source before move */
		if(which & cppvFsnk) flush_dcache64(sink, csize, 1);	/* If requested, flush sink before move */

		bcopy_physvir_32(source, sink, csize);			/* Do a physical copy, virtually */

		if(which & cppvFsrc) flush_dcache64(source, csize, 1);	/* If requested, flush source after move */
		if(which & cppvFsnk) flush_dcache64(sink, csize, 1);	/* If requested, flush sink after move */

/*
 *		Note that for certain ram disk flavors, we may be copying outside of known memory.
 *		Therefore, before we try to mark it modified, we check if it exists.
 */

		if( !(which & cppvNoModSnk)) {
			physent = mapping_phys_lookup(sink >> 12, &pindex);	/* Get physical entry for sink */
			if(physent) mapping_set_mod((ppnum_t)(sink >> 12));	/* Make sure we know that it is modified */
		}
		if( !(which & cppvNoRefSrc)) {
			physent = mapping_phys_lookup(source >> 12, &pindex);	/* Get physical entry for source */
			if(physent) mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it has been referenced */
		}
		size = size - csize;					/* Calculate what is left */
		vaddr = vaddr + csize;					/* Move to next sink address */
		source = source + csize;				/* Bump source to next physical address */
		sink = sink + csize;					/* Bump sink to next physical address */
	}

	if(!bothphys) mapping_drop_busy(mp);	/* Go ahead and release the mapping of the virtual page if any */
	splx(s);				/* Open up for interrupts */

	return KERN_SUCCESS;
}
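
/*
 *	Illustrative sketch (not part of the build):  one plausible way to call
 *	hw_copypv_32() to copy from a physical page into kernel virtual memory,
 *	based on the flag descriptions above.  The helper name and parameters are
 *	made up for illustration.
 */
#if 0
static kern_return_t example_copy_phys_to_kva(addr64_t src_phys, vm_offset_t dst_kva, unsigned int len) {

	/* Source is physical (cppvPsrc), the sink is a kernel virtual address (cppvKmap),
	   and we ask for the sink to be flushed after the move (cppvFsnk). */
	return hw_copypv_32(src_phys, (addr64_t)dst_kva, len, cppvPsrc | cppvKmap | cppvFsnk);
}
#endif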


/*
 *	Debug code
 */

void mapping_verify(void) {

	spl_t s;
	mappingblok_t *mb, *mbn;
	unsigned int relncnt;
	unsigned int dumbodude;

	dumbodude = 0;

	s = splhigh();						/* Don't bother from now on */

	mbn = NULL;						/* Start with none */
	for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) {	/* Walk the free chain */
		if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) {	/* Is tag ok? */
			panic("mapping_verify: flags tag bad, free chain; mb = %p, tag = %08X\n", mb, mb->mapblokflags);
		}
		mbn = mb;					/* Remember the last one */
	}

	if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) {	/* Do we point to the last one? */
		panic("mapping_verify: last pointer bad; mb = %p, mapclast = %p\n", mb, mapCtl.mapclast);
	}

	relncnt = 0;						/* Clear count */
	for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) {	/* Walk the release chain */
		dumbodude |= mb->mapblokflags;			/* Just touch it to make sure it is mapped */
		relncnt++;					/* Count this one */
	}

	if(mapCtl.mapcreln != relncnt) {			/* Is the count on the release queue ok? */
		panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
	}

	splx(s);						/* Restore 'rupts */

	return;
}

void mapping_phys_unused(ppnum_t pa) {

	unsigned int pindex;
	phys_entry_t *physent;

	physent = mapping_phys_lookup(pa, &pindex);		/* Get physical entry */
	if(!physent) return;					/* Did we find the physical page? */

	if(!(physent->ppLink & ~(ppLock | ppFlags))) return;	/* No one else is here */

	panic("mapping_phys_unused: physical page (%08X) in use, physent = %p\n", pa, physent);

}

void
mapping_hibernate_flush(void)
{
	unsigned int page, bank;
	struct phys_entry * entry;

	for (bank = 0; bank < pmap_mem_regions_count; bank++)
	{
		entry = (struct phys_entry *) pmap_mem_regions[bank].mrPhysTab;
		for (page = pmap_mem_regions[bank].mrStart; page <= pmap_mem_regions[bank].mrEnd; page++)
		{
			hw_walk_phys(entry, hwpNoop, hwpNoop, hwpNoop, 0, hwpPurgePTE);
			entry++;
		}
	}
}