/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	This file is used to maintain the virtual to real mappings for a PowerPC machine.
 *	The code herein is primarily used to bridge between the pmap layer and the hardware layer.
 *	Currently, some of the function of this module is contained within pmap.c.  We may want to move
 *	all of this into it (or most anyway) for the sake of performance.  We shall see as we write it.
 *
 *	We also depend upon the structure of the phys_entry control block.  We do put some processor
 *	specific stuff in there.
 *
 */

#include <cpus.h>
#include <debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <kern/spl.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/pmap.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mem.h>

#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ddb/db_output.h>

#include <ppc/POWERMAC/video_console.h>		/* (TEST/DEBUG) */

#define PERFTIMES 0

#if PERFTIMES && DEBUG
#define debugLog2(a, b, c) dbgLog2(a, b, c)
#else
#define debugLog2(a, b, c)
#endif

vm_map_t		mapping_map = VM_MAP_NULL;

unsigned int	incrVSID = 0;					/* VSID increment value */
unsigned int	mappingdeb0 = 0;
unsigned int	mappingdeb1 = 0;
extern unsigned int hash_table_size;
extern vm_offset_t mem_size;
/*
 *	ppc_prot translates from the mach representation of protections to the PPC version.
 *	Calculating it like this saves a memory reference - and maybe a couple of microseconds.
 *	It eliminates the use of this table:
 *	unsigned char ppc_prot[8] = { 0, 3, 2, 2, 3, 3, 2, 2 };
 */

#define ppc_prot(p) ((0xAFAC >> (p << 1)) & 3)
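
/*
 *	For illustration: 0xAFAC packs the old table two bits per entry, entry 0 in the
 *	low-order bit pair (binary 10 10 11 11 10 10 11 00).  So ppc_prot(0) = 0xAFAC & 3 = 0,
 *	ppc_prot(1) = (0xAFAC >> 2) & 3 = 3, and ppc_prot(3) = (0xAFAC >> 6) & 3 = 2,
 *	reproducing { 0, 3, 2, 2, 3, 3, 2, 2 }.
 */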

/*
 *	About PPC VSID generation:
 *
 *	This function is called to generate an address space ID. This space ID must be unique within
 *	the system.  For the PowerPC, it is used to build the VSID.  We build a VSID in the following
 *	way:  space ID << 4 | segment.  Since a VSID is 24 bits, and out of that, we reserve the last
 *	4, so, we can have 2^20 (1M) unique IDs.  Each pmap has a unique space ID, so we should be able
 *	to have 1M pmaps at a time, not that we ever could; we'd run out of memory way before then.  The
 *	problem is that only a certain number of pmaps are kept in a free list and if that is full,
 *	they are released.  This causes us to lose track of what space IDs are free to be reused.
 *	We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
 *	when the space ID wraps, or 4) scan the list of pmaps and find a free one.
 *
 *	Yet another consideration is the hardware use of the VSID.  It is used as part of the hash
 *	calculation for virtual address lookup.  An improperly chosen value could potentially cause
 *	too many hashes to hit the same bucket, causing PTEG overflows.  The actual hash function
 *	is (page index XOR vsid) mod number of ptegs.  For a 32MB machine, using the suggested
 *	hash table size, there are 2^13 (8192) PTEGs.  Remember, though, that the bottom 4 bits
 *	are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
 *	before we start hashing to the same buckets with the same vaddrs.  Also, within a space ID,
 *	every 8192 pages (32MB) within a segment will hash to the same bucket.  That's 8 collisions
 *	per segment.  So, a scan of every page for 256MB would fill 32 PTEGs completely, but
 *	with no overflow.  I don't think that this is a problem.
 *
 *	There may be a problem with the space ID, though. A new space ID is generated (mainly)
 *	whenever there is a fork.  There shouldn't really be any problem because (for a 32MB
 *	machine) we can have 512 pmaps and still not have hash collisions for the same address.
 *	The potential problem, though, is if we get long-term pmaps that have space IDs that are
 *	the same modulo 512.  We can reduce this problem by having the segment number be bits
 *	0-3 of the space ID rather than 20-23.  Doing this means that, in effect, corresponding
 *	vaddrs in different segments hash to the same PTEG.  While this is somewhat of a problem,
 *	I don't think that it is as significant as the other, so, I'll make the space ID
 *	with segment first.
 *
 *	The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
 *	While this is a problem that should only happen in periods counted in weeks, it can and
 *	will happen.  This is assuming a monotonically increasing space ID. If we were to search
 *	for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
 *	That's pretty unlikely to happen.  There couldn't be enough storage to support a million tasks.
 *
 *	So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
 *	locked by free_pmap_lock) that is sorted in VSID sequence order.
 *
 *	Whenever we need a VSID, we walk the list looking for the next in the sequence from
 *	the last that was freed.  Then we allocate that.
 *
 *	NOTE: We must be called with interruptions off and free_pmap_lock held.
 *
 */

/*
 *	mapping_init();
 *		Do anything that needs to be done before the mapping system can be used.
 *		Hash table must be initialized before we call this.
 *
 *		Calculate the SID increment.  Currently we use size^(1/2) + size^(1/4) + 1;
 */
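/*
 *	Worked example (illustrative numbers only): with hash_table_size = 0x00100000 (1MB),
 *	cntlzw returns 11, so incrVSID = (1 << ((32 - 11 + 1) >> 1)) | (1 << ((32 - 11 + 1) >> 2)) | 1
 *	= 0x800 | 0x20 | 1 = 0x821.
 */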

void mapping_init(void) {

	unsigned int tmp;

	__asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size));	/* Get number of leading 0s */

	incrVSID = 1 << ((32 - tmp + 1) >> 1);			/* Get ceiling of sqrt of table size */
	incrVSID |= 1 << ((32 - tmp + 1) >> 2);			/* Get ceiling of quadroot of table size */
	incrVSID |= 1;									/* Set bit and add 1 */
	return;

}


/*
 *	mapping_remove(pmap_t pmap, vm_offset_t va);
 *		Given a pmap and virtual address, this routine finds the mapping and removes it from
 *		both its PTEG hash list and the physical entry list.  The mapping block will be added to
 *		the free list.  If the free list threshold is reached, garbage collection will happen.
 *		We also kick back a return code to say whether or not we had one to remove.
 *
 *		We have a strict ordering here: the mapping must be removed from the PTEG hash list before
 *		it can be removed from the physical entry list.  This allows us to get by with only the PTEG
 *		hash lock at page fault time.  The physical entry lock must be held while we remove the mapping
 *		from both lists.  The PTEG lock is one of the lowest level locks.  No PTE fault, interruptions,
 *		losing control, getting other locks, etc., are allowed when you hold it.  You do, and you die.
 *		It's just that simple!
 *
 *		When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
 *		However, a mapping's order on the PTEG hash chain is not.  The interrupt handler uses the PTEG
 *		lock to control the hash chain and may move the position of the mapping for MRU calculations.
 *
 *		Note that mappings do not need to point to a physical entry.  When they don't, it indicates
 *		the mapping is outside of physical memory and usually refers to a memory mapped device of
 *		some sort.  Naturally, we can't lock what we don't have, so the phys entry lock and unlock
 *		routines return normally, but don't do anything.
 */

boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) {	/* Remove a single mapping for this VADDR
														   Returns TRUE if a mapping was found to remove */

	mapping		*mp, *mpv;
	register blokmap *blm;
	spl_t		s;
	unsigned int *useadd, *useaddr;
	int			i;

	debugLog2(1, va, pmap->space);					/* start mapping_remove */

	s=splhigh();									/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, va);			/* Lock the physical entry for this mapping */

	if(!mp) {										/* Did we find one? */
		if(mp = (mapping *)hw_rem_blk(pmap, va, va)) {	/* No normal pages, try to remove an odd-sized one */
			splx(s);								/* Allow 'rupts now */

			if((unsigned int)mp & 1) {				/* Make sure we don't unmap a permanent one */
				blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE));	/* Get virtual address */
				panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
					pmap, va, blm);
			}
#if 0
			blm = (blokmap *)hw_cpv(mp);			/* (TEST/DEBUG) */
			kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
				blm, blm->start, blm->end, blm->PTEr);
#endif
			mapping_free(hw_cpv(mp));				/* Release it */
			debugLog2(2, 1, 0);						/* End mapping_remove */
			return TRUE;							/* Tell them we did it */
		}
		splx(s);									/* Restore the interrupt level */
		debugLog2(2, 0, 0);							/* end mapping_remove */
		return FALSE;								/* Didn't find any, return FALSE... */
	}
	if((unsigned int)mp & 1) {						/* Did we timeout? */
		panic("mapping_remove: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);									/* Restore the interrupt level */
		return FALSE;								/* Bad hair day, return FALSE... */
	}

	mpv = hw_cpv(mp);								/* Get virtual address of mapping */
#if DEBUG
	if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
	(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif
	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */
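	/* Each pmapUsage entry appears to be a 16-bit count packed two per 32-bit word (inferred
	   from the masking above); hw_atomic_sub works on whole words, so the word-aligned (high,
	   on big-endian PPC) halfword is adjusted by 0x00010000 and the low halfword by 1. */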

#if 0
	for(i = 0; i < (pmapUsageMask + 1); i++) {		/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {		/* (TEST/DEBUG) */
			panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}
#endif

	hw_rem_map(mp);									/* Remove the corresponding mapping */

	if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock physical entry associated with mapping */

	splx(s);										/* Was there something you needed? */

	mapping_free(mpv);								/* Add mapping to the free list */
	debugLog2(2, 1, 0);								/* end mapping_remove */
	return TRUE;									/* Tell them we did it */
}

/*
 *	mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list
 *
 *	This guy releases any mappings that exist for a physical page.
 *	We get the lock on the phys_entry, and hold it throughout this whole routine.
 *	That way, no one can change the queue out from underneath us.  We keep fetching
 *	the physent's mapping anchor until it is null, then we're done.
 *
 *	For each mapping, we call the remove routine to remove it from the PTEG hash list and
 *	decrement the pmap's residency count.  Then we release the mapping back to the free list.
 *
 */

void mapping_purge(struct phys_entry *pp) {			/* Remove all mappings for this physent */

	mapping		*mp, *mpv;
	spl_t		s;
	unsigned int *useadd, *useaddr, uindx;
	int			i;

	s=splhigh();									/* Don't bother me */
	debugLog2(3, pp->pte1, 0);						/* start mapping_purge */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
	}

	while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {	/* Keep going so long as there's another */

		mpv = hw_cpv(mp);							/* Get the virtual address */
#if DEBUG
		if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
#else
		(void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);	/* Decrement the resident page count */
#endif

		uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);	/* Join segment number and top 3 bits of the API */
		useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];	/* Point to slot to bump */
		useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
		(void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Decrement the even or odd slot */

#if 0
		for(i = 0; i < (pmapUsageMask + 1); i++) {	/* (TEST/DEBUG) */
			if((mpv->pmap->pmapUsage[i]) > 8192) {	/* (TEST/DEBUG) */
				panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
					i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
			}
		}
#endif


		hw_rem_map(mp);								/* Remove the mapping */
		mapping_free(mpv);							/* Add mapping to the free list */
	}

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	debugLog2(4, pp->pte1, 0);						/* end mapping_purge */
	splx(s);										/* Was there something you needed? */
	return;											/* Tell them we did it */
}


/*
 *	mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one
 *
 *	This routine takes the given parameters, builds a mapping block, and queues it into the
 *	correct lists.
 *
 *	The pp parameter can be null.  This allows us to make a mapping that is not
 *	associated with any physical page.  We may need this for certain I/O areas.
 *
 *	If the phys_entry address is null, we neither lock nor chain into it.
 *	If locked is 1, we already hold the lock on the phys_entry and will neither take nor release it.
 */

mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) {	/* Make an address mapping */

	register mapping *mp, *mpv;
	unsigned int *useadd, *useaddr;
	spl_t		s;
	int			i;

	debugLog2(5, va, pa);							/* start mapping_make */
	mpv = mapping_alloc();							/* Get a spare mapping block */

	mpv->pmap = pmap;								/* Initialize the pmap pointer */
	mpv->physent = pp;								/* Initialize the pointer to the physical entry */
	mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the PTE */
	mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F);	/* Build the VSID */
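	/* Layout notes: PTEr matches the low word of a PPC PTE - the physical page number with the
	   WIMG (attr << 3) and PP (ppc_prot) bits below it.  PTEv packs what becomes the high word:
	   the segment-derived top bits of the VSID, the space ID, and the 6-bit API; the valid and
	   hash-select bits are supplied when a real PTE is built. */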

	s=splhigh();									/* Don't bother from now on */

	mp = hw_cvp(mpv);								/* Get the physical address of this */

	if(pp && !locked) {								/* Is there a physical entry? Or do we already hold the lock? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);		/* Complain about timeout */
		}
	}

	if(pp) {										/* See if there is a physical entry */
		mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);	/* Move the old anchor to the new mapping's forward pointer */
		pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS);	/* Point the anchor at us.  Now we're on the list (keep the flags) */
	}

	hw_add_map(mp, pmap->space, va);				/* Stick it on the PTEG hash list */

	(void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1);	/* Increment the resident page count */
	useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];	/* Point to slot to bump */
	useaddr = (unsigned int *)((unsigned int)useadd & -4);	/* Round down to word */
	(void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1);	/* Increment the even or odd slot */
#if 0
	for(i = 0; i < (pmapUsageMask + 1); i++) {		/* (TEST/DEBUG) */
		if((mpv->pmap->pmapUsage[i]) > 8192) {		/* (TEST/DEBUG) */
			panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
				i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
		}
	}
#endif

	if(pp && !locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* If we have one and we didn't hold on entry, unlock the physical entry */

	splx(s);										/* Ok for interruptions now */
	debugLog2(6, pmap->space, prot);				/* end mapping_make */
	return mpv;										/* Leave... */
}


/*
 *	Enters optimal translations for odd-sized V=F blocks.
 *
 *	Builds a block map for each power-of-two hunk o' address
 *	that exists.  This is specific to the processor type.
 *	PPC uses BAT register size stuff.  Future PPC might have
 *	something else.
 *
 *	The supplied va is expected to be optimally aligned with respect to the supplied boundary.  We're too
 *	stupid to know otherwise so we only look at the va anyhow, so there...
 *
 */

void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) {	/* Maps optimal autogenned blocks */

	register blokmap *blm, *oblm;
	unsigned int pg;
	unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
	int i, maxshft, nummax, minshft;

#if 1
	kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, bnd, size, prot, attr);
#endif

	minsize = blokValid ^ (blokValid & (blokValid - 1));	/* Set minimum subblock size */
	maxsize = 0x80000000 >> cntlzw(blokValid);		/* Set maximum subblock size */

	minshft = 31 - cntlzw(minsize);					/* Shift to position minimum size */
	maxshft = 31 - cntlzw(blokValid);				/* Shift to position maximum size */

	leading = ((va + bnd - 1) & -bnd) - va;			/* Get size of leading area */
	trailing = size - leading;						/* Get size of trailing area */
	tomin = ((va + minsize - 1) & -minsize) - va;	/* Get size needed to round up to the minimum block size */

#if 1
	kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin);	/* (TEST/DEBUG) */
#endif

	if(tomin) pmap_map_block(pmap, va, pa, tomin, prot, attr, 0);	/* Map up to minimum block size */

	va = va + tomin;								/* Adjust virtual start */
	pa = pa + tomin;								/* Adjust physical start */
	leading = leading - tomin;						/* Adjust leading size */

/*
 *	Some of this code is very classic PPC.  We need to fix this up.
 */
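
/*
 *	Illustrative walk-through (hypothetical sizes): if minsize is 0x00080000 (512K) and the
 *	leading area left after the warm-up above is 0x00280000 bytes, then leading >> minshft
 *	is binary 101, so the loop below maps one 512K block and one 2MB block, which together
 *	reach the bnd boundary.
 */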

	leading = leading >> minshft;					/* Position for bit testing */
	cbsize = minsize;								/* Set the minimum size */

	for(i = 0; i < (maxshft - minshft + 1); i ++) {	/* Cycle through all block sizes, small to large */

		if(leading & 1) {
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;						/* Bump up physical address */
			va = va + cbsize;						/* Bump up virtual address */
		}

		leading = leading >> 1;						/* Shift up to next size */
		cbsize = cbsize << 1;						/* Here too */

	}

	nummax = trailing >> maxshft;					/* Get number of max size blocks left */
	for(i=0; i < nummax - 1; i++) {					/* Account for all max size blocks left but 1 */
		pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0);	/* Map up to next boundary */

		pa = pa + maxsize;							/* Bump up physical address */
		va = va + maxsize;							/* Bump up virtual address */
		trailing -= maxsize;						/* Back off what we just did */
	}

	cbsize = maxsize;								/* Start at maximum size */

	for(i = 0; i < (maxshft - minshft + 1); i ++) {	/* Cycle through all block sizes, high to low */

		if(trailing & cbsize) {
			trailing &= ~cbsize;					/* Remove the block we are allocating */
			pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0);	/* Map up to next boundary */
			pa = pa + cbsize;						/* Bump up physical address */
			va = va + cbsize;						/* Bump up virtual address */
		}
		cbsize = cbsize >> 1;						/* Next size down */
	}

	if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0);	/* Map up to end */

	return;											/* Return */
}


/*
 *	Enters translations for odd-sized V=F blocks.
 *
 *	Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	The higher level VM map should be locked to insure that we don't have a
 *	double diddle here.
 *
 *	We panic if we get a block that overlaps with another.  We do not merge adjacent
 *	blocks because removing any address within a block removes the entire block and it
 *	would really mess things up if we trashed too much.
 *
 *	Once a block is mapped, it is immutable, that is, protection, cache mode, etc. can
 *	not be changed.  The block must be unmapped and then remapped with the new stuff.
 *	We also do not keep track of reference or change flags.
 *
 *	Blocks are kept in MRU order anchored from the pmap.  The chain is traversed only
 *	with interruptions and translation disabled and under the control of the lock located
 *	in the first block map.  MRU is used because it is expected that the same entry
 *	will be accessed repeatedly while PTEs are being generated to cover those addresses.
 *
 */

void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {	/* Map an autogenned block */

	register blokmap *blm, *oblm;
	unsigned int pg;

#if 0
	kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);
#endif

	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
#if 0
			kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n",	/* (TEST/DEBUG) */
				va + pg, pa + pg);
#endif
		}
		return;										/* All done */
	}

	blm = (blokmap *)mapping_alloc();				/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;		/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */
	blm->space = pmap->space;						/* Set the space (only needed for remove) */
	blm->blkFlags = flags;							/* Set the block's flags */

#if 0
	kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);
#endif

	blm = (blokmap *)hw_cvp((mapping *)blm);		/* Get the physical address of this */

#if 0
	kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);
#endif

	if(oblm = hw_add_blk(pmap, blm)) {				/* Add to list and make sure we don't overlap anything */
		panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);	/* Squeak loudly and carry a big stick */
	}

#if 0
	kprintf("pmap_map_block: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);
#endif

	return;											/* Return */
}


/*
 *	Optimally enters translations for odd-sized V=F blocks.
 *
 *	Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
 *	will be split into normal-sized page mappings.
 *
 *	This one is different than pmap_map_block in that it will allocate its own virtual
 *	target address.  Rather than allocating a single block, it may allocate multiple blocks
 *	that are power-of-two aligned/sized.  This allows hardware-level mapping that takes
 *	advantage of BAT maps or large page sizes.
 *
 *	Most considerations for pmap_map_block apply.
 *
 *
 */

kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
	vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an optimal autogenned block */

	register blokmap *blm, *oblm;
	unsigned int pg;
	kern_return_t err;
	unsigned int bnd;

#if 1
	kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		map, pa, size, prot, attr);
#endif

	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {
#if DEBUG
			kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we died */
#endif
			return(err);							/* Pass back the error */
		}
#if 1
		kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va);	/* (TEST/DEBUG) */
#endif

		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return(KERN_SUCCESS);						/* All done */
	}

	err = vm_map_block(map, va, &bnd, pa, size, prot);	/* Go get an optimal allocation */

	if(err == KERN_INVALID_ADDRESS) {				/* Can we try a brute force block mapping? */
		err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);	/* Make us some memories */
		if(err) {
#if DEBUG
			kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err);	/* Say we died */
#endif
			return(err);							/* Pass back the error */
		}
#if 1
		kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va);	/* (TEST/DEBUG) */
#endif
		pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0);	/* Set up a block mapped area */
		return KERN_SUCCESS;						/* All done now */
	}

	if(err != KERN_SUCCESS) {						/* We couldn't get any address range to map this... */
#if DEBUG
		kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);	/* Say we couldn't do it */
#endif
		return(err);
	}

#if 1
	kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd);	/* (TEST/DEBUG) */
#endif
	mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr);	/* Go build the maps */
	return(KERN_SUCCESS);							/* All done */
}


#if 0

/*
 *	Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
 *	areas.
 *
 *	Once blocks are merged, they act like one block, i.e., if you remove it,
 *	it all goes...
 *
 *	This can only be used during boot.  Ain't no way we can handle SMP
 *	or preemption easily, so we restrict it.  We don't check either.  We
 *	assume only skilled professional programmers will attempt using this
 *	function.  We assume no responsibility, either real or imagined, for
 *	injury or death resulting from unauthorized use of this function.
 *
 *	No user serviceable parts inside.  Notice to be removed by end-user only,
 *	under penalty of applicable federal and state laws.
 *
 *	See descriptions of pmap_map_block.  Ignore the part where we say we panic for
 *	overlapping areas.  Note that we do panic if we can't merge.
 *
 */

void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {	/* Map an autogenned block */

	register blokmap *blm, *oblm;
	unsigned int pg;
	spl_t s;

#if 1
	kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",	/* (TEST/DEBUG) */
		pmap, va, pa, size, prot, attr);
#endif

	s=splhigh();									/* Don't bother from now on */
	if(size < ODDBLKMIN) {							/* Is this below the minimum size? */
		for(pg = 0; pg < size; pg += PAGE_SIZE) {	/* Add all pages in this block */
			mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0);	/* Map this page on in */
		}
		return;										/* All done */
	}

	blm = (blokmap *)mapping_alloc();				/* Get a block mapping */

	blm->start = (unsigned int)va & -PAGE_SIZE;		/* Get virtual block start */
	blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);	/* Get virtual block end */
	blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot);	/* Build the real portion of the base PTE */

#if 1
	kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",	/* (TEST/DEBUG) */
		blm, blm->start, blm->end, blm->PTEr);
#endif

	blm = (blokmap *)hw_cvp((mapping *)blm);		/* Get the physical address of this */

#if 1
	kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);
#endif

	if(oblm = hw_add_blk(pmap, blm)) {				/* Add to list and make sure we don't overlap anything */
		panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);	/* Squeak loudly and carry a big stick */
	}

#if 1
	kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n",	/* (TEST/DEBUG) */
		blm, pmap->bmaps);
#endif
	splx(s);										/* Ok for interruptions now */

	return;											/* Return */
}
#endif

/*
 *	void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 *
 *	Phys_entry is unlocked.
 */

void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {	/* Change protection of all mappings to page */

	spl_t spl;

	debugLog2(9, pp->pte1, prot);					/* start mapping_protect_phys */
	spl=splhigh();									/* No interruptions during this */
	if(!locked) {									/* Do we need to lock the physent? */
		if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
			panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
				pp, pp->phys_link, pp->pte1);		/* Complain about timeout */
		}
	}

	hw_prot(pp, ppc_prot(prot));					/* Go set the protection on this physical page */

	if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	splx(spl);										/* Restore interrupt state */
	debugLog2(10, pp->pte1, 0);						/* end mapping_protect_phys */

	return;											/* Leave... */
}

/*
 *	void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
 *
 *	This routine takes a pmap and virtual address and changes
 *	the protection.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g.,
 *	higher to lower, lower to higher.
 *
 */

void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) {	/* Change protection of a virtual page */

	mapping *mp, *mpv;
	spl_t s;

	debugLog2(9, vaddr, pmap);						/* start mapping_protect */
	s = splhigh();									/* Don't bother me */

	mp = hw_lock_phys_vir(pmap->space, vaddr);		/* Lock the physical entry for this mapping */

	if(!mp) {										/* Did we find one? */
		splx(s);									/* Restore the interrupt level */
		debugLog2(10, 0, 0);						/* end mapping_protect */
		return;										/* Didn't find any... */
	}
	if((unsigned int)mp & 1) {						/* Did we timeout? */
		panic("mapping_protect: timeout locking physical entry\n");	/* Yeah, scream about it! */
		splx(s);									/* Restore the interrupt level */
		return;										/* Bad hair day... */
	}

	hw_prot_virt(mp, ppc_prot(prot));				/* Go set the protection on this virtual mapping */

	mpv = hw_cpv(mp);								/* Get virtual address of mapping */
	if(mpv->physent) {								/* If there is a physical page, */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry */
	}
	splx(s);										/* Restore interrupt state */
	debugLog2(10, mpv->PTEr, 0);					/* end mapping_protect */

	return;											/* Leave... */
}

/*
 *	mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
 *
 *	This routine takes a physical entry and sets the physical attributes.  There can be no mappings
 *	associated with this page when we do it.
 */

void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {	/* Sets the default physical page attributes */

	debugLog2(11, pp->pte1, prot);					/* start mapping_phys_attr */

	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry */
		panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n",
			pp, pp->phys_link, pp->pte1);			/* Complain about timeout */
	}

	hw_phys_attr(pp, ppc_prot(prot), wimg);			/* Go set the default WIMG and protection */

	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
	debugLog2(12, pp->pte1, wimg);					/* end mapping_phys_attr */

	return;											/* Leave... */
}

/*
 *	void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and invalidates
 *	any PTEs it finds.
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_invall(struct phys_entry *pp) {		/* Clear all PTEs pointing to a physical page */

	hw_inv_all(pp);									/* Go invalidate all PTEs for this physical page */

	return;											/* Leave... */
}


/*
 *	void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_clr_mod(struct phys_entry *pp) {		/* Clears the change bit of a physical page */

	hw_clr_mod(pp);									/* Go clear the change bit of a physical page */
	return;											/* Leave... */
}


/*
 *	void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_mod(struct phys_entry *pp) {		/* Sets the change bit of a physical page */

	hw_set_mod(pp);									/* Go set the change bit of a physical page */
	return;											/* Leave... */
}


/*
 *	void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	off the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled at entry.
 */

void mapping_clr_ref(struct phys_entry *pp) {		/* Clears the reference bit of a physical page */

	mapping *mp;

	debugLog2(13, pp->pte1, 0);						/* start mapping_clr_ref */
	if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {	/* Lock the physical entry for this mapping */
		panic("Lock timeout getting lock on physical entry\n");	/* Just die... */
	}
	hw_clr_ref(pp);									/* Go clear the reference bit of a physical page */
	hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);	/* Unlock physical entry */
	debugLog2(14, pp->pte1, 0);						/* end mapping_clr_ref */
	return;											/* Leave... */
}


/*
 *	void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and turns
 *	on the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

void mapping_set_ref(struct phys_entry *pp) {		/* Sets the reference bit of a physical page */

	hw_set_ref(pp);									/* Go set the reference bit of a physical page */
	return;											/* Leave... */
}


/*
 *	boolean_t mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the changed bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the changed bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_mod(struct phys_entry *pp) {	/* Tests the change bit of a physical page */

	return(hw_tst_mod(pp));							/* Go test the change bit of a physical page */
}


/*
 *	boolean_t mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
 *
 *	This routine takes a physical entry and runs through all mappings attached to it and tests
 *	the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
 *	the reference bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
 *	either (I don't think, maybe I'll change my mind later).
 *
 *	Interruptions must be disabled and the physical entry locked at entry.
 */

boolean_t mapping_tst_ref(struct phys_entry *pp) {	/* Tests the reference bit of a physical page */

	return(hw_tst_ref(pp));							/* Go test the reference bit of a physical page */
}


/*
 *	void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
 *
 *	Currently, this sets the default word 1 of the PTE: the physical page number and the WIMG bits.
 */

void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) {	/* Initializes hw specific storage attributes */

	pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078);	/* Set the WIMG and phys addr in the default PTE1 */

	return;											/* Leave... */
}


/*
 *	mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
 *
 *	This routine frees any mapping blocks queued to mapCtl.mapcrel.  It also checks
 *	the number of free mappings remaining, and if below a threshold, replenishes them.
 *	The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
 *	a new one is allocated.
 *
 *	This routine allocates and/or releases memory and must be called from a safe place.
 *	Currently, vm_pageout_scan is the safest place.  We insure that the
 */

thread_call_t				mapping_adjust_call;
static thread_call_data_t	mapping_adjust_call_data;

void mapping_adjust(void) {							/* Adjust free mappings */

	kern_return_t	retr;
	mappingblok	*mb, *mbn;
	spl_t		s;
	int			allocsize, i;
	extern int	vm_page_free_count;

	if(mapCtl.mapcmin <= MAPPERBLOK) {
		mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;

#if DEBUG
		kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
		kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
			mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
#endif
	}

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_adjust - timeout getting control lock (1)\n");	/* Tell all and die */
	}

	if (mapping_adjust_call == NULL) {
		thread_call_setup(&mapping_adjust_call_data, mapping_adjust, NULL);
		mapping_adjust_call = &mapping_adjust_call_data;
	}

	while(1) {										/* Keep going until we've got enough */

		allocsize = mapCtl.mapcmin - mapCtl.mapcfree;	/* Figure out how much we need */
		if(allocsize < 1) break;					/* Leave if we have all we need */

		if((unsigned int)(mbn = mapCtl.mapcrel)) {	/* Can we rescue a free one? */
			mapCtl.mapcrel = mbn->nextblok;			/* Dequeue it */
			mapCtl.mapcreln--;						/* Back off the count */
			allocsize = MAPPERBLOK;					/* Show we allocated one block */
		}
		else {										/* No free ones, try to get it */

			allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;	/* Get the number of pages we need */
			if(allocsize > (mapCtl.mapcfree / 2)) allocsize = (mapCtl.mapcfree / 2);	/* Don't try for anything that we can't comfortably map */

			hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
			splx(s);								/* Restore 'rupts */

			for(; allocsize > 0; allocsize >>= 1) {	/* Try allocating in descending halves */
				retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);	/* Find a virtual address to use */
				if((retr != KERN_SUCCESS) && (allocsize == 1)) {	/* Did we find any memory at all? */
					panic("Whoops...  Not a bit of wired memory left for anyone\n");
				}
				if(retr == KERN_SUCCESS) break;		/* We got some memory, bail out... */
			}

			allocsize = allocsize * MAPPERBLOK;		/* Convert pages to number of maps allocated */
			s = splhigh();							/* Don't bother from now on */
			if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
				panic("mapping_adjust - timeout getting control lock (2)\n");	/* Tell all and die */
			}
		}
		for(; allocsize > 0; allocsize -= MAPPERBLOK) {	/* Release one block at a time */
			mapping_free_init((vm_offset_t)mbn, 0, 1);	/* Initialize a non-permanent block */
			mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);	/* Point to the next slot */
		}
		if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
			mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
	}

	if(mapCtl.mapcholdoff) {						/* Should we hold off this release? */
		mapCtl.mapcrecurse = 0;						/* We are done now */
		hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
		splx(s);									/* Restore 'rupts */
		return;										/* Return... */
	}

	mbn = mapCtl.mapcrel;							/* Get first pending release block */
	mapCtl.mapcrel = 0;								/* Dequeue them */
	mapCtl.mapcreln = 0;							/* Set count to 0 */

	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	while((unsigned int)mbn) {						/* Toss 'em all */
		mb = mbn->nextblok;							/* Get the next */
		kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);	/* Release this mapping block */
		mbn = mb;									/* Chain to the next */
	}

	__asm__ volatile("sync");						/* Make sure all is well */
	mapCtl.mapcrecurse = 0;							/* We are done now */
	return;
}

/*
 *	mapping_free(mapping *mp) - release a mapping to the free list
 *
 *	This routine takes a mapping and adds it to the free list.
 *	If this release gives the block its first free slot, we queue the block to the free block list.
 *	NOTE: we might want to queue it to the end to quelch the pathological
 *	case where we get a mapping and free it repeatedly, causing the block to chain and unchain.
 *	If this release empties a block and we are above the threshold, we release the block.
 */

void mapping_free(struct mapping *mp) {				/* Release a mapping */

	mappingblok	*mb, *mbn;
	spl_t		s;
	unsigned int full, mindx;

	mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;	/* Get index to mapping */
	mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);	/* Point to the mapping block */
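	/* Each mapping is 32 bytes, so a page holds PAGE_SIZE / 32 of them (128 for 4K pages);
	   the four words of mapblokfree carry one free bit per slot - a descriptive note based
	   on the shift and mask above. */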

	s = splhigh();									/* Don't bother from now on */
	if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {	/* Lock the control header */
		panic("mapping_free - timeout getting control lock\n");	/* Tell all and die */
	}

	full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);	/* See if full now */
	mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));	/* Flip on the free bit */

	if(full) {										/* If it was full before this: */
		mb->nextblok = mapCtl.mapcnext;				/* Move head of list to us */
		mapCtl.mapcnext = mb;						/* Chain us to the head of the list */
	}

	mapCtl.mapcfree++;								/* Bump free count */
	mapCtl.mapcinuse--;								/* Decrement in use count */

	mapCtl.mapcfreec++;								/* Count total calls */

	if(mapCtl.mapcfree > mapCtl.mapcmin) {			/* Should we consider releasing this? */
		if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3])
		   == 0xFFFFFFFF) {							/* See if empty now */
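			/* The first slot of a block holds the mappingblok header itself and is never free,
			   so its bit is forced on here when testing for a completely empty block (an
			   inference from the 0x80000000 OR above). */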

			if(mapCtl.mapcnext == mb) {				/* Are we first on the list? */
				mapCtl.mapcnext = mb->nextblok;		/* Unchain us */
				if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;	/* If last, remove last */
			}
			else {									/* We're not first */
				for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {	/* Search for our block */
					if(mbn->nextblok == mb) break;	/* Is the next one ours? */
				}
				if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
				mbn->nextblok = mb->nextblok;		/* Dequeue us */
				if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;	/* If last, make our predecessor last */
			}

			if(mb->mapblokflags & mbPerm) {			/* Is this permanently assigned? */
				mb->nextblok = mapCtl.mapcnext;		/* Move chain head to us */
				mapCtl.mapcnext = mb;				/* Chain us to the head */
				if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb;	/* If last, make us so */
			}
			else {
				mapCtl.mapcfree -= MAPPERBLOK;		/* Remove the block from the free count */
				mapCtl.mapcreln++;					/* Count on release list */
				mb->nextblok = mapCtl.mapcrel;		/* Move pointer */
				mapCtl.mapcrel = mb;				/* Chain us in front */
			}
		}
	}

	if(mapCtl.mapcreln > MAPFRTHRSH) {				/* Do we have way too many releasable mappings? */
		if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {	/* Make sure we aren't recursing */
			thread_call_enter(mapping_adjust_call);	/* Go toss some */
		}
	}
	hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);	/* Unlock our stuff */
	splx(s);										/* Restore 'rupts */

	return;											/* Bye, dude... */
}


/*
 *	mapping_alloc(void) - obtain a mapping from the free list
 *
 *	This routine takes a mapping off of the free list and returns its address.
 *
 *	We do this by finding a free entry in the first block and allocating it.
 *	If this allocation empties the block, we remove it from the free list.
 *	If this allocation drops the total number of free entries below a threshold,
 *	we allocate a new block.
 *
 */

1159 | mapping *mapping_alloc(void) { /* Obtain a mapping */ | |
1160 | ||
1161 | register mapping *mp; | |
1162 | mappingblok *mb, *mbn; | |
1163 | spl_t s; | |
1164 | int mindx; | |
1165 | kern_return_t retr; | |
1166 | ||
1167 | s = splhigh(); /* Don't bother from now on */ | |
1168 | if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ | |
1169 | panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ | |
1170 | } | |
1171 | ||
1172 | if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */ | |
1173 | panic("mapping_alloc - free mappings exhausted\n"); /* Whine and moan */ | |
1174 | } | |
1175 | ||
1176 | if(!(mindx = mapalc(mb))) { /* Allocate a slot */ | |
1177 | panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */ | |
1178 | } | |
1179 | ||
1180 | if(mindx < 0) { /* Did we just take the last one */ | |
1181 | mindx = -mindx; /* Make positive */ | |
1182 | mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */ | |
1183 | if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */ | |
1184 | } | |
1185 | ||
1186 | mapCtl.mapcfree--; /* Decrement free count */ | |
1187 | mapCtl.mapcinuse++; /* Bump in use count */ | |
1188 | ||
1189 | mapCtl.mapcallocc++; /* Count total calls */ | |
1190 | ||
1191 | /* | |
1192 | * Note: in the following code, we will attempt to rescue blocks only one at a time. | |
1193 | * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none | |
1194 | * rescuable, we will kick the adjust thread, which will allocate some for us. We only do this | |
1195 | * if we haven't already done it. | |
1196 | * For early boot, we are set up to only rescue one block at a time. This is because we prime | |
1197 | * the release list with as much as we need until threads start. | |
1198 | */ | |
1199 | if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */ | |
1200 | if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */ | |
1201 | mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */ | |
1202 | mapCtl.mapcreln--; /* Back off the count */ | |
1203 | mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */ | |
1204 | } | |
1205 | else { /* We need to replenish */ | |
1206 | if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) { | |
1207 | if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ | |
1208 | thread_call_enter(mapping_adjust_call); /* Go allocate some more */ | |
1209 | } | |
1210 | } | |
1211 | } | |
1212 | } | |
1213 | ||
1214 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1215 | splx(s); /* Restore 'rupts */ | |
1216 | ||
1217 | mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */ | |
1218 | __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Zero the new mapping (data cache block zero) */ | |
1219 | return mp; /* Send it back... */ | |
1220 | } | |
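/*
 * Editor's note (illustrative sketch, not part of the original source): a pmap-layer
 * caller would typically pair mapping_alloc() with mapping_free(), filling in the
 * mapping fields (pmap, PTEv, PTEr, physent chain, ...) in between.
 */
#if 0 /* example only */
	mapping *mp;

	mp = mapping_alloc();				/* Comes back zeroed by the dcbz above */
	/* ... fill in the mapping and enter it into the hash/physent chains ... */
	mapping_free(mp);				/* Give it back to its block's free bitmap */
#endif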
1221 | ||
1222 | ||
1223 | void | |
1224 | consider_mapping_adjust() | |
1225 | { | |
1226 | spl_t s; | |
1227 | ||
1228 | s = splhigh(); /* Don't bother from now on */ | |
1229 | if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ | |
1230 | panic("consider_mapping_adjust - timeout getting control lock\n"); /* Tell all and die */ | |
1231 | } | |
1232 | ||
1233 | if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) { | |
1234 | if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ | |
1235 | thread_call_enter(mapping_adjust_call); /* Go allocate some more */ | |
1236 | } | |
1237 | } | |
1238 | ||
1239 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1240 | splx(s); /* Restore 'rupts */ | |
1241 | ||
1242 | } | |
1243 | ||
1244 | ||
1245 | ||
1246 | /* | |
1247 | * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list | |
1248 | * | |
1249 | * The mapping block is a page size area on a page boundary. It contains 1 header and 127 | |
1250 | * mappings. This call adds and initializes a block for use. | |
1251 | * | |
1252 | * The header contains a chain link, bit maps, a virtual to real translation mask, and | |
1253 | * some statistics. Bit maps map each slot on the page (bit 0 is not used because it | |
1254 | * corresponds to the header). The translation mask is the XOR of the virtual and real | |
1255 | * addresses (needless to say, the block must be wired). | |
1256 | * | |
1257 | * We handle these mappings the same way as saveareas: the block is only on the chain so | |
1258 | * long as there are free entries in it. | |
1259 | * | |
1260 | * Empty blocks are garbage collected once the number of free mappings exceeds mapCtl.mapcmin. | |
1261 | * Blocks marked PERM won't ever be released. | |
1262 | * | |
1263 | * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel | |
1264 | * list. We do this only at start up time. This is done because we only allocate blocks | |
1265 | * in the pageout scan and it doesn't start up until after we run out of the initial mappings. | |
1266 | * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put | |
1267 | * them on the release queue, the allocate routine will rescue them. Then when the | |
1268 | * pageout scan starts, all extra ones will be released. | |
1269 | * | |
1270 | */ | |
1271 | ||
1272 | ||
1273 | void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) { | |
1274 | /* Sets up a block of mappings. | |
1275 |    perm indicates whether the block is permanent, normal, or | |
1276 |    (if negative) goes straight to the release queue. | |
1277 |    locked indicates if the control lock is held already */ | |
1278 | ||
1279 | mappingblok *mb; | |
1280 | spl_t s; | |
1281 | int i; | |
1282 | unsigned int raddr; | |
1283 | ||
1284 | mb = (mappingblok *)mbl; /* Start of area */ | |
1285 | ||
1286 | ||
1287 | if(perm >= 0) { /* See if we need to initialize the block */ | |
1288 | if(perm) { | |
1289 | raddr = (unsigned int)mbl; /* Perm means V=R */ | |
1290 | mb->mapblokflags = mbPerm; /* Set perm */ | |
1291 | } | |
1292 | else { | |
1293 | raddr = kvtophys(mbl); /* Get real address */ | |
1294 | mb->mapblokflags = 0; /* Set not perm */ | |
1295 | } | |
1296 | ||
1297 | mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */ | |
1298 | ||
1299 | mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */ | |
1300 | mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */ | |
1301 | mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */ | |
1302 | mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */ | |
1303 | } | |
1304 | ||
1305 | s = splhigh(); /* Don't bother from now on */ | |
1306 | if(!locked) { /* Do we need the lock? */ | |
1307 | if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ | |
1308 | panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */ | |
1309 | } | |
1310 | } | |
1311 | ||
1312 | if(perm < 0) { /* Direct to release queue? */ | |
1313 | mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */ | |
1314 | mapCtl.mapcrel = mb; /* Queue us on in */ | |
1315 | mapCtl.mapcreln++; /* Count the free block */ | |
1316 | } | |
1317 | else { /* Add to the free list */ | |
1318 | ||
1319 | mb->nextblok = 0; /* We always add to the end */ | |
1320 | mapCtl.mapcfree += MAPPERBLOK; /* Bump count */ | |
1321 | ||
1322 | if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */ | |
1323 | mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */ | |
1324 | } | |
1325 | else { /* We are not the first */ | |
1326 | mapCtl.mapclast->nextblok = mb; /* Point the last to us */ | |
1327 | mapCtl.mapclast = mb; /* We are now last */ | |
1328 | } | |
1329 | } | |
1330 | ||
1331 | if(!locked) { /* Do we need to unlock? */ | |
1332 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1333 | } | |
1334 | splx(s); /* Restore 'rupts */ | |
1335 | return; /* All done, leave... */ | |
1336 | } | |
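/*
 * Editor's note (illustrative sketch, not part of the original source): because
 * mapblokvrswap is the XOR of the block's virtual and real page addresses, either
 * address of any slot in this wired block can be recovered from the other with a
 * single XOR, as sketched below.
 */
#if 0 /* example only */
	unsigned int mp_real = (unsigned int)mp ^ mb->mapblokvrswap;	/* virtual -> real */
	unsigned int mp_virt = mp_real ^ mb->mapblokvrswap;		/* real -> virtual, same mask */
#endif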
1337 | ||
1338 | ||
1339 | /* | |
1340 | * void mapping_prealloc(unsigned int) - Preallocates mappings for a large request | |
1341 | * | |
1342 | * No locks can be held, because we allocate memory here. | |
1343 | * This routine needs a corresponding mapping_relpre call to remove the | |
1344 | * hold off flag so that the adjust routine will free the extra mapping | |
1345 | * blocks on the release list. I don't like this, but I don't know | |
1346 | * how else to do this for now... | |
1347 | * | |
1348 | */ | |
1349 | ||
1350 | void mapping_prealloc(unsigned int size) { /* Preallocates mappings for a large request */ | |
1351 | ||
1352 | int nmapb, i; | |
1353 | kern_return_t retr; | |
1354 | mappingblok *mbn; | |
1355 | spl_t s; | |
1356 | ||
1357 | s = splhigh(); /* Don't bother from now on */ | |
1358 | if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ | |
1359 | panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */ | |
1360 | } | |
1361 | ||
1362 | nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */ | |
1363 | ||
1364 | mapCtl.mapcholdoff++; /* Bump the hold off count */ | |
1365 | ||
1366 | if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */ | |
1367 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1368 | splx(s); /* Restore 'rupts */ | |
1369 | return; | |
1370 | } | |
1371 | if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ | |
1372 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1373 | splx(s); /* Restore 'rupts */ | |
1374 | return; | |
1375 | } | |
1376 | nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */ | |
1377 | ||
1378 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1379 | splx(s); /* Restore 'rupts */ | |
1380 | ||
1381 | for(i = 0; i < nmapb; i++) { /* Allocate 'em all */ | |
1382 | retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */ | |
1383 | if(retr != KERN_SUCCESS) { /* Did we get some memory? */ | |
1384 | panic("Whoops... Not a bit of wired memory left for anyone\n"); | |
1385 | } | |
1386 | mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */ | |
1387 | } | |
1388 | if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) | |
1389 | mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); | |
1390 | ||
1391 | mapCtl.mapcrecurse = 0; /* We are done now */ | |
1392 | } | |
1393 | ||
1394 | /* | |
1395 | * void mapping_relpre(void) - Releases preallocation release hold off | |
1396 | * | |
1397 | * This routine backs off the | |
1398 | * hold off count so that the adjust routine will free the extra mapping | |
1399 | * blocks on the release list. I don't like this, but I don't know | |
1400 | * how else to do this for now... | |
1401 | * | |
1402 | */ | |
1403 | ||
1404 | void mapping_relpre(void) { /* Releases release hold off */ | |
1405 | ||
1406 | spl_t s; | |
1407 | ||
1408 | s = splhigh(); /* Don't bother from now on */ | |
1409 | if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ | |
1410 | panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */ | |
1411 | } | |
1412 | if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */ | |
1413 | panic("mapping_relpre: hold-off count went negative\n"); | |
1414 | } | |
1415 | ||
1416 | hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ | |
1417 | splx(s); /* Restore 'rupts */ | |
1418 | } | |
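/*
 * Editor's note (illustrative sketch, not part of the original source): mapping_prealloc
 * and mapping_relpre are meant to bracket a large request.  For a hypothetical 8 MB
 * region, mapping_prealloc wants (0x800000 >> 12) = 2048 mappings on top of mapcmin;
 * whatever part of that is not already free gets rounded up to whole MAPPERBLOK blocks.
 */
#if 0 /* example only */
	mapping_prealloc(0x800000);		/* Hold off releases and stock the release queue */
	/* ... enter the mappings for the 8 MB range ... */
	mapping_relpre();			/* Drop the hold off; the adjust routine may trim again */
#endif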
1419 | ||
1420 | /* | |
1421 | * void mapping_free_prime(void) - Primes the mapping block release list | |
1422 | * | |
1423 | * See mapping_free_init. | |
1424 | * No locks can be held, because we allocate memory here. | |
1425 | * One processor running only. | |
1426 | * | |
1427 | */ | |
1428 | ||
1429 | void mapping_free_prime(void) { /* Primes the mapping block release list */ | |
1430 | ||
1431 | int nmapb, i; | |
1432 | kern_return_t retr; | |
1433 | mappingblok *mbn; | |
1434 | vm_offset_t mapping_min; | |
1435 | ||
1436 | retr = kmem_suballoc(kernel_map, &mapping_min, mem_size / 16, | |
1437 | FALSE, TRUE, &mapping_map); | |
1438 | ||
1439 | if (retr != KERN_SUCCESS) | |
1440 | panic("mapping_free_prime: kmem_suballoc failed"); | |
1441 | ||
1442 | ||
1443 | nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */ | |
1444 | nmapb = nmapb * 4; /* Get 4 times our initial allocation */ | |
1445 | ||
1446 | #if DEBUG | |
1447 | kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n", | |
1448 | mapCtl.mapcfree, mapCtl.mapcinuse, nmapb); | |
1449 | #endif | |
1450 | ||
1451 | for(i = 0; i < nmapb; i++) { /* Allocate 'em all */ | |
1452 | retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */ | |
1453 | if(retr != KERN_SUCCESS) { /* Did we get some memory? */ | |
1454 | panic("Whoops... Not a bit of wired memory left for anyone\n"); | |
1455 | } | |
1456 | mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */ | |
1457 | } | |
1458 | if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) | |
1459 | mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); | |
1460 | } | |
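/*
 * Editor's note (illustrative arithmetic, not part of the original source): with the
 * hypothetical figures mapcfree = 1000, mapcinuse = 200, and MAPPERBLOK = 127 (one
 * page-sized block holds 1 header plus 127 mappings), the priming above works out to:
 */
#if 0 /* example only */
	nmapb = (1000 + 200 + 127 - 1) / 127;	/* = 10 blocks to cover the current population */
	nmapb = nmapb * 4;			/* = 40 blocks queued onto the release list */
#endif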
1461 | ||
1462 | ||
1463 | ||
1464 | void mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, | |
1465 | vm_size_t *alloc_size, int *collectable, int *exhaustable) | |
1466 | { | |
1467 | *count = mapCtl.mapcinuse; | |
1468 | *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln); | |
1469 | *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc; | |
1470 | *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1)); | |
1471 | *alloc_size = PAGE_SIZE; | |
1472 | ||
1473 | *collectable = 1; | |
1474 | *exhaustable = 0; | |
1475 | } | |
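/*
 * Editor's note (illustrative arithmetic, not part of the original source): assuming a
 * 4 KB PAGE_SIZE and MAPPERBLOK + 1 = 128 slots per block (header included), the
 * fake-zone element size reported above is 4096 / 128 = 32 bytes per mapping, and
 * cur_size counts whole pages for the blocks parked on the release list.
 */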
1476 | ||
1477 | ||
1478 | /* | |
1479 | * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space | |
1480 | * | |
1481 | * Gets a lock on the physical entry. Then it searches the list of attached mappings for one with | |
1482 | * the same space. If it finds it, it returns the virtual address. | |
1483 | * | |
1484 | * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check | |
1485 | * for it and fail it myself... | |
1486 | */ | |
1487 | ||
1488 | vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */ | |
1489 | ||
1490 | spl_t s; | |
1491 | register mapping *mp, *mpv; | |
1492 | vm_offset_t va; | |
1493 | ||
1494 | if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */ | |
1495 | s = splhigh(); /* Disable interrupts; the splx(s) calls below depend on s being set */ | |
1496 | if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ | |
1497 | splx(s); /* Restore 'rupts */ | |
1498 | panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */ | |
1499 | return(0); /* Should die before here */ | |
1500 | } | |
1501 | ||
1502 | va = 0; /* Assume failure */ | |
1503 | ||
1504 | for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */ | |
1505 | ||
1506 | if(((mpv->PTEv >> 7) & 0x000FFFFF) != pmap->space) continue; /* Skip all the rest if this is not the right space... */ | |
1507 | ||
1508 | va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ | |
1509 | va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ | |
1510 | va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ | |
1511 | break; /* We're done now, pass virtual address back */ | |
1512 | } | |
1513 | ||
1514 | hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ | |
1515 | splx(s); /* Restore 'rupts */ | |
1516 | return(va); /* Return the result or 0... */ | |
1517 | } | |
1518 | ||
1519 | /* | |
1520 | * kvtophys(addr) | |
1521 | * | |
1522 | * Convert a kernel virtual address to a physical address | |
1523 | */ | |
1524 | vm_offset_t kvtophys(vm_offset_t va) { | |
1525 | ||
1526 | register mapping *mp, *mpv; | |
1527 | register blokmap *bmp; | |
1528 | register vm_offset_t pa; | |
1529 | spl_t s; | |
1530 | ||
1531 | s=splhigh(); /* Don't bother from now on */ | |
1532 | mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */ | |
1533 | ||
1534 | if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ | |
1535 | splx(s); /* Restore 'rupts */ | |
1536 | panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */ | |
1537 | return 0; | |
1538 | } | |
1539 | ||
1540 | if(!mp) { /* If it was not a normal page */ | |
1541 | pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ | |
1542 | splx(s); /* Restore 'rupts */ | |
1543 | return pa; /* Return physical address */ | |
1544 | } | |
1545 | ||
1546 | mpv = hw_cpv(mp); /* Convert to virtual addressing */ | |
1547 | ||
1548 | if(!mpv->physent) { /* Was there a physical entry? */ | |
1549 | pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* No physent; get physical address from the real PTE image */ | |
1550 | } | |
1551 | else { | |
1552 | pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */ | |
1553 | hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ | |
1554 | } | |
1555 | ||
1556 | splx(s); /* Restore 'rupts */ | |
1557 | return pa; /* Return the physical address... */ | |
1558 | } | |
1559 | ||
1560 | /* | |
1561 | * phystokv(addr) | |
1562 | * | |
1563 | * Convert a physical address to a kernel virtual address if | |
1564 | * there is a mapping, otherwise return NULL | |
1565 | */ | |
1566 | ||
1567 | vm_offset_t phystokv(vm_offset_t pa) { | |
1568 | ||
1569 | struct phys_entry *pp; | |
1570 | vm_offset_t va; | |
1571 | ||
1572 | pp = pmap_find_physentry(pa); /* Find the physical entry */ | |
1573 | if (PHYS_NULL == pp) { | |
1574 | return (vm_offset_t)NULL; /* If none, return null */ | |
1575 | } | |
1576 | if(!(va=mapping_p2v(kernel_pmap, pp))) { | |
1577 | return 0; /* Can't find it, return 0... */ | |
1578 | } | |
1579 | return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... */ | |
1580 | ||
1581 | } | |
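/*
 * Editor's note (illustrative sketch, not part of the original source): kvtophys and
 * phystokv are near-inverses for ordinary wired kernel pages; phystokv only recovers
 * the first kernel virtual mapping found for the page, and both return 0 on failure.
 * some_wired_kva below is a hypothetical wired kernel virtual address.
 */
#if 0 /* example only */
	vm_offset_t pa  = kvtophys(some_wired_kva);	/* virtual -> physical */
	vm_offset_t kva = phystokv(pa);			/* physical -> first kernel virtual found */
#endif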
1582 | ||
1583 | /* | |
1584 | * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on | |
1585 | * page 0 access for the current thread. | |
1586 | * | |
1587 | * If parameter is TRUE, faults are ignored | |
1588 | * If parameter is FALSE, faults are honored | |
1589 | * | |
1590 | */ | |
1591 | ||
1592 | void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */ | |
1593 | ||
1594 | if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */ | |
1595 | else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */ | |
1596 | ||
1597 | return; /* All done... */ | |
1598 | } | |
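/*
 * Editor's note (illustrative sketch, not part of the original source): a caller that
 * deliberately probes page 0 would typically bracket the access, restoring the default
 * behavior when done.
 */
#if 0 /* example only */
	ignore_zero_fault(TRUE);		/* Tolerate page 0 faults on this thread */
	/* ... touch low memory ... */
	ignore_zero_fault(FALSE);		/* Back to normal fault handling */
#endif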
1599 | ||
1600 | ||
1601 | /* | |
1602 | * Allocates a range of virtual addresses in a map as optimally as | |
1603 | * possible for block mapping. The start address is aligned such | |
1604 | * that a minimum number of power-of-two sized/aligned blocks is | |
1605 | * required to cover the entire range. | |
1606 | * | |
1607 | * We also use a mask of valid block sizes to determine optimality. | |
1608 | * | |
1609 | * Note that the passed in pa is not actually mapped to the selected va, | |
1610 | * rather, it is used to figure the optimal boundary. The actual | |
1611 | * V to R mapping is done externally. | |
1612 | * | |
1613 | * This function will return KERN_INVALID_ADDRESS if an optimal address | |
1614 | * cannot be found. It is not necessarily a fatal error; the caller may still | |
1615 | * be able to do a non-optimal assignment. | |
1616 | */ | |
1617 | ||
1618 | kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, | |
1619 | vm_size_t size, vm_prot_t prot) { | |
1620 | ||
1621 | vm_map_entry_t entry, next, tmp_entry, new_entry; | |
1622 | vm_offset_t start, end, algnpa, endadr, strtadr, curradr; | |
1623 | vm_offset_t boundary; | |
1624 | ||
1625 | unsigned int maxsize, minsize, leading, trailing; | |
1626 | ||
1627 | assert(page_aligned(pa)); | |
1628 | assert(page_aligned(size)); | |
1629 | ||
1630 | if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */ | |
1631 | ||
1632 | minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */ | |
1633 | maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */ | |
1634 | ||
1635 | boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */ | |
1636 | if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */ | |
1637 | ||
1638 | vm_map_lock(map); /* No touchee no mapee */ | |
1639 | ||
1640 | for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */ | |
1641 | if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */ | |
1642 | algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */ | |
1643 | leading = algnpa - pa; /* Get leading size */ | |
1644 | ||
1645 | curradr = 0; /* Start low */ | |
1646 | ||
1647 | while(1) { /* Try all possible values for this opt level */ | |
1648 | ||
1649 | curradr = curradr + boundary; /* Get the next optimal address */ | |
1650 | strtadr = curradr - leading; /* Calculate start of optimal range */ | |
1651 | endadr = strtadr + size; /* And now the end */ | |
1652 | ||
1653 | if((curradr < boundary) || /* Did address wrap here? */ | |
1654 | (strtadr > curradr) || /* How about this way? */ | |
1655 | (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */ | |
1656 | ||
1657 | if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */ | |
1658 | if(endadr > map->max_offset) break; /* No room right now... */ | |
1659 | ||
1660 | if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */ | |
1661 | ||
1662 | next = entry->vme_next; /* Get the next entry */ | |
1663 | if((next == vm_map_to_entry(map)) || /* Are we the last entry? */ | |
1664 | (next->vme_start >= endadr)) { /* or do we end before the next entry? */ | |
1665 | ||
1666 | new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */ | |
1667 | VM_OBJECT_NULL, | |
1668 | 0, /* Offset into object of 0 */ | |
1669 | FALSE, /* No copy needed */ | |
1670 | FALSE, /* Not shared */ | |
1671 | FALSE, /* Not in transition */ | |
1672 | prot, /* Set the protection to requested */ | |
1673 | prot, /* We can't change protection */ | |
1674 | VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind, | |
1675 | 'cause we don't page in this area */ | |
1676 | VM_INHERIT_DEFAULT, /* Default inheritance */ | |
1677 | 0); /* Nothing is wired */ | |
1678 | ||
1679 | vm_map_unlock(map); /* Let the world see it all */ | |
1680 | *va = strtadr; /* Tell everyone */ | |
1681 | *bnd = boundary; /* Say what boundary we are aligned to */ | |
1682 | return(KERN_SUCCESS); /* Leave, all is right with the world... */ | |
1683 | } | |
1684 | } | |
1685 | } | |
1686 | ||
1687 | vm_map_unlock(map); /* Couldn't find a slot */ | |
1688 | return(KERN_INVALID_ADDRESS); | |
1689 | } | |
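/*
 * Editor's note (illustrative arithmetic, not part of the original source): the starting
 * boundary above is the largest power of two not exceeding the request, pinned to the
 * largest block size advertised in blokValid.  For a hypothetical size of 0x00300000
 * (3 MB), 0x80000000 >> cntlzw(0x00300000) = 0x00200000, so the search begins at 2 MB
 * alignment and halves the boundary until a slot is found or minsize is reached.
 */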
1690 | ||
1691 | /* | |
1692 | * Copies data from a physical page to a virtual page. This is used to | |
1693 | * move data from the kernel to user state. | |
1694 | * | |
1695 | * Note that it is invalid to have a source that spans a page boundary. | |
1696 | * This can block. | |
1697 | * We don't check protection, | |
1698 | * and we don't handle a block-mapped sink address. | |
1699 | * | |
1700 | */ | |
1701 | ||
1702 | kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) { | |
1703 | ||
1704 | vm_map_t map; | |
1705 | kern_return_t ret; | |
1706 | unsigned int spaceid; | |
1707 | int left, csize; | |
1708 | vm_offset_t pa; | |
1709 | register mapping *mpv, *mp; | |
1710 | spl_t s; | |
1711 | ||
1712 | if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */ | |
1713 | map = current_act()->map; /* Get the current map */ | |
1714 | ||
1715 | while(size) { | |
1716 | s=splhigh(); /* Don't bother me */ | |
1717 | ||
1718 | spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */ | |
1719 | ||
1720 | mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */ | |
1721 | if(!mp) { /* Was it there? */ | |
1722 | splx(s); /* Restore the interrupt level */ | |
1723 | ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* Didn't find it, try to fault it in... */ | |
1724 | if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ | |
1725 | ||
1726 | return KERN_FAILURE; /* Didn't find any, return no good... */ | |
1727 | } | |
1728 | if((unsigned int)mp&1) { /* Did we timeout? */ | |
1729 | panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink); /* Yeah, scream about it! */ | |
1730 | splx(s); /* Restore the interrupt level */ | |
1731 | return KERN_FAILURE; /* Bad hair day, return failure... */ | |
1732 | } | |
1733 | ||
1734 | mpv = hw_cpv(mp); /* Convert mapping block to virtual */ | |
1735 | ||
1736 | if(mpv->PTEr & 1) { /* Are we write protected? yes, could indicate COW */ | |
1737 | hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ | |
1738 | splx(s); /* Restore the interrupt level */ | |
1739 | ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* check for a COW area */ | |
1740 | if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ | |
1741 | return KERN_FAILURE; /* Didn't find any, return no good... */ | |
1742 | } | |
1743 | left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */ | |
1744 | ||
1745 | csize = size < left ? size : left; /* Set amount to copy this pass */ | |
1746 | ||
1747 | pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */ | |
1748 | ||
1749 | bcopy_phys((char *)source, (char *)pa, csize); /* Do a physical copy */ | |
1750 | ||
1751 | hw_set_mod(mpv->physent); /* Go set the change of the sink */ | |
1752 | ||
1753 | hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ | |
1754 | splx(s); /* Open up for interrupts */ | |
1755 | ||
1756 | sink += csize; /* Move up to start of next page */ | |
1757 | source += csize; /* Move up source */ | |
1758 | size -= csize; /* Set amount for next pass */ | |
1759 | } | |
1760 | return KERN_SUCCESS; | |
1761 | } | |
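/*
 * Editor's note (illustrative sketch, not part of the original source): copyp2v moves
 * bytes from a physical source into a user virtual sink in the current map; the source
 * must stay within one page, while the sink may span pages (the loop above handles
 * that).  phys_source and user_sink_va below are hypothetical.
 */
#if 0 /* example only */
	kern_return_t kr;

	kr = copyp2v(phys_source, user_sink_va, 256);	/* 256 bytes, well within one source page */
	if(kr != KERN_SUCCESS) {
		/* sink could not be faulted in, or the sink page was write-protected for COW */
	}
#endif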
1762 | ||
1763 | ||
1764 | #if DEBUG | |
1765 | /* | |
1766 | * Dumps out the mapping stuff associated with a virtual address | |
1767 | */ | |
1768 | void dumpaddr(space_t space, vm_offset_t va) { | |
1769 | ||
1770 | mapping *mp, *mpv; | |
1771 | vm_offset_t pa; | |
1772 | spl_t s; | |
1773 | ||
1774 | s=splhigh(); /* Don't bother me */ | |
1775 | ||
1776 | mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */ | |
1777 | if(!mp) { /* Did we find one? */ | |
1778 | splx(s); /* Restore the interrupt level */ | |
1779 | printf("dumpaddr: virtual address (%08X) not mapped\n", va); | |
1780 | return; /* Didn't find any, return FALSE... */ | |
1781 | } | |
1782 | if((unsigned int)mp&1) { /* Did we timeout? */ | |
1783 | panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */ | |
1784 | splx(s); /* Restore the interrupt level */ | |
1785 | return; /* Bad hair day, return FALSE... */ | |
1786 | } | |
1787 | printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address we're dumping */ | |
1788 | mpv = hw_cpv(mp); /* Get virtual address of mapping */ | |
1789 | dumpmapping(mpv); | |
1790 | if(mpv->physent) { | |
1791 | dumppca(mpv); | |
1792 | hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ | |
1793 | } | |
1794 | splx(s); /* Was there something you needed? */ | |
1795 | return; /* Tell them we did it */ | |
1796 | } | |
1797 | ||
1798 | ||
1799 | ||
1800 | /* | |
1801 | * Prints out a mapping control block | |
1802 | * | |
1803 | */ | |
1804 | ||
1805 | void dumpmapping(struct mapping *mp) { /* Dump out a mapping */ | |
1806 | ||
1807 | printf("Dump of mapping block: %08X\n", mp); /* Header */ | |
1808 | printf(" next: %08X\n", mp->next); | |
1809 | printf(" hashnext: %08X\n", mp->hashnext); | |
1810 | printf(" PTEhash: %08X\n", mp->PTEhash); | |
1811 | printf(" PTEent: %08X\n", mp->PTEent); | |
1812 | printf(" physent: %08X\n", mp->physent); | |
1813 | printf(" PTEv: %08X\n", mp->PTEv); | |
1814 | printf(" PTEr: %08X\n", mp->PTEr); | |
1815 | printf(" pmap: %08X\n", mp->pmap); | |
1816 | ||
1817 | if(mp->physent) { /* Print physent if it exists */ | |
1818 | printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1); | |
1819 | } | |
1820 | else { | |
1821 | printf("Associated physical entry: none\n"); | |
1822 | } | |
1823 | ||
1824 | dumppca(mp); /* Dump out the PCA information */ | |
1825 | ||
1826 | return; | |
1827 | } | |
1828 | ||
1829 | /* | |
1830 | * Prints out a PTEG control area | |
1831 | * | |
1832 | */ | |
1833 | ||
1834 | void dumppca(struct mapping *mp) { /* PCA */ | |
1835 | ||
1836 | PCA *pca; | |
1837 | unsigned int *pteg; | |
1838 | ||
1839 | pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */ | |
1840 | pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16)); | |
1841 | printf(" Dump of PCA: %08X\n", pca); /* Header */ | |
1842 | printf(" PCAlock: %08X\n", pca->PCAlock); | |
1843 | printf(" PCAallo: %08X\n", pca->flgs.PCAallo); | |
1844 | printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]); | |
1845 | printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]); | |
1846 | printf("Dump of PTEG: %08X\n", pteg); /* Header */ | |
1847 | printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]); | |
1848 | printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]); | |
1849 | printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]); | |
1850 | printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]); | |
1851 | return; | |
1852 | } | |
1853 | ||
1854 | /* | |
1855 | * Dumps starting with a physical entry | |
1856 | */ | |
1857 | ||
1858 | void dumpphys(struct phys_entry *pp) { /* Dump from physent */ | |
1859 | ||
1860 | mapping *mp; | |
1861 | PCA *pca; | |
1862 | unsigned int *pteg; | |
1863 | ||
1864 | printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1); | |
1865 | mp = hw_cpv(pp->phys_link); | |
1866 | while(mp) { | |
1867 | dumpmapping(mp); | |
1868 | dumppca(mp); | |
1869 | mp = hw_cpv(mp->next); | |
1870 | } | |
1871 | ||
1872 | return; | |
1873 | } | |
1874 | ||
1875 | #endif | |
1876 | ||
1877 | ||
1878 | kern_return_t bmapvideo(vm_offset_t *info); | |
1879 | kern_return_t bmapvideo(vm_offset_t *info) { | |
1880 | ||
1881 | extern struct vc_info vinfo; | |
1882 | ||
1883 | (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */ | |
1884 | return KERN_SUCCESS; | |
1885 | } | |
1886 | ||
1887 | kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); | |
1888 | kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { | |
1889 | ||
1890 | pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */ | |
1891 | return KERN_SUCCESS; | |
1892 | } | |
1893 | ||
1894 | kern_return_t bmapmapr(vm_offset_t va); | |
1895 | kern_return_t bmapmapr(vm_offset_t va) { | |
1896 | ||
1897 | mapping_remove(current_act()->task->map->pmap, va); /* Remove map */ | |
1898 | return KERN_SUCCESS; | |
1899 | } |