]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
2d21ac55 | 2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * This file is used to maintain the exception save areas | |
30 | * | |
31 | */ | |
32 | ||
1c79356b A |
33 | #include <debug.h> |
34 | #include <mach_kgdb.h> | |
35 | #include <mach_vm_debug.h> | |
36 | ||
37 | #include <kern/thread.h> | |
38 | #include <mach/vm_attributes.h> | |
39 | #include <mach/vm_param.h> | |
40 | #include <vm/vm_kern.h> | |
41 | #include <vm/vm_map.h> | |
42 | #include <vm/vm_page.h> | |
43 | #include <mach/ppc/thread_status.h> | |
44 | #include <kern/spl.h> | |
45 | #include <kern/simple_lock.h> | |
46 | ||
47 | #include <kern/misc_protos.h> | |
48 | #include <ppc/misc_protos.h> | |
49 | #include <ppc/proc_reg.h> | |
50 | #include <ppc/mem.h> | |
51 | #include <ppc/pmap.h> | |
1c79356b A |
52 | #include <ppc/Firmware.h> |
53 | #include <ppc/mappings.h> | |
54 | #include <ppc/exception.h> | |
55 | #include <ppc/savearea.h> | |
56 | #include <ddb/db_output.h> | |
57 | ||
58 | ||
struct Saveanchor	backpocket;	/* Emergency savearea anchor: a snapshot of the boot-time
					   back-pocket free list, installed if the normal free
					   list is ever exhausted (see save_get/panic path) */

unsigned int	debsave0 = 0;		/* Debug flag -- NOTE(review): not referenced in this file;
					   presumably poked from a debugger -- confirm before removing */
unsigned int	backchain = 0;		/* Debug flag -- NOTE(review): not referenced in this file;
					   presumably poked from a debugger -- confirm before removing */
62 | ||
63 | /* | |
64 | * These routines keep track of exception save areas and keeps the count within specific limits. If there are | |
65 | * too few, more are allocated, too many, and they are released. This savearea is where the PCBs are | |
66 | * stored. They never span a page boundary and are referenced by both virtual and real addresses. | |
67 | * Within the interrupt vectors, the real address is used because at that level, no exceptions | |
68 | * can be tolerated. Save areas can be dynamic or permanent. Permanent saveareas are allocated |
69 | * at boot time and must be in place before any type of exception occurs. These are never released, | |
70 | * and the number is based upon some arbitrary (yet to be determined) amount times the number of | |
71 | * processors. This represents the minimum number required to process a total system failure without | |
72 | * destroying valuable and ever-so-handy system debugging information. | |
73 | * | |
9bccf70c A |
74 | * We keep two global free lists (the savearea free pool and the savearea free list) and one local |
75 | * list per processor. | |
1c79356b | 76 | * |
9bccf70c A |
77 | * The local lists are small and require no locked access. They are chained using physical addresses |
78 | * and no interruptions are allowed when adding to or removing from the list. Also known as the | |
79 | * qfret list. This list is local to a processor and is intended for use only by very low level | |
80 | * context handling code. | |
81 | * | |
82 | * The savearea free list is a medium size list that is globally accessible. It is updated | |
83 | * while holding a simple lock. The length of time that the lock is held is kept short. The | |
84 | * longest period of time is when the list is trimmed. Like the qfret lists, this is chained physically | |
85 | * and must be accessed with translation and interruptions disabled. This is where the bulk | |
86 | * of the free entries are located. | |
87 | * | |
88 | * The saveareas are allocated from full pages. A pool element is marked | |
89 | * with an allocation map that shows which "slots" are free. These pages are allocated via the | |
90 | * normal kernel memory allocation functions. Queueing is with physical addresses. The enqueue, | |
91 | * dequeue, and search for free blocks is done under free list lock. | |
92 | * only if there are empty slots in it. | |
93 | * | |
94 | * Saveareas are counted as "in use" once they are removed from the savearea free list. |
95 | * This means that all areas on the local qfret list are considered in use. | |
96 | * | |
97 | * There are two methods of obtaining a savearea. The save_get function (which is also inlined | |
98 | * in the low-level exception handler) attempts to get an area from the local qfret list. This is | |
99 | * done completely without locks. If qfret is exhausted (or maybe just too low) an area is allocated |
100 | * from the savearea free list. If the free list is empty, we install the back pocket areas and | |
101 | * panic. | |
102 | * | |
103 | * The save_alloc function is designed to be called by high level routines, e.g., thread creation, | |
104 | * etc. It will allocate from the free list. After allocation, it will compare the free count | |
105 | * to the target value. If outside of the range, it will adjust the size either upwards or | |
106 | * downwards. | |
107 | * | |
108 | * If we need to shrink the list, it will be trimmed to the target size and unlocked. The code | |
109 | * will walk the chain and return each savearea to its pool page. If a pool page becomes | |
110 | * completely empty, it is dequeued from the free pool list and enqueued (atomic queue | |
111 | * function) to be released. | |
112 | * | |
113 | * Once the trim list is finished, the pool release queue is checked to see if there are pages | |
114 | * waiting to be released. If so, they are released one at a time. | |
115 | * | |
116 | * If the free list needed to be grown rather than shrunken, we will first attempt to recover | |
117 | * a page from the pending release queue (built when we trim the free list). If we find one, | |
118 | * it is allocated, otherwise, a page of kernel memory is allocated. This loops until there are | |
119 | * enough free saveareas. | |
120 | * | |
1c79356b A |
121 | */ |
122 | ||
9bccf70c A |
123 | |
124 | ||
/*
 *		savearea_init: set up the savearea anchor and carve the initial
 *		context save areas out of the page-aligned block starting at "addr".
 *		As soon as this routine finishes, the system can take an interrupt,
 *		because the low-level handlers will find saveareas on the free list.
 *
 *		Note: these initial saveareas are all to be allocated from V=R, less
 *		than 4GB space, so their virtual and real addresses coincide and
 *		32-bit addressing suffices (this is why save_get may be called at
 *		the bottom of this routine).
 */


void savearea_init(vm_offset_t addr) {

	savearea_comm	*savec;				/* Control area of the page currently being set up */
	vm_offset_t	save;				/* Cursor walking the block of pages at addr */
	unsigned int	i;


	saveanchor.savetarget = InitialSaveTarget;	/* Initial target value */
	saveanchor.saveinuse = 0;			/* Number of areas in use */

	saveanchor.savefree = 0;			/* Remember the start of the free chain */
	saveanchor.savefreecnt = 0;			/* Remember the length */
	saveanchor.savepoolfwd = (addr64_t)&saveanchor;	/* Empty pool ring: forward link points at the anchor itself */
	saveanchor.savepoolbwd = (addr64_t)&saveanchor;	/* Empty pool ring: backward link likewise */

	save = addr;					/* Point to the whole block of blocks */

/*
 *	First we allocate the back pocket in case of emergencies
 */


	for(i = 0; i < BackPocketSaveBloks; i++) {	/* Initialize the back pocket saveareas */

		savec = (savearea_comm *)save;		/* Get the control area for this one */

		savec->sac_alloc = 0;			/* Mark it allocated */
		savec->sac_vrswap = 0;			/* V=R, so the virtual<->real translation factor is 0 */
		savec->sac_flags = sac_perm;		/* Mark it permanent -- never released back to the kernel */
		savec->sac_flags |= 0x0000EE00;		/* Debug eyecatcher */
		save_queue((uint32_t)savec >> 12);	/* Add page (by physical page number; valid because V=R) to savearea lists */
		save += PAGE_SIZE;			/* Jump up to the next one now */

	}

	backpocket = saveanchor;			/* Snapshot the whole anchor (i.e., the back pocket list) for emergencies */


/*
 *	We've saved away the back pocket savearea info, so reset it all and
 *	now allocate for real
 */


	saveanchor.savefree = 0;			/* Remember the start of the free chain */
	saveanchor.savefreecnt = 0;			/* Remember the length */
	saveanchor.saveadjust = 0;			/* Set none needed yet */
	saveanchor.savepoolfwd = (addr64_t)&saveanchor;	/* Remember pool forward */
	saveanchor.savepoolbwd = (addr64_t)&saveanchor;	/* Remember pool backward */

	for(i = 0; i < InitialSaveBloks; i++) {		/* Initialize the real saveareas, in the pages after the back pocket */

		savec = (savearea_comm *)save;		/* Get the control area for this one */

		savec->sac_alloc = 0;			/* Mark it allocated */
		savec->sac_vrswap = 0;			/* V=R, so the virtual<->real translation factor is 0 */
		savec->sac_flags = sac_perm;		/* Mark it permanent */
		savec->sac_flags |= 0x0000EE00;		/* Debug eyecatcher */
		save_queue((uint32_t)savec >> 12);	/* Add page to savearea lists */
		save += PAGE_SIZE;			/* Jump up to the next one now */

	}

/*
 *	We now have a free list that has our initial number of entries.
 *	The local qfret lists are empty.  When we call save_get below it will see that
 *	the local list is empty and fill it for us.
 *
 *	It is ok to call save_get here because all initial saveareas are V=R in less
 *	than 4GB space, so 32-bit addressing is ok.
 *
 */

/*
 *	This will populate the local list and get the first one for the system
 */
	/* XXX next_savearea should be a void * 4425541 */
	getPerProc()->next_savearea = (unsigned long)(void *)save_get();

/*
 *	The system is now able to take interruptions
 */
}
218 | ||
9bccf70c A |
219 | |
220 | ||
221 | ||
1c79356b | 222 | /* |
55e303ae | 223 | * Obtains a savearea. If the free list needs size adjustment it happens here. |
9bccf70c | 224 | * Don't actually allocate the savearea until after the adjustment is done. |
1c79356b A |
225 | */ |
226 | ||
9bccf70c A |
227 | struct savearea *save_alloc(void) { /* Reserve a save area */ |
228 | ||
229 | ||
230 | if(saveanchor.saveadjust) save_adjust(); /* If size need adjustment, do it now */ | |
231 | ||
232 | return save_get(); /* Pass the baby... */ | |
233 | } | |
234 | ||
235 | ||
236 | /* | |
2d21ac55 A |
237 | * This routine releases a save area to the free queue. If after that, |
238 | * we have more than our maximum target, we start releasing what we can | |
239 | * until we hit the normal target. | |
9bccf70c | 240 | */ |
1c79356b | 241 | |
2d21ac55 A |
242 | void |
243 | save_release(struct savearea *save) | |
244 | { | |
245 | /* Return a savearea to the free list */ | |
246 | save_ret(save); | |
1c79356b | 247 | |
2d21ac55 A |
248 | /* Adjust the savearea free list and pool size if needed */ |
249 | if(saveanchor.saveadjust) | |
250 | save_adjust(); | |
1c79356b A |
251 | } |
252 | ||
/*
 *		Adjusts the size of the free list.  Can either release or allocate full pages
 *		of kernel memory.  This can block.
 *
 *		Note that we will only run one adjustment and the amount needed may change
 *		while we are executing.
 *
 *		Calling this routine is triggered by saveanchor.saveadjust.  This value is always calculated just before
 *		we unlock the saveanchor lock (this keeps it pretty accurate).  If the total of savefreecnt and saveinuse
 *		is within the hysteresis range, it is set to 0.  If outside, it is set to the number needed to bring
 *		the total to the target value.  Note that there is a minimum size to the free list (FreeListMin) and if
 *		savefreecnt falls below that, saveadjust is set to the number needed to bring it to that.
 */


void save_adjust(void) {

	savearea_comm	*sctl, *sctlnext, *freepage;	/* Trim-chain walkers and the newly allocated pool page */
	kern_return_t	ret;
	ppnum_t		physpage;			/* Physical page number backing the new pool page */

	if(saveanchor.saveadjust < 0) {			/* Do we need to adjust down? */

		sctl = (savearea_comm *)save_trim_free();	/* Trim list to the need count, return start of trim list */

		while(sctl) {				/* Release the free pages back to the kernel */
			sctlnext = CAST_DOWN(savearea_comm *, sctl->save_prev);	/* Get next in list (trim chain is linked through save_prev) */
			kmem_free(kernel_map, (vm_offset_t) sctl, PAGE_SIZE);	/* Release the page */
			sctl = sctlnext;		/* Chain onwards */
		}
	}
	else {						/* We need more... */

		if(save_recover()) return;		/* If we can recover enough from the pool, return */

		while(saveanchor.saveadjust > 0) {	/* Keep going until we have enough */

			ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&freepage, PAGE_SIZE);	/* Get a wired page for the free pool */
			if(ret != KERN_SUCCESS) {	/* Did we get some memory? */
				panic("Whoops... Not a bit of wired memory left for saveareas\n");
			}

			physpage = pmap_find_phys(kernel_pmap, (vm_offset_t)freepage);	/* Find the physical page backing the new page */
			if(!physpage) {			/* See if we actually have this mapped*/
				panic("save_adjust: wired page not mapped - va = %p\n", freepage);	/* Die */
			}

			bzero((void *)freepage, PAGE_SIZE);	/* Clear it all to zeros */
			freepage->sac_alloc = 0;	/* Mark all entries taken */
			freepage->sac_vrswap = ((uint64_t)physpage << 12) ^ (uint64_t)((uintptr_t)freepage);	/* XOR of real and virtual addresses yields the V<->R conversion mask */

			freepage->sac_flags |= 0x0000EE00;	/* Set debug eyecatcher */

			save_queue(physpage);		/* Add all saveareas on page to free list */
		}
	}
}
310 | ||
9bccf70c A |
311 | /* |
312 | * Fake up information to make the saveareas look like a zone | |
313 | */ | |
91447636 | 314 | void |
9bccf70c A |
315 | save_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, |
316 | vm_size_t *alloc_size, int *collectable, int *exhaustable) | |
317 | { | |
318 | *count = saveanchor.saveinuse; | |
319 | *cur_size = (saveanchor.savefreecnt + saveanchor.saveinuse) * (PAGE_SIZE / sac_cnt); | |
320 | *max_size = saveanchor.savemaxcount * (PAGE_SIZE / sac_cnt); | |
2d21ac55 | 321 | *elem_size = sizeof(struct savearea); |
9bccf70c A |
322 | *alloc_size = PAGE_SIZE; |
323 | *collectable = 1; | |
324 | *exhaustable = 0; | |
325 | } | |
1c79356b A |
326 | |
327 |