/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * This file is used to maintain the exception save areas
 */

#include <cpus.h>
#include <debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>

#include <kern/thread.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <mach/ppc/thread_status.h>
#include <kern/spl.h>
#include <kern/simple_lock.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/mem.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ddb/db_output.h>

extern struct Saveanchor saveanchor;		/* Aligned savearea anchor */
struct Saveanchor backpocket;			/* Emergency saveareas */

unsigned int debsave0 = 0;			/* Debug flag */
unsigned int backchain = 0;			/* Debug flag */

/*
 * These routines keep track of exception save areas and keep the count within specific limits.  If there
 * are too few, more are allocated; if there are too many, some are released.  This savearea is where the
 * PCBs are stored.  They never span a page boundary and are referenced by both virtual and real addresses.
 * Within the interrupt vectors, the real address is used because at that level, no exceptions
 * can be tolerated.  Save areas can be dynamic or permanent.  Permanent saveareas are allocated
 * at boot time and must be in place before any type of exception occurs.  These are never released,
 * and the number is based upon some arbitrary (yet to be determined) amount times the number of
 * processors.  This represents the minimum number required to process a total system failure without
 * destroying valuable and ever-so-handy system debugging information.
 *
 * We keep two global free lists (the savearea free pool and the savearea free list) and one local
 * list per processor.
 *
 * The local lists are small and require no locked access.  They are chained using physical addresses
 * and no interruptions are allowed when adding to or removing from the list.  Also known as the
 * qfret list, this list is local to a processor and is intended for use only by very low level
 * context handling code.
 *
 * The savearea free list is a medium size list that is globally accessible.  It is updated
 * while holding a simple lock.  The length of time that the lock is held is kept short.  The
 * longest period of time is when the list is trimmed.  Like the qfret lists, this is chained physically
 * and must be accessed with translation and interruptions disabled.  This is where the bulk
 * of the free entries are located.
 *
 * The saveareas are allocated from full pages.  A pool element is marked
 * with an allocation map that shows which "slots" are free.  These pages are allocated via the
 * normal kernel memory allocation functions.  Queueing is with physical addresses.  The enqueue,
 * dequeue, and search for free blocks are done under the free list lock.  A pool page is kept
 * on the free pool list only if there are empty slots in it.
 *
 * Saveareas are counted as "in use" once they are removed from the savearea free list.
 * This means that all areas on the local qfret list are considered in use.
 *
 * There are two methods of obtaining a savearea.  The save_get function (which is also inlined
 * in the low-level exception handler) attempts to get an area from the local qfret list.  This is
 * done completely without locks.  If qfret is exhausted (or maybe just too low), an area is allocated
 * from the savearea free list.  If the free list is empty, we install the back pocket areas and
 * panic.
 *
 * The save_alloc function is designed to be called by high level routines, e.g., thread creation,
 * etc.  It will allocate from the free list.  After allocation, it will compare the free count
 * to the target value.  If outside of the range, it will adjust the size either upwards or
 * downwards.
 *
 * If we need to shrink the list, it will be trimmed to the target size and unlocked.  The code
 * will walk the chain and return each savearea to its pool page.  If a pool page becomes
 * completely empty, it is dequeued from the free pool list and enqueued (atomic queue
 * function) to be released.
 *
 * Once the trim list is finished, the pool release queue is checked to see if there are pages
 * waiting to be released.  If so, they are released one at a time.
 *
 * If the free list needed to be grown rather than shrunk, we will first attempt to recover
 * a page from the pending release queue (built when we trim the free list).  If we find one,
 * it is allocated; otherwise, a page of kernel memory is allocated.  This loops until there are
 * enough free saveareas.
 */
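
/*
 * Illustrative sketch only (not used by the kernel): the allocation-map scan
 * described above.  It assumes, as the initialization code below suggests,
 * that a set bit in sac_alloc marks a free slot, that the map is scanned from
 * the high-order bit, and that a page holds sac_cnt slots.  The helper name
 * is hypothetical.
 */
static int sav_find_free_slot_sketch(unsigned int alloc_map) {

	int i;

	for(i = 0; i < sac_cnt; i++) {			/* Check each slot's bit in the map */
		if(alloc_map & (0x80000000U >> i)) return i;	/* A set bit means the slot is free */
	}

	return -1;					/* Page is fully allocated */
}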


/*
 * Allocate our initial context save areas.  As soon as we do this,
 * we can take an interrupt.  We allocate the saveareas here because they are
 * guaranteed to be at least page aligned.
 */

void savearea_init(vm_offset_t *addrx) {

	savearea_comm	*savec, *savec2, *saveprev;
	vm_offset_t	save, save2, addr;
	int		i;

	saveanchor.savetarget = InitialSaveTarget;	/* Initial target value */
	saveanchor.saveinuse = 0;			/* Number of areas in use */

	saveanchor.savefree = 0;			/* Remember the start of the free chain */
	saveanchor.savefreecnt = 0;			/* Remember the length */
	saveanchor.savepoolfwd = (unsigned int *)&saveanchor;	/* Remember pool forward */
	saveanchor.savepoolbwd = (unsigned int *)&saveanchor;	/* Remember pool backward */

	addr = *addrx;					/* Make this easier for ourselves */

	save = addr;					/* Point to the whole block of blocks */

/*
 * First we allocate the back pocket in case of emergencies
 */

	for(i = 0; i < 8; i++) {			/* Initialize the back pocket saveareas */

		savec = (savearea_comm *)save;		/* Get the control area for this one */

		savec->sac_alloc = 0;			/* Mark all slots allocated */
		savec->sac_vrswap = 0;			/* V=R, so the translation factor is 0 */
		savec->sac_flags = sac_perm;		/* Mark it permanent */
		savec->sac_flags |= 0x0000EE00;		/* Debug eyecatcher */
		save_queue((savearea *)savec);		/* Add page to savearea lists */
		save += PAGE_SIZE;			/* Jump up to the next one now */
	}

	backpocket = saveanchor;			/* Save this for emergencies */

/*
 * We've saved away the back pocket savearea info, so reset it all and
 * now allocate for real
 */

	saveanchor.savefree = 0;			/* Remember the start of the free chain */
	saveanchor.savefreecnt = 0;			/* Remember the length */
	saveanchor.saveadjust = 0;			/* Set none needed yet */
	saveanchor.savepoolfwd = (unsigned int *)&saveanchor;	/* Remember pool forward */
	saveanchor.savepoolbwd = (unsigned int *)&saveanchor;	/* Remember pool backward */

	for(i = 0; i < InitialSaveBloks; i++) {		/* Initialize the saveareas */

		savec = (savearea_comm *)save;		/* Get the control area for this one */

		savec->sac_alloc = 0;			/* Mark all slots allocated */
		savec->sac_vrswap = 0;			/* V=R, so the translation factor is 0 */
		savec->sac_flags = sac_perm;		/* Mark it permanent */
		savec->sac_flags |= 0x0000EE00;		/* Debug eyecatcher */
		save_queue((savearea *)savec);		/* Add page to savearea lists */
		save += PAGE_SIZE;			/* Jump up to the next one now */
	}

	*addrx = save;					/* Move the free storage low-water mark */

/*
 * We now have a free list that has our initial number of entries.
 * The local qfret lists are empty.  When we call save_get below, it will see that
 * the local list is empty and fill it for us.
 *
 * It is OK to call save_get_phys here because even if translation is on, we are still V=R and
 * running with BAT registers, so no interruptions can occur.  Regular interruptions will be off anyway.
 * Using save_get would be wrong if tracing were enabled--it would cause an exception.
 */

	save2 = (vm_offset_t)save_get_phys();		/* This will populate the local list
							   and get the first one for the system */
	per_proc_info[0].next_savearea = (unsigned int)save2;	/* Tell the exception handler about it */

/*
 * The system is now able to take interruptions
 */

	return;
}
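
/*
 * Illustrative sketch only: each dynamically allocated savearea page stores
 * the XOR of its virtual and physical addresses in sac_vrswap (see
 * save_adjust below), so the same mask converts an address in either
 * direction.  The helper name is hypothetical; it is not part of the
 * savearea API.
 */
static unsigned int sav_vrswap_sketch(savearea_comm *sac, unsigned int addr) {

	return addr ^ sac->sac_vrswap;			/* V-to-R or R-to-V with a single XOR */
}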



/*
 * Returns a savearea.  If the free list needs size adjustment it happens here.
 * Don't actually allocate the savearea until after the adjustment is done.
 */

struct savearea *save_alloc(void) {			/* Reserve a save area */

	if(saveanchor.saveadjust) save_adjust();	/* If the size needs adjustment, do it now */

	return save_get();				/* Pass the baby... */
}

/*
 * This routine releases a save area to the free queue.  If, after that, we have more than our maximum target,
 * we start releasing what we can until we hit the normal target.
 */

void save_release(struct savearea *save) {		/* Release a save area */

	save_ret(save);					/* Return a savearea to the free list */

	if(saveanchor.saveadjust) save_adjust();	/* Adjust the savearea free list and pool size if needed */

	return;
}
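
/*
 * Hedged usage sketch (the caller is hypothetical, not part of this file):
 * a high level path such as thread creation reserves a savearea with
 * save_alloc and hands it back with save_release; either call may trigger
 * a free list size adjustment as described above.
 */
static void sav_usage_sketch(void) {

	struct savearea *sv;

	sv = save_alloc();				/* Reserve an area; may grow or trim the free list */
							/* ... use sv as a PCB / exception save area ... */
	save_release(sv);				/* Return it to the free queue */
}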


/*
 * Adjusts the size of the free list.  Can either release or allocate full pages
 * of kernel memory.  This can block.
 *
 * Note that we will only run one adjustment at a time, and the amount needed may change
 * while we are executing.
 *
 * Calling this routine is triggered by saveanchor.saveadjust.  This value is always calculated just before
 * we unlock the saveanchor lock (this keeps it pretty accurate).  If the total of savefreecnt and saveinuse
 * is within the hysteresis range, it is set to 0.  If outside, it is set to the number needed to bring
 * the total to the target value.  Note that there is a minimum size to the free list (FreeListMin) and if
 * savefreecnt falls below that, saveadjust is set to the number needed to bring it to that.
 */

void save_adjust(void) {

	savearea_comm	*sctl, *sctlnext, *freepool, *freepage, *realpage;
	kern_return_t	ret;

	if(saveanchor.saveadjust < 0) {			/* Do we need to adjust down? */

		sctl = (savearea_comm *)save_trim_free();	/* Trim list to the need count, return start of trim list */

		while(sctl) {				/* Release the free pages back to the kernel */
			sctlnext = (savearea_comm *)sctl->save_prev;	/* Get next in list */
			kmem_free(kernel_map, (vm_offset_t) sctl, PAGE_SIZE);	/* Release the page */
			sctl = sctlnext;		/* Chain onwards */
		}
	}
	else {						/* We need more... */

		if(save_recover()) return;		/* If we can recover enough from the pool, return */

		while(saveanchor.saveadjust > 0) {	/* Keep going until we have enough */

			ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&freepage, PAGE_SIZE);	/* Get a page for the free pool */
			if(ret != KERN_SUCCESS) {	/* Did we get some memory? */
				panic("Whoops... Not a bit of wired memory left for saveareas\n");
			}

			realpage = (savearea_comm *)pmap_extract(kernel_pmap, (vm_offset_t)freepage);	/* Get the physical address */

			bzero((void *)freepage, PAGE_SIZE);	/* Clear it all to zeros */
			freepage->sac_alloc = 0;		/* Mark all entries taken */
			freepage->sac_vrswap = (unsigned int)freepage ^ (unsigned int)realpage;	/* Form mask to convert V to R and vice versa */

			freepage->sac_flags |= 0x0000EE00;	/* Set debug eyecatcher */

			save_queue((savearea *)realpage);	/* Add all saveareas on page to free list */
		}
	}
}
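
/*
 * Hedged sketch of the saveadjust calculation described in the comment before
 * save_adjust.  The real computation happens under the saveanchor lock in the
 * low-level savearea code; this helper, its name, and the hysteresis
 * parameters are illustrative assumptions.  FreeListMin is the minimum free
 * list size mentioned above.
 */
static int sav_compute_adjust_sketch(struct Saveanchor *sa, int lowHyst, int highHyst) {

	int total = (int)(sa->savefreecnt + sa->saveinuse);	/* Current overall population */

	if((int)sa->savefreecnt < FreeListMin)		/* Below the free list floor? */
		return FreeListMin - (int)sa->savefreecnt;	/* Grow back up to the minimum */

	if((total < ((int)sa->savetarget - lowHyst)) ||	/* Outside the hysteresis band? */
	   (total > ((int)sa->savetarget + highHyst)))
		return (int)sa->savetarget - total;	/* Signed: positive grows, negative trims */

	return 0;					/* Within range: no adjustment needed */
}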

/*
 * Fake up information to make the saveareas look like a zone
 */

void
save_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		    vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = saveanchor.saveinuse;
	*cur_size = (saveanchor.savefreecnt + saveanchor.saveinuse) * (PAGE_SIZE / sac_cnt);
	*max_size = saveanchor.savemaxcount * (PAGE_SIZE / sac_cnt);
	*elem_size = sizeof(savearea);
	*alloc_size = PAGE_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}