/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 *	This file is used to maintain the exception save areas
 */
#include <mach_kgdb.h>
#include <mach_vm_debug.h>

#include <kern/thread.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <mach/ppc/thread_status.h>
#include <kern/simple_lock.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ddb/db_output.h>
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
struct Saveanchor backpocket;				/* Emergency saveareas */
unsigned int debsave0 = 0;				/* Debug flag */
unsigned int backchain = 0;				/* Debug flag */
/*
 *	These routines keep track of exception save areas and keep the count within specific limits. If there are
 *	too few, more are allocated; too many, and they are released. This savearea is where the PCBs are
 *	stored. They never span a page boundary and are referenced by both virtual and real addresses.
 *	Within the interrupt vectors, the real address is used because at that level, no exceptions
 *	can be tolerated. Save areas can be dynamic or permanent. Permanent saveareas are allocated
 *	at boot time and must be in place before any type of exception occurs. These are never released,
 *	and the number is based upon some arbitrary (yet to be determined) amount times the number of
 *	processors. This represents the minimum number required to process a total system failure without
 *	destroying valuable and ever-so-handy system debugging information.
 *
 *	We keep two global free lists (the savearea free pool and the savearea free list) and one local
 *	list per processor.
 *
 *	The local lists are small and require no locked access. They are chained using physical addresses
 *	and no interruptions are allowed when adding to or removing from the list. Also known as the
 *	qfret list, this list is local to a processor and is intended for use only by very low level
 *	context handling code.
 *
 *	The savearea free list is a medium size list that is globally accessible. It is updated
 *	while holding a simple lock. The length of time that the lock is held is kept short. The
 *	longest period of time is when the list is trimmed. Like the qfret lists, this is chained physically
 *	and must be accessed with translation and interruptions disabled. This is where the bulk
 *	of the free entries are located.
 *
 *	The saveareas are allocated from full pages. A pool element is marked
 *	with an allocation map that shows which "slots" are free. These pages are allocated via the
 *	normal kernel memory allocation functions. Queueing is with physical addresses. The enqueue,
 *	dequeue, and search for free blocks is done under the free list lock. A savearea is allocated
 *	from a pool page only if there are empty slots in it.
 *
 *	Saveareas are counted as "in use" once they are removed from the savearea free list.
 *	This means that all areas on the local qfret list are considered in use.
 *
 *	There are two methods of obtaining a savearea. The save_get function (which is also inlined
 *	in the low-level exception handler) attempts to get an area from the local qfret list. This is
 *	done completely without locks. If qfret is exhausted (or maybe just too low) an area is allocated
 *	from the savearea free list. If the free list is empty, we install the back pocket areas and
 *	use those.
 *
 *	The save_alloc function is designed to be called by high level routines, e.g., thread creation,
 *	etc. It will allocate from the free list. After allocation, it will compare the free count
 *	to the target value. If outside of the range, it will adjust the size either upwards or
 *	downwards.
 *
 *	If we need to shrink the list, it will be trimmed to the target size and unlocked. The code
 *	will walk the chain and return each savearea to its pool page. If a pool page becomes
 *	completely empty, it is dequeued from the free pool list and enqueued (atomic queue
 *	function) to be released.
 *
 *	Once the trim list is finished, the pool release queue is checked to see if there are pages
 *	waiting to be released. If so, they are released one at a time.
 *
 *	If the free list needed to be grown rather than shrunken, we will first attempt to recover
 *	a page from the pending release queue (built when we trim the free list). If we find one,
 *	it is allocated, otherwise, a page of kernel memory is allocated. This loops until there are
 *	enough free saveareas.
 *
 *	A sketch modeling these structures follows this comment.
 */
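/*
 *	Illustrative sketch of the layout described above, using simplified, hypothetical
 *	names rather than the kernel's own types. Saveareas are carved out of whole pages;
 *	each page header keeps a bitmap of free slots, free slots are also chained on a
 *	global list guarded by a lock, and each processor keeps a small local (qfret-style)
 *	cache that is popped without any lock. Physical-address chaining and the
 *	translation/interruption rules are omitted, and the real save_get refills the local
 *	list in bulk; a single pop is shown here for brevity.
 */

#define SKETCH_SLOTS_PER_PAGE	8			/* Hypothetical saveareas per pool page */

typedef struct sketch_slot {				/* Hypothetical stand-in for a savearea */
	struct sketch_slot	*next;			/* Free-list linkage */
} sketch_slot_t;

typedef struct sketch_pool_page {			/* Hypothetical stand-in for savearea_comm */
	uint32_t		alloc_map;		/* One bit per slot; a set bit means the slot is free */
	struct sketch_pool_page	*pool_next;		/* Pool page chain */
} sketch_pool_page_t;

static sketch_slot_t	*sketch_global_free;		/* Models the savearea free list (lock held to touch it) */
static sketch_slot_t	*sketch_local_free;		/* Models one processor's qfret list (no lock needed) */

static sketch_slot_t *sketch_get(void) {		/* Models the save_get fast path */
	sketch_slot_t *s;

	if((s = sketch_local_free)) {			/* Local hit: no lock, lowest possible cost */
		sketch_local_free = s->next;
		return s;
	}
							/* A real implementation takes the free-list lock here */
	if((s = sketch_global_free)) {			/* Fall back to the globally visible free list */
		sketch_global_free = s->next;
	}
							/* ...and drops the lock; NULL means the back pocket is needed */
	return s;
}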
/*
 *	Allocate our initial context save areas. As soon as we do this,
 *	we can take an interrupt. We do the saveareas here, 'cause they're guaranteed
 *	to be at least page aligned.
 *
 *	Note: these initial saveareas are all to be allocated from V=R, less than 4GB
 */
void savearea_init(vm_offset_t addr) {

	savearea_comm	*savec;
	vm_offset_t	save;
	int		i;

	saveanchor.savetarget	= InitialSaveTarget;		/* Initial target value */
	saveanchor.saveinuse	= 0;				/* Number of areas in use */

	saveanchor.savefree	= 0;				/* Remember the start of the free chain */
	saveanchor.savefreecnt	= 0;				/* Remember the length */
	saveanchor.savepoolfwd	= (addr64_t)&saveanchor;	/* Remember pool forward */
	saveanchor.savepoolbwd	= (addr64_t)&saveanchor;	/* Remember pool backward */

	save = addr;						/* Point to the whole block of blocks */
/*
 *	First we allocate the back pocket in case of emergencies
 */

	for(i = 0; i < BackPocketSaveBloks; i++) {		/* Initialize the back pocket saveareas */

		savec = (savearea_comm *)save;			/* Get the control area for this one */

		savec->sac_alloc = 0;				/* Mark it allocated */
		savec->sac_vrswap = 0;				/* V=R, so the translation factor is 0 */
		savec->sac_flags = sac_perm;			/* Mark it permanent */
		savec->sac_flags |= 0x0000EE00;			/* Debug eyecatcher */

		save_queue((uint32_t)savec >> 12);		/* Add page to savearea lists */
		save += PAGE_SIZE;				/* Jump up to the next one now */
	}

	backpocket = saveanchor;				/* Save this for emergencies */
/*
 *	We've saved away the back pocket savearea info, so reset it all and
 *	now allocate for real
 */

	saveanchor.savefree	= 0;				/* Remember the start of the free chain */
	saveanchor.savefreecnt	= 0;				/* Remember the length */
	saveanchor.saveadjust	= 0;				/* Set none needed yet */
	saveanchor.savepoolfwd	= (addr64_t)&saveanchor;	/* Remember pool forward */
	saveanchor.savepoolbwd	= (addr64_t)&saveanchor;	/* Remember pool backward */
	for(i = 0; i < InitialSaveBloks; i++) {			/* Initialize the saveareas */

		savec = (savearea_comm *)save;			/* Get the control area for this one */

		savec->sac_alloc = 0;				/* Mark it allocated */
		savec->sac_vrswap = 0;				/* V=R, so the translation factor is 0 */
		savec->sac_flags = sac_perm;			/* Mark it permanent */
		savec->sac_flags |= 0x0000EE00;			/* Debug eyecatcher */

		save_queue((uint32_t)savec >> 12);		/* Add page to savearea lists */
		save += PAGE_SIZE;				/* Jump up to the next one now */
	}
/*
 *	We now have a free list that has our initial number of entries.
 *	The local qfret list is empty. When we call save_get below it will see that
 *	the local list is empty and fill it for us.
 *
 *	It is ok to call save_get here because all initial saveareas are V=R in less
 *	than 4GB space, so 32-bit addressing is ok.
 */

/*
 *	This will populate the local list and get the first one for the system
 */
	getPerProc()->next_savearea = (vm_offset_t)save_get();

/*
 *	The system is now able to take interruptions
 */
}
/*
 *	Obtains a savearea. If the free list needs size adjustment it happens here.
 *	Don't actually allocate the savearea until after the adjustment is done.
 */

struct savearea	*save_alloc(void) {				/* Reserve a save area */

	if(saveanchor.saveadjust) save_adjust();		/* If size needs adjustment, do it now */

	return save_get();					/* Pass the baby... */
}
/*
 *	This routine releases a save area to the free queue. If, after that, we have more than our maximum target,
 *	we start releasing what we can until we hit the normal target. A short usage sketch follows this routine.
 */

void save_release(struct savearea *save) {			/* Release a save area */

	save_ret(save);						/* Return a savearea to the free list */

	if(saveanchor.saveadjust) save_adjust();		/* Adjust the savearea free list and pool size if needed */
}
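/*
 *	Illustrative usage sketch: a hypothetical high-level caller (e.g. thread creation
 *	or teardown code) simply pairs save_alloc with save_release; any resizing of the
 *	free list happens inside those calls, so the caller does nothing special. The two
 *	functions below are not part of the kernel; they only show the intended pairing.
 */

static struct savearea *sketch_grab_pcb(void) {
	return save_alloc();					/* Adjusts the free list if flagged, then allocates */
}

static void sketch_drop_pcb(struct savearea *sv) {
	save_release(sv);					/* Returns the area; may trigger a trim of the free list */
}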
/*
 *	Adjusts the size of the free list. Can either release or allocate full pages
 *	of kernel memory. This can block.
 *
 *	Note that we will only run one adjustment and the amount needed may change
 *	while we are executing.
 *
 *	Calling this routine is triggered by saveanchor.saveadjust. This value is always calculated just before
 *	we unlock the saveanchor lock (this keeps it pretty accurate). If the total of savefreecnt and saveinuse
 *	is within the hysteresis range, it is set to 0. If outside, it is set to the number needed to bring
 *	the total to the target value. Note that there is a minimum size to the free list (FreeListMin) and if
 *	savefreecnt falls below that, saveadjust is set to the number needed to bring it to that.
 *	A sketch of this computation follows this comment.
 */
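/*
 *	Illustrative sketch of that calculation, with hypothetical parameter names and a
 *	symmetric band around the target assumed for the hysteresis range (the real value
 *	is computed where the saveanchor lock is dropped, using the anchor's own fields).
 *	A positive result means "grow by this many", a negative result means "trim by this
 *	many", and zero means the totals are inside the band.
 */

static int sketch_compute_adjust(int freecnt, int inuse, int target, int slack, int freemin) {
	int total = freecnt + inuse;				/* Everything we have, free or in use */

	if(freecnt < freemin) return freemin - freecnt;		/* Free list under its floor: grow it back up */
	if(total < target - slack) return target - total;	/* Under the band: grow toward the target */
	if(total > target + slack) return target - total;	/* Over the band: negative, trim toward the target */
	return 0;						/* Inside the band: leave it alone */
}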
void save_adjust(void) {

	savearea_comm	*sctl, *sctlnext, *freepage;
	kern_return_t	ret;
	ppnum_t		physpage;

	if(saveanchor.saveadjust < 0) {				/* Do we need to adjust down? */

		sctl = (savearea_comm *)save_trim_free();	/* Trim list to the need count, return start of trim list */

		while(sctl) {					/* Release the free pages back to the kernel */
			sctlnext = CAST_DOWN(savearea_comm *, sctl->save_prev);	/* Get next in list */
			kmem_free(kernel_map, (vm_offset_t) sctl, PAGE_SIZE);		/* Release the page */
			sctl = sctlnext;			/* Chain onwards */
		}
	}
	else {							/* We need more... */

		if(save_recover()) return;			/* If we can recover enough from the pool, return */

		while(saveanchor.saveadjust > 0) {		/* Keep going until we have enough */

			ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&freepage, PAGE_SIZE);	/* Get a page for free pool */
			if(ret != KERN_SUCCESS) {		/* Did we get some memory? */
				panic("Whoops...  Not a bit of wired memory left for saveareas\n");
			}

			physpage = pmap_find_phys(kernel_pmap, (vm_offset_t)freepage);	/* Find physical page */
			if(!physpage) {				/* See if we actually have this mapped */
				panic("save_adjust: wired page not mapped - va = %08X\n", freepage);	/* Die */
			}

			bzero((void *)freepage, PAGE_SIZE);	/* Clear it all to zeros */
			freepage->sac_alloc = 0;		/* Mark all entries taken */
			freepage->sac_vrswap = ((uint64_t)physpage << 12) ^ (uint64_t)((uintptr_t)freepage);	/* XOR to calculate conversion mask */
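			/*
			 *	The conversion works because a savearea never spans a page boundary:
			 *	the offset bits are identical in the virtual and real addresses, so for
			 *	any savearea on this page, virtual ^ sac_vrswap yields its real address
			 *	and real ^ sac_vrswap yields the virtual one. For the V=R boot-time
			 *	pages set up in savearea_init, the two match and the mask is 0.
			 */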
			freepage->sac_flags |= 0x0000EE00;	/* Set debug eyecatcher */

			save_queue(physpage);			/* Add all saveareas on page to free list */
		}
	}
}
/*
 *	Fake up information to make the saveareas look like a zone
 */
void
save_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		    vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count		= saveanchor.saveinuse;
	*cur_size	= (saveanchor.savefreecnt + saveanchor.saveinuse) * (PAGE_SIZE / sac_cnt);
	*max_size	= saveanchor.savemaxcount * (PAGE_SIZE / sac_cnt);
	*elem_size	= sizeof(savearea);
	*alloc_size	= PAGE_SIZE;