/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int		real_ncpus;				/* Number of actual CPUs */
extern struct	Saveanchor saveanchor;			/* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)

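/*
 * KF_SIZE is the room reserved for the initial kernel stack frame: the
 * frame header (FM_SIZE) plus the argument save area (ARG_SIZE) plus the
 * red zone (FM_REDZONE).  The routines below use it to place the first
 * frame that many bytes below the saved state returned by STACK_IKS().
 */
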
#if DEBUG
int   fpu_trap_count = 0;
int   fpu_switch_count = 0;
int   vec_trap_count = 0;
int   vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
	struct thread_shuttle	*old,
	void			(*cont)(void),
	struct thread_shuttle	*new);


#if MACH_LDEBUG || MACH_KDB
void		log_thread_action (char *, long, long, long);
#endif


/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}


/*
 * machine_kernel_stack_init: Attach a kernel stack to a thread and set it
 * up so that the thread begins executing at start_pos.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss, *stck;
	struct savearea	*sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

	kss = (unsigned int *)STACK_IKS(stack);
	sv = thread->top_act->mact.pcb;				/* This for the sake of C */

	sv->save_lr = (uint64_t) start_pos;			/* Set up the execution address */
	sv->save_srr0 = (uint64_t) start_pos;			/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;			/* Set the normal running MSR */
	stck = (unsigned int *)((unsigned int)kss - KF_SIZE);	/* Point to the top frame */
	sv->save_r1 = (uint64_t)stck;				/* Point to the top frame on the stack */
	sv->save_fpscr = 0;					/* Clear all floating point exceptions */
	sv->save_vrsave = 0;					/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;				/* Suppress java mode */

	*stck = 0;						/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;				/* Show that the kernel stack is in use already */

}
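
/*
 * Note: after machine_kernel_stack_init() the pcb savearea describes a
 * thread that will begin executing at start_pos in supervisor mode: LR and
 * SRR0 both hold the entry point, SRR1 holds the kernel MSR, and R1 points
 * at a zeroed top-of-stack frame KF_SIZE bytes below the interrupt save
 * state.  Clearing mact.ksp marks the kernel stack as already in use.
 */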

/*
 * switch_context: Switch from one thread to another, switching the
 * address space if needed.
 *
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;


#if MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif

	ppinfo = getPerProc();					/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;				/* disable branch tracing if on */

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);			/* Switch if there is a change */
		}
	}

	if(old_act->mact.cioSpace != invalSpace) {		/* Does our old guy have an active copyin/out? */
		old_act->mact.cioSpace |= cioSwitchAway;	/* Show we switched away from this guy */
		hw_blow_seg(copyIOaddr);			/* Blow off the first segment */
		hw_blow_seg(copyIOaddr + 0x10000000ULL);	/* Blow off the second segment */
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	/* *********** SWITCH HERE **************/
	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);
	/* *********** SWITCH HERE **************/


	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();				/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;			/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}
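
/*
 * Note: control returns from Switch_context() only when this thread is
 * eventually switched back in, possibly on a different processor.  That is
 * why switch_context() refetches its per_proc block with getPerProc()
 * before restoring the branch trace flag.
 */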

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t retval)
{

	thread->top_act->mact.pcb->save_r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
	struct thread_shuttle *thread,
	thread_act_t thr_act,
	void (*start_pos)(thread_t))
{

	savearea	*sv;				/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;


	hw_atomic_add(&saveanchor.savetarget, 4);	/* Account for the number of saveareas we think we "need"
							   for this activation */
	assert(thr_act->mact.pcb == (savearea *)0);	/* Make sure there was no previous savearea */

	sv = save_alloc();				/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;			/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thr_act;		/* Set who owns it */
	thr_act->mact.pcb = sv;				/* Point to the save area */
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* Initialize facility context */
	thr_act->mact.facctx.facAct = thr_act;		/* Initialize facility context pointer to activation */
	thr_act->mact.cioSpace = invalSpace;		/* Initialize copyin/out space to invalid */
	thr_act->mact.preemption_count = 0;		/* Initialize preemption counter */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;	/* Set the default user MSR */
	sv->save_fpscr = 0;				/* Clear all floating point exceptions */
	sv->save_vrsave = 0;				/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;			/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}
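
/*
 * Note: thread_machine_create() charges four saveareas to
 * saveanchor.savetarget; act_machine_destroy() later gives that accounting
 * back.  Only the portion of the new savearea beyond the savearea_comm
 * header is cleared; the header fields are then set up explicitly.
 */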

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	ppinfo = getPerProc();					/* Get our processor block */

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}

	old->mact.cioSpace |= cioSwitchAway;			/* Show we switched away from this guy */

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}


}
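
/*
 * Note: machine_switch_act() mirrors the lazy FPU/VMX save and the pmap
 * switch done in switch_context(), but it only switches the activation on
 * the current shuttle, so no call to Switch_context() is made here.
 */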

void
pcb_user_to_kernel(thread_act_t act)
{

	return;							/* Not needed, I hope... */
}


/*
 * act_machine_sv_free
 *	Release the kernel-level saveareas associated with an act;
 *	any user-level savearea is preserved.
 *
 *	this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;
	register int i;

	/*
	 * This function will release all non-user state context.
	 */

	/*
	 *
	 * Walk through and release all floating point and vector contexts that are not
	 * user state. We will also blow away live context if it belongs to non-user state.
	 * Note that the level can not change while we are in this code. Nor can another
	 * context be pushed on the stack.
	 *
	 * We do nothing here if the current level is user. Otherwise,
	 * the live context is cleared. Then we find the user saved context.
	 * Next, we take the sync lock (to keep us from munging things in *_switch).
	 * The level is set to 0 and all stacked context other than user is dequeued.
	 * Then we unlock. Next, all of the old kernel contexts are released.
	 *
	 */

	if(act->mact.curctx->VMXlevel) {			/* Is the current level kernel (not user) state? */

		toss_live_vec(act->mact.curctx);		/* Dump live vectors since they are not user state */

		vsv = act->mact.curctx->VMXsave;		/* Get the top vector savearea */

		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsvt = act->mact.curctx->VMXsave;		/* Get the top of the chain */
		act->mact.curctx->VMXsave = vsv;		/* Point to the user context */
		act->mact.curctx->VMXlevel = 0;			/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);	/* Unlock */

		while(vsvt) {					/* Clear any VMX saved state */
			if (vsvt == vsv) break;			/* Done when hit user if any */
			vpst = vsvt;				/* Remember so we can toss this */
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)vpst);		/* Release it */
		}

	}

	if(act->mact.curctx->FPUlevel) {			/* Is the current level kernel (not user) state? */

		toss_live_fpu(act->mact.curctx);		/* Dump live floats since they are not user state */

		fsv = act->mact.curctx->FPUsave;		/* Get the top float savearea */

		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsvt = act->mact.curctx->FPUsave;		/* Get the top of the chain */
		act->mact.curctx->FPUsave = fsv;		/* Point to the user context */
		act->mact.curctx->FPUlevel = 0;			/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);	/* Unlock */

		while(fsvt) {					/* Clear any FPU saved state */
			if (fsvt == fsv) break;			/* Done when hit user if any */
			fpst = fsvt;				/* Remember so we can toss this */
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)fpst);		/* Release it */
		}

	}

	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;					/* Get the general savearea */
	userpcb = 0;						/* Assume no user context for now */

	while(pcb) {						/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {		/* Is this a user savearea? */
			userpcb = pcb;				/* Remember the user savearea so we can keep it */
			break;
		}
		svp = pcb;					/* Remember this */
		pcb = pcb->save_hdr.save_prev;			/* Get one underneath ours */
		save_ret(svp);					/* Release it */
	}

	act->mact.pcb = userpcb;				/* Chain in the user if there is one, or 0 if not */

}
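
/*
 * Note: each facility chain above is walked down to the user-level
 * savearea (save_level == 0), the chain head is repointed at it under the
 * sync lock, and the kernel-level saveareas are then returned with
 * save_ret().  For the general chain, the first savearea whose saved SRR1
 * has MSR_PR set (user state) is kept; everything above it is returned.
 */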

/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {				/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);			/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {				/* Check if VMM is active */
		vmm_tear_down_all(act);				/* Kill off all VMM contexts */
	}
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{

	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int i;

	/*
	 * This function will release all context.
	 */

	act_virtual_machine_destroy(act);			/* Make sure all virtual machines are dead first */

	/*
	 *
	 * Walk through and release all floating point and vector contexts. Also kill live context.
	 *
	 */

	toss_live_vec(act->mact.curctx);			/* Dump live vectors */

	vsv = act->mact.curctx->VMXsave;			/* Get the top vector savearea */

	while(vsv) {						/* Any VMX saved state? */
		vpsv = vsv;					/* Remember so we can toss this */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)vpsv);			/* Release it */
	}

	act->mact.curctx->VMXsave = 0;				/* Kill chain */

	toss_live_fpu(act->mact.curctx);			/* Dump live float */

	fsv = act->mact.curctx->FPUsave;			/* Get the top float savearea */

	while(fsv) {						/* Any float saved state? */
		fpsv = fsv;					/* Remember so we can toss this */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)fpsv);			/* Release it */
	}

	act->mact.curctx->FPUsave = 0;				/* Kill chain */

	/*
	 * free all regular saveareas.
	 */

	pcb = act->mact.pcb;					/* Get the general savearea */

	while(pcb) {						/* Any general saved state? */
		ppsv = pcb;					/* Remember so we can toss this */
		pcb = pcb->save_hdr.save_prev;			/* Get one underneath ours */
		save_release(ppsv);				/* Release it */
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);		/* Unaccount for the number of saveareas we think we "need" */

}
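
/*
 * Note: unlike act_machine_sv_free(), act_machine_destroy() releases every
 * savearea, including any user-level one, using save_release(), and it
 * gives back the savetarget accounting taken in thread_machine_create().
 */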


kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	return KERN_SUCCESS;
}

void act_machine_init()
{

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

	/*
	 * If we start using kernel activations,
	 * would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}

void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );
	assert(thr_act->thread->top_act == thr_act);

	/* This is the only activation attached to the shuttle... */

	thread_terminate_self();

	/*NOTREACHED*/
	panic("act_machine_return: TALKING ZOMBIE! (1)");
}

void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int my_cpu = cpu_number();

	set_machine_current_thread(thread);
	set_machine_current_act(thread->top_act);

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef	MACHINE_STACK
#if	KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

unsigned int
get_useraddr()
{

	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * Attaches a stack to a thread. If there is no savearea we allocate one.
 * The top savearea is then loaded with the PC (continuation address), the
 * initial stack pointer, and a standard kernel MSR. If the top savearea is
 * the user savearea, bad things will happen.
 *
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();				/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = thr_act;
		sv->save_hdr.save_prev = thr_act->mact.pcb;
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;				/* Clear all floating point exceptions */
		sv->save_vrsave = 0;				/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;			/* Suppress java mode */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}
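
/*
 * Note: save_get() is used above because this path cannot block; the new
 * savearea is linked above the existing pcb chain (save_prev points at the
 * old top), so any user savearea stays farther down the chain.
 */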

/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
	      thread_t new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	int	my_cpu;
	mapping	*mp;
	struct per_proc_info *ppinfo;

	assert(new->top_act);
	assert(old->top_act);

	my_cpu = cpu_number();
	stack = stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->stack_privilege) {
		assert(new->stack_privilege);
		old->stack_privilege = new->stack_privilege;
		new->stack_privilege = stack;
	}

	ppinfo = getPerProc();					/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;				/* Turn off special branch trace */

	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}
	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[my_cpu] = new->kernel_stack;
	ppinfo->Uassist = new->top_act->mact.cthread_self;

	ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	ppinfo->spcFlags = new->top_act->mact.specFlags;

	old->top_act->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */
	mp = (mapping *)&ppinfo->ppCIOmp;
	mp->mpSpace = invalSpace;				/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Cut trace entry if tracing */

	return;
}
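
/*
 * Note: stack_handoff() moves the kernel stack from old to new without
 * going through Switch_context(); it still does the lazy FPU/VMX saves and
 * the pmap switch that switch_context() does, and it invalidates the
 * per_proc copyin/out mapping since a handoff cannot happen in the middle
 * of a copyin/out.
 */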

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void) )
{

	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}
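
/*
 * Note: call_continuation() resets the stack pointer to the topmost frame,
 * KF_SIZE bytes below the saved state, zeroes its backchain, and jumps to
 * the continuation via Call_continuation(), abandoning whatever was on the
 * kernel stack.
 */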

void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();
	assert(sv);
	sv->save_hdr.save_prev = 0;				/* Initialize back chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread->top_act;		/* Initialize owner */
	thread->top_act->mact.pcb = sv;

}