/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int		real_ncpus;						/* Number of actual CPUs */
extern struct	Saveanchor saveanchor;			/* Aligned savearea anchor */

void	machine_act_terminate(thread_act_t act);

/*
 * These constants are dumb.  They should not be in asm.h!
 */

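/*
 * KF_SIZE is the size of a minimal kernel stack frame: the fixed frame
 * area plus the argument save area plus the red zone (all defined in
 * asm.h).  It is used below when carving an initial frame on a freshly
 * attached kernel stack.
 */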
#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)

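/*
 * Debug-only counters for floating point and vector facility traps
 * and context switches.
 */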
#if DEBUG
int   fpu_trap_count = 0;
int   fpu_switch_count = 0;
int   vec_trap_count = 0;
int   vec_switch_count = 0;
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}

/*
 * machine_switch_context: Switch from one thread to another, needed for
 *			    switching of space
 *
 */
thread_t
machine_switch_context(
	thread_t			old,
	thread_continue_t	continuation,
	thread_t			new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register thread_t retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	if (old == new)
		panic("machine_switch_context");

	ppinfo = getPerProc();							/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;					/* disable branch tracing if on */

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode
	 * spcFlags bits in the thread spcFlags.  These bits can be modified in the
	 * per_proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	if(old_act->mact.cioSpace != invalSpace) {			/* Does our old guy have an active copyin/out? */
		old_act->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */
		hw_blow_seg(copyIOaddr);						/* Blow off the first segment */
		hw_blow_seg(copyIOaddr + 0x10000000ULL);		/* Blow off the second segment */
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);

	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();							/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;					/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	savearea		*sv;								/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;

	hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4);	/* Account for the number of saveareas we think we "need"
															   for this activation */
	assert(thread->mact.pcb == (savearea *)0);			/* Make sure there was no previous savearea */

	sv = save_alloc();									/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;							/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = (struct thread_activation *)thread;	/* Set who owns it */
	thread->mact.pcb = sv;								/* Point to the save area */
	thread->mact.curctx = &thread->mact.facctx;			/* Initialize facility context */
	thread->mact.facctx.facAct = thread;				/* Initialize facility context pointer to activation */
	thread->mact.cioSpace = invalSpace;					/* Initialize copyin/out space to invalid */
	thread->mact.preemption_count = 0;					/* Initialize preemption counter */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see machine_stack_attach()).
	 */

	thread->mact.upcb = sv;								/* Set user pcb */
	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;		/* Set the default user MSR */
	sv->save_fpscr = 0;									/* Clear all floating point exceptions */
	sv->save_vrsave = 0;								/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;						/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int i;

	/*
	 * This function will release all context.
	 */

	machine_act_terminate(thread);									/* Make sure all virtual machines are dead first */

	/*
	 *
	 *	Walk through and release all floating point and vector contexts. Also kill live context.
	 *
	 */

	toss_live_vec(thread->mact.curctx);								/* Dump live vectors */

	vsv = thread->mact.curctx->VMXsave;								/* Get the top vector savearea */

	while(vsv) {													/* Any VMX saved state? */
		vpsv = vsv;													/* Remember so we can toss this */
		vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);	/* Get one underneath ours */
		save_release((savearea *)vpsv);								/* Release it */
	}

	thread->mact.curctx->VMXsave = 0;								/* Kill chain */

	toss_live_fpu(thread->mact.curctx);								/* Dump live float */

	fsv = thread->mact.curctx->FPUsave;								/* Get the top float savearea */

	while(fsv) {													/* Any float saved state? */
		fpsv = fsv;													/* Remember so we can toss this */
		fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);	/* Get one underneath ours */
		save_release((savearea *)fpsv);								/* Release it */
	}

	thread->mact.curctx->FPUsave = 0;								/* Kill chain */

	/*
	 * free all regular saveareas.
	 */

	pcb = thread->mact.pcb;											/* Get the general savearea */

	while(pcb) {													/* Any general saved state? */
		ppsv = pcb;													/* Remember so we can toss this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);		/* Get one underneath ours */
		save_release(ppsv);											/* Release it */
	}

	hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4);			/* Unaccount for the number of saveareas we think we "need" */
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t		thread,
	thread_act_t	old,
	thread_act_t	new)
{
	pmap_t		new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	ppinfo = getPerProc();							/* Get our processor block */

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}

	old->mact.cioSpace |= cioSwitchAway;			/* Show we switched away from this guy */

	ast_context(new, cpu_number());

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

}

/*
 * act_machine_sv_free
 * release saveareas associated with an act.  all kernel-level saveareas
 * are released; the user-level savearea, if any, is kept.
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;
	register int i;

	/*
	 *	This function will release all non-user state context.
	 */

	/*
	 *
	 *	Walk through and release all floating point and vector contexts that are not
	 *	user state.  We will also blow away live context if it belongs to non-user state.
	 *	Note that the level cannot change while we are in this code.  Nor can another
	 *	context be pushed on the stack.
	 *
	 *	We do nothing here if the current level is user.  Otherwise,
	 *	the live context is cleared.  Then we find the user saved context.
	 *	Next, we take the sync lock (to keep us from munging things in *_switch).
	 *	The level is set to 0 and all stacked context other than user is dequeued.
	 *	Then we unlock.  Next, all of the old kernel contexts are released.
	 *
	 */

	if(act->mact.curctx->VMXlevel) {						/* Is the current level user state? */

		toss_live_vec(act->mact.curctx);					/* Dump live vectors if is not user */

		vsv = act->mact.curctx->VMXsave;					/* Get the top vector savearea */

		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsvt = act->mact.curctx->VMXsave;					/* Get the top of the chain */
		act->mact.curctx->VMXsave = vsv;					/* Point to the user context */
		act->mact.curctx->VMXlevel = 0;						/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);	/* Unlock */

		while(vsvt) {										/* Clear any VMX saved state */
			if (vsvt == vsv) break;							/* Done when hit user if any */
			vpst = vsvt;									/* Remember so we can toss this */
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)vpst);						/* Release it */
		}

	}

	if(act->mact.curctx->FPUlevel) {						/* Is the current level user state? */

		toss_live_fpu(act->mact.curctx);					/* Dump live floats if is not user */

		fsv = act->mact.curctx->FPUsave;					/* Get the top floats savearea */

		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsvt = act->mact.curctx->FPUsave;					/* Get the top of the chain */
		act->mact.curctx->FPUsave = fsv;					/* Point to the user context */
		act->mact.curctx->FPUlevel = 0;						/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);	/* Unlock */

		while(fsvt) {										/* Clear any FPU saved state */
			if (fsvt == fsv) break;							/* Done when hit user if any */
			fpst = fsvt;									/* Remember so we can toss this */
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)fpst);						/* Release it */
		}

	}

	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;									/* Get the general savearea */
	userpcb = 0;											/* Assume no user context for now */

	while(pcb) {											/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {				/* Is this a user savearea? */
			userpcb = pcb;									/* Remember so we can toss this */
			break;
		}
		svp = pcb;											/* Remember this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);	/* Get one underneath ours */
		save_ret(svp);										/* Release it */
	}

	act->mact.pcb = userpcb;								/* Chain in the user if there is one, or 0 if not */

}

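/*
 * machine_thread_set_current: make the given thread's top activation the
 * current activation on this processor.
 */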
void
machine_thread_set_current(thread_t	thread)
{
	set_machine_current_act(thread->top_act);
}

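/*
 * machine_act_terminate: shut down the Blue Box assist and any VMM
 * contexts attached to the activation.
 */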
void
machine_act_terminate(
	thread_act_t	act)
{
	if(act->mact.bbDescAddr) {						/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);				/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {						/* Check if VMM is active */
		vmm_tear_down_all(act);						/* Kill off all VMM contexts */
	}
}

void
machine_thread_terminate_self(void)
{
	machine_act_terminate(current_act());
}

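/*
 * machine_thread_init: startup sanity check that a kernel stack fits
 * within a single PPC page (when MACHINE_STACK is defined).
 */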
void
machine_thread_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       0 /*thr_act->alerts*/, 0 /*thr_act->alert_mask*/,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

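/*
 * get_useraddr: return the user-mode PC (SRR0 in the user savearea)
 * of the current activation.
 */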
unsigned int
get_useraddr()
{
	return(current_act()->mact.upcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(
	thread_t		thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
									thread, thread->priority,
									thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread.  if there is no save
 * area we allocate one.  the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a standard kernel MSR.  if the top
 * save area is the user save area, bad things will
 * happen.
 *
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack,
	void			(*start)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
									thread, thread->priority,
									thread->sched_pri, start,
									0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();								/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = (struct thread_activation *)thr_act;
		sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb);
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start;			/* Resume at the continuation address */
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);	/* Carve the initial kernel frame below the stack top */
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;								/* Clear all floating point exceptions */
		sv->save_vrsave = 0;							/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;					/* Suppress java mode */
		*(CAST_DOWN(int *, sv->save_r1)) = 0;			/* Zero the frame's back-chain pointer */
		thr_act->mact.ksp = 0;
	}

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(
	thread_t		old,
	thread_t		new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping *mp;
	struct per_proc_info *ppinfo;

	assert(new->top_act);
	assert(old->top_act);

	if (old == new)
		panic("machine_stack_handoff");

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	ppinfo = getPerProc();							/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;					/* Turn off special branch trace */

	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode
	 * spcFlags bits in the thread spcFlags.  These bits can be modified in the
	 * per_proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {	/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	machine_thread_set_current(new);
	ppinfo->Uassist = new->top_act->mact.cthread_self;

	ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	ppinfo->spcFlags = new->top_act->mact.specFlags;

	old->top_act->mact.cioSpace |= cioSwitchAway;	/* Show we switched away from this guy */
	mp = (mapping *)&ppinfo->ppCIOmp;
	mp->mpSpace = invalSpace;						/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Cut trace entry if tracing */

	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

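/*
 * The fresh frame is carved KF_SIZE bytes below the top of the kernel
 * stack and its back-chain pointer is zeroed before Call_continuation()
 * switches onto it and jumps to the continuation.
 */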
void
call_continuation(void (*continuation)(void) )
{

	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}