/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int real_ncpus;                              /* Number of actual CPUs */
extern struct Saveanchor saveanchor;                /* Aligned savearea anchor */

void machine_act_terminate(thread_act_t act);

/*
 * These constants are dumb. They should not be in asm.h!
 */

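/*
 * KF_SIZE is the size of a minimal kernel stack frame: the frame header
 * (FM_SIZE) plus the argument save area (ARG_SIZE) plus the red zone
 * (FM_REDZONE).  It is carved off below the top of a kernel stack when the
 * initial stack pointer is set up (see machine_stack_attach() and
 * call_continuation() below).
 */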
#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
    /*
     * none currently available
     */
    return;
}

void
consider_machine_adjust()
{
    consider_mapping_adjust();
}

/*
 * machine_switch_context: Switch from one thread to another, needed for
 * switching of space
 *
 */
thread_t
machine_switch_context(
    thread_t            old,
    thread_continue_t   continuation,
    thread_t            new)
{
    register thread_act_t old_act = old->top_act, new_act = new->top_act;
    register thread_t retval;
    pmap_t new_pmap;
    facility_context *fowner;
    struct per_proc_info *ppinfo;

    if (old == new)
        panic("machine_switch_context");

    ppinfo = getPerProc();                          /* Get our processor block */

    ppinfo->old_thread = (unsigned int)old;
    ppinfo->cpu_flags &= ~traceBE;                  /* disable branch tracing if on */

    check_simple_locks();

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU, it must go back to the pcb
     * so that it can be found by the other if needed
     */
    if(real_ncpus > 1) {                            /* This is potentially slow, so only do when actually SMP */
        fowner = ppinfo->FPU_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old->top_act) {    /* Is it for us? */
                fpu_save(fowner);                   /* Yes, save it */
            }
        }
        fowner = ppinfo->VMX_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old->top_act) {    /* Is it for us? */
                vec_save(fowner);                   /* Yes, save it */
            }
        }
    }

    /*
     * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
     * These bits can be modified in the per_proc without updating the thread spcFlags.
     */
    if(old_act->mact.specFlags & runningVM) {
        old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
        old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
    }
    old_act->mact.specFlags &= ~OnProc;
    new_act->mact.specFlags |= OnProc;

    /*
     * We do not have to worry about the PMAP module, so switch.
     *
     * We must not use top_act->map since this may not be the actual
     * task map, but the map being used for a klcopyin/out.
     */

    if(new_act->mact.specFlags & runningVM) {       /* Is the new guy running a VM? */
        pmap_switch(new_act->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
        ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
        ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
        ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
    }
    else {                                          /* otherwise, we use the task's pmap */
        new_pmap = new_act->task->map->pmap;
        if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);                  /* Switch if there is a change */
        }
    }

    if(old_act->mact.cioSpace != invalSpace) {      /* Does our old guy have an active copyin/out? */
        old_act->mact.cioSpace |= cioSwitchAway;    /* Show we switched away from this guy */
        hw_blow_seg(copyIOaddr);                    /* Blow off the first segment */
        hw_blow_seg(copyIOaddr + 0x10000000ULL);    /* Blow off the second segment */
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
                          old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

    retval = Switch_context(old, continuation, new);
    assert(retval != (struct thread_shuttle*)NULL);

    if (branch_tracing_enabled()) {
        ppinfo = getPerProc();                      /* Get our processor block */
        ppinfo->cpu_flags |= traceBE;               /* restore branch tracing */
    }

    /* We've returned from having switched context, so we should be
     * back in the original context.
     */

    return retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
    thread_t        thread,
    task_t          task)
{
    savearea        *sv;                            /* Pointer to newly allocated savearea */
    unsigned int    *CIsTooLimited, i;

    hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4);  /* Account for the number of saveareas we think we "need"
                                                               for this activation */
    assert(thread->mact.pcb == (savearea *)0);      /* Make sure there was no previous savearea */

    sv = save_alloc();                              /* Go get us a savearea */

    bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));  /* Clear it */

    sv->save_hdr.save_prev = 0;                     /* Clear the back pointer */
    sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);  /* Mark as in use */
    sv->save_hdr.save_act = (struct thread_activation *)thread;  /* Set who owns it */
    thread->mact.pcb = sv;                          /* Point to the save area */
    thread->mact.curctx = &thread->mact.facctx;     /* Initialize facility context */
    thread->mact.facctx.facAct = thread;            /* Initialize facility context pointer to activation */
    thread->mact.cioSpace = invalSpace;             /* Initialize copyin/out space to invalid */
    thread->mact.preemption_count = 0;              /* Initialize preemption counter */

    /*
     * User threads will pull their context from the pcb when first
     * returning to user mode, so fill in all the necessary values.
     * Kernel threads are initialized from the save state structure
     * at the base of the kernel stack (see stack_attach()).
     */

    thread->mact.upcb = sv;                         /* Set user pcb */
    sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;  /* Set the default user MSR */
    sv->save_fpscr = 0;                             /* Clear all floating point exceptions */
    sv->save_vrsave = 0;                            /* Set the vector save state */
    sv->save_vscr[0] = 0x00000000;
    sv->save_vscr[1] = 0x00000000;
    sv->save_vscr[2] = 0x00000000;
    sv->save_vscr[3] = 0x00010000;                  /* Disable java mode and clear saturated */

    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
    thread_t        thread)
{
    register savearea *pcb, *ppsv;
    register savearea_vec *vsv, *vpsv;
    register savearea_fpu *fsv, *fpsv;
    register savearea *svp;
    register int i;

/*
 * This function will release all context.
 */

    machine_act_terminate(thread);                  /* Make sure all virtual machines are dead first */

/*
 *
 * Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */

    toss_live_vec(thread->mact.curctx);             /* Dump live vectors */

    vsv = thread->mact.curctx->VMXsave;             /* Get the top vector savearea */

    while(vsv) {                                    /* Any VMX saved state? */
        vpsv = vsv;                                 /* Remember so we can toss this */
        vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);  /* Get one underneath ours */
        save_release((savearea *)vpsv);             /* Release it */
    }

    thread->mact.curctx->VMXsave = 0;               /* Kill chain */

    toss_live_fpu(thread->mact.curctx);             /* Dump live float */

    fsv = thread->mact.curctx->FPUsave;             /* Get the top float savearea */

    while(fsv) {                                    /* Any float saved state? */
        fpsv = fsv;                                 /* Remember so we can toss this */
        fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);  /* Get one underneath ours */
        save_release((savearea *)fpsv);             /* Release it */
    }

    thread->mact.curctx->FPUsave = 0;               /* Kill chain */

/*
 * free all regular saveareas.
 */

    pcb = thread->mact.pcb;                         /* Get the general savearea */

    while(pcb) {                                    /* Any general saved state? */
        ppsv = pcb;                                 /* Remember so we can toss this */
        pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);  /* Get one underneath ours */
        save_release(ppsv);                         /* Release it */
    }

    hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4);  /* Unaccount for the number of saveareas we think we "need" */
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
    thread_t        thread,
    thread_act_t    old,
    thread_act_t    new)
{
    pmap_t new_pmap;
    facility_context *fowner;
    struct per_proc_info *ppinfo;

    ppinfo = getPerProc();                          /* Get our processor block */

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU, it must go back to the pcb
     * so that it can be found by the other if needed
     */
    if(real_ncpus > 1) {                            /* This is potentially slow, so only do when actually SMP */
        fowner = ppinfo->FPU_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old) {             /* Is it for us? */
                fpu_save(fowner);                   /* Yes, save it */
            }
        }
        fowner = ppinfo->VMX_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old) {             /* Is it for us? */
                vec_save(fowner);                   /* Yes, save it */
            }
        }
    }

    old->mact.cioSpace |= cioSwitchAway;            /* Show we switched away from this guy */

    ast_context(new, cpu_number());

    /* Activations might have different pmaps
     * (process->kernel->server, for example).
     * Change space if needed
     */

    if(new->mact.specFlags & runningVM) {           /* Is the new guy running a VM? */
        pmap_switch(new->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
    }
    else {                                          /* otherwise, we use the task's pmap */
        new_pmap = new->task->map->pmap;
        if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

}

/*
 * act_machine_sv_free
 * release the saveareas associated with an act, preserving any
 * user-level savearea; all non-user state context is released
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
    register savearea *pcb, *userpcb;
    register savearea_vec *vsv, *vpst, *vsvt;
    register savearea_fpu *fsv, *fpst, *fsvt;
    register savearea *svp;
    register int i;

/*
 * This function will release all non-user state context.
 */

/*
 *
 * Walk through and release all floating point and vector contexts that are not
 * user state. We will also blow away live context if it belongs to non-user state.
 * Note that the level can not change while we are in this code. Nor can another
 * context be pushed on the stack.
 *
 * We do nothing here if the current level is user. Otherwise,
 * the live context is cleared. Then we find the user saved context.
 * Next, we take the sync lock (to keep us from munging things in *_switch).
 * The level is set to 0 and all stacked context other than user is dequeued.
 * Then we unlock. Next, all of the old kernel contexts are released.
 *
 */

    if(act->mact.curctx->VMXlevel) {                /* Is the current level user state? */

        toss_live_vec(act->mact.curctx);            /* Dump live vectors if is not user */

        vsv = act->mact.curctx->VMXsave;            /* Get the top vector savearea */

        while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;  /* Find user context if any */

        if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {  /* Get the sync lock */
            panic("act_machine_sv_free - timeout getting VMX sync lock\n");    /* Tell all and die */
        }

        vsvt = act->mact.curctx->VMXsave;           /* Get the top of the chain */
        act->mact.curctx->VMXsave = vsv;            /* Point to the user context */
        act->mact.curctx->VMXlevel = 0;             /* Set the level to user */
        hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);  /* Unlock */

        while(vsvt) {                               /* Clear any VMX saved state */
            if (vsvt == vsv) break;                 /* Done when hit user if any */
            vpst = vsvt;                            /* Remember so we can toss this */
            vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;  /* Get one underneath ours */
            save_ret((savearea *)vpst);             /* Release it */
        }

    }

    if(act->mact.curctx->FPUlevel) {                /* Is the current level user state? */

        toss_live_fpu(act->mact.curctx);            /* Dump live floats if is not user */

        fsv = act->mact.curctx->FPUsave;            /* Get the top floats savearea */

        while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;  /* Find user context if any */

        if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {  /* Get the sync lock */
            panic("act_machine_sv_free - timeout getting FPU sync lock\n");    /* Tell all and die */
        }

        fsvt = act->mact.curctx->FPUsave;           /* Get the top of the chain */
        act->mact.curctx->FPUsave = fsv;            /* Point to the user context */
        act->mact.curctx->FPUlevel = 0;             /* Set the level to user */
        hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);  /* Unlock */

        while(fsvt) {                               /* Clear any FPU saved state */
            if (fsvt == fsv) break;                 /* Done when hit user if any */
            fpst = fsvt;                            /* Remember so we can toss this */
            fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;  /* Get one underneath ours */
            save_ret((savearea *)fpst);             /* Release it */
        }

    }

/*
 * free all regular saveareas except a user savearea, if any
 */

    pcb = act->mact.pcb;                            /* Get the general savearea */
    userpcb = 0;                                    /* Assume no user context for now */

    while(pcb) {                                    /* Any general saved state? */
        if (pcb->save_srr1 & MASK(MSR_PR)) {        /* Is this a user savearea? */
            userpcb = pcb;                          /* Remember it so we can keep it */
            break;
        }
        svp = pcb;                                  /* Remember this */
        pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);  /* Get one underneath ours */
        save_ret(svp);                              /* Release it */
    }

    act->mact.pcb = userpcb;                        /* Chain in the user if there is one, or 0 if not */

}

void
machine_thread_set_current(thread_t thread)
{
    set_machine_current_act(thread->top_act);
}

void
machine_act_terminate(
    thread_act_t    act)
{
    if(act->mact.bbDescAddr) {                      /* Check if the Blue box assist is active */
        disable_bluebox_internal(act);              /* Kill off bluebox */
    }

    if(act->mact.vmmControl) {                      /* Check if VMM is active */
        vmm_tear_down_all(act);                     /* Kill off all VMM contexts */
    }
}

void
machine_thread_terminate_self(void)
{
    machine_act_terminate(current_act());
}

void
machine_thread_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
    panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
    printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
           thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%x active=%x hi=%x lo=%x\n",
           thr_act->suspend_count, thr_act->active,
           thr_act->higher, thr_act->lower);

    return((int)thr_act);
}

#endif

unsigned int
get_useraddr()
{
    return(current_act()->mact.upcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(
    thread_t        thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
                 thread, thread->priority,
                 thread->sched_pri, 0, 0);

    if (thread->top_act)
        act_machine_sv_free(thread->top_act);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 *
 */

void
machine_stack_attach(
    thread_t        thread,
    vm_offset_t     stack,
    void            (*start)(thread_t))
{
    thread_act_t thr_act;
    unsigned int *kss;
    struct savearea *sv;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
                 thread, thread->priority,
                 thread->sched_pri, start,
                 0);

    assert(stack);
    kss = (unsigned int *)STACK_IKS(stack);
    thread->kernel_stack = stack;

    /* during initialization we sometimes do not have an
       activation. in that case do not do anything */
    if ((thr_act = thread->top_act) != 0) {
        sv = save_get();                            /* cannot block */
        sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);  /* Mark as in use */
        sv->save_hdr.save_act = (struct thread_activation *)thr_act;
        sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb);
        thr_act->mact.pcb = sv;

        sv->save_srr0 = (unsigned int) start;
        /* sv->save_r3 = ARG ? */
        sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
        sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
        sv->save_fpscr = 0;                         /* Clear all floating point exceptions */
        sv->save_vrsave = 0;                        /* Set the vector save state */
        sv->save_vscr[3] = 0x00010000;              /* Suppress java mode */
        *(CAST_DOWN(int *, sv->save_r1)) = 0;       /* Zero the frame's back chain word */
        thr_act->mact.ksp = 0;
    }

    return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(
    thread_t        old,
    thread_t        new)
{

    vm_offset_t stack;
    pmap_t new_pmap;
    facility_context *fowner;
    mapping *mp;
    struct per_proc_info *ppinfo;

    assert(new->top_act);
    assert(old->top_act);

    if (old == new)
        panic("machine_stack_handoff");

    stack = machine_stack_detach(old);
    new->kernel_stack = stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }

    ppinfo = getPerProc();                          /* Get our processor block */

    ppinfo->cpu_flags &= ~traceBE;                  /* Turn off special branch trace */

    if(real_ncpus > 1) {                            /* This is potentially slow, so only do when actually SMP */
        fowner = ppinfo->FPU_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old->top_act) {    /* Is it for us? */
                fpu_save(fowner);                   /* Yes, save it */
            }
        }
        fowner = ppinfo->VMX_owner;                 /* Cache this because it may change */
        if(fowner) {                                /* Is there any live context? */
            if(fowner->facAct == old->top_act) {    /* Is it for us? */
                vec_save(fowner);                   /* Yes, save it */
            }
        }
    }

    /*
     * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
     * These bits can be modified in the per_proc without updating the thread spcFlags.
     */
    if(old->top_act->mact.specFlags & runningVM) {  /* Is the current thread running a VM? */
        old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
        old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
    }
    old->top_act->mact.specFlags &= ~OnProc;
    new->top_act->mact.specFlags |= OnProc;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
                          old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

    if(new->top_act->mact.specFlags & runningVM) {  /* Is the new guy running a VM? */
        pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
        ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
        ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
        ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
    }
    else {                                          /* otherwise, we use the task's pmap */
        new_pmap = new->top_act->task->map->pmap;
        if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

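    /*
     * Make the per_proc reflect the incoming activation: current act,
     * user-assist word, Blue Box task environment, and special flags.
     */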
    machine_thread_set_current(new);
    ppinfo->Uassist = new->top_act->mact.cthread_self;

    ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
    ppinfo->spcFlags = new->top_act->mact.specFlags;

    old->top_act->mact.cioSpace |= cioSwitchAway;   /* Show we switched away from this guy */
    mp = (mapping *)&ppinfo->ppCIOmp;
    mp->mpSpace = invalSpace;                       /* Since we can't handoff in the middle of copy in/out, just invalidate */

    if (branch_tracing_enabled())
        ppinfo->cpu_flags |= traceBE;

    if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);  /* Cut trace entry if tracing */

    return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void) )
{

    unsigned int *kss;
    vm_offset_t tsp;

    assert(current_thread()->kernel_stack);
    kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
    assert(continuation);

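    /*
     * Carve a minimal kernel frame (KF_SIZE) below the top of the stack
     * and zero its back chain word, terminating the frame chain.
     */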
    tsp = (vm_offset_t)((int)kss - KF_SIZE);
    assert(tsp);
    *((int *)tsp) = 0;

    Call_continuation(continuation, tsp);

    return;
}