]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/pcb.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / osfmk / ppc / pcb.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Copyright (c) 1990,1991,1992 The University of Utah and
30 * the Center for Software Science (CSS). All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software is hereby
33 * granted provided that (1) source code retains these copyright, permission,
34 * and disclaimer notices, and (2) redistributions including binaries
35 * reproduce the notices in supporting documentation, and (3) all advertising
36 * materials mentioning features or use of this software display the following
37 * acknowledgement: ``This product includes software developed by the Center
38 * for Software Science at the University of Utah.''
39 *
40 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
41 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
42 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 *
44 * CSS requests users of this software to return to css-dist@cs.utah.edu any
45 * improvements that they make and grant CSS redistribution rights.
46 *
47 * Utah $Hdr: pcb.c 1.23 92/06/27$
48 */
49
50 #include <cpus.h>
51 #include <debug.h>
52
53 #include <types.h>
54 #include <kern/task.h>
55 #include <kern/thread.h>
56 #include <kern/thread_act.h>
57 #include <kern/thread_swap.h>
58 #include <mach/thread_status.h>
59 #include <vm/vm_kern.h>
60 #include <kern/mach_param.h>
61
62 #include <kern/misc_protos.h>
63 #include <ppc/misc_protos.h>
64 #include <ppc/exception.h>
65 #include <ppc/proc_reg.h>
66 #include <kern/spl.h>
67 #include <ppc/pmap.h>
68 #include <ppc/trap.h>
69 #include <ppc/mappings.h>
70 #include <ppc/savearea.h>
71 #include <ppc/Firmware.h>
72 #include <ppc/asm.h>
73 #include <ppc/thread_act.h>
74 #include <ppc/vmachmon.h>
75 #include <ppc/low_trace.h>
76
77 #include <sys/kdebug.h>
78
79 extern int real_ncpus; /* Number of actual CPUs */
80 extern struct Saveanchor saveanchor; /* Aliged savearea anchor */
81
82 void machine_act_terminate(thread_act_t act);
83
84 /*
85 * These constants are dumb. They should not be in asm.h!
86 */
87
88 #define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
89
90 #if DEBUG
91 int fpu_trap_count = 0;
92 int fpu_switch_count = 0;
93 int vec_trap_count = 0;
94 int vec_switch_count = 0;
95 #endif
96
/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/* Nothing machine-dependent is reclaimable on this architecture. */
}
108
/*
 * consider_machine_adjust: machine-dependent memory pressure hook.
 * Delegates to the PPC mapping subsystem to trim mapping structures.
 */
void
consider_machine_adjust()
{
        consider_mapping_adjust();
}
114
/*
 * switch_context: Switch from one thread to another, needed for
 * 		   switching of space
 *
 * Saves any live floating point / vector state belonging to the outgoing
 * thread (SMP only), switches the address space if required, and transfers
 * control via Switch_context.  Returns the thread we switched away from.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register thread_t retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	if (old == new)
		panic("machine_switch_context");

	ppinfo = getPerProc();				/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;			/* disable branch tracing if on */

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {				/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;		/* Cache this because it may change */
		if(fowner) {				/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);	/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;		/* Cache this because it may change */
		if(fowner) {				/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);	/* Yes, save it */
			}
		}
	}

	/*
	 * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags
	 * These bits can be modified in the per proc without updating the thread spcFlags
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old_act->mact.specFlags &= ~OnProc;
	new_act->mact.specFlags |= OnProc;

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);		/* Switch if there is a change */
		}
	}

	if(old_act->mact.cioSpace != invalSpace) {	/* Does our old guy have an active copyin/out? */
		old_act->mact.cioSpace |= cioSwitchAway;	/* Show we switched away from this guy */
		hw_blow_seg(copyIOaddr);		/* Blow off the first segment */
		hw_blow_seg(copyIOaddr + 0x10000000ULL);	/* Blow off the second segment */
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);

	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();			/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;		/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}
215
216 /*
217 * Initialize the machine-dependent state for a new thread.
218 */
219 kern_return_t
220 machine_thread_create(
221 thread_t thread,
222 task_t task)
223 {
224 savearea *sv; /* Pointer to newly allocated savearea */
225 unsigned int *CIsTooLimited, i;
226
227 hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4); /* Account for the number of saveareas we think we "need"
228 for this activation */
229 assert(thread->mact.pcb == (savearea *)0); /* Make sure there was no previous savearea */
230
231 sv = save_alloc(); /* Go get us a savearea */
232
233 bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm))); /* Clear it */
234
235 sv->save_hdr.save_prev = 0; /* Clear the back pointer */
236 sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft); /* Mark as in use */
237 sv->save_hdr.save_act = (struct thread_activation *)thread; /* Set who owns it */
238 thread->mact.pcb = sv; /* Point to the save area */
239 thread->mact.curctx = &thread->mact.facctx; /* Initialize facility context */
240 thread->mact.facctx.facAct = thread; /* Initialize facility context pointer to activation */
241 thread->mact.cioSpace = invalSpace; /* Initialize copyin/out space to invalid */
242 thread->mact.preemption_count = 0; /* Initialize preemption counter */
243
244 /*
245 * User threads will pull their context from the pcb when first
246 * returning to user mode, so fill in all the necessary values.
247 * Kernel threads are initialized from the save state structure
248 * at the base of the kernel stack (see stack_attach()).
249 */
250
251 thread->mact.upcb = sv; /* Set user pcb */
252 sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET; /* Set the default user MSR */
253 sv->save_fpscr = 0; /* Clear all floating point exceptions */
254 sv->save_vrsave = 0; /* Set the vector save state */
255 sv->save_vscr[0] = 0x00000000;
256 sv->save_vscr[1] = 0x00000000;
257 sv->save_vscr[2] = 0x00000000;
258 sv->save_vscr[3] = 0x00010000; /* Disable java mode and clear saturated */
259
260 return(KERN_SUCCESS);
261 }
262
263 /*
264 * Machine-dependent cleanup prior to destroying a thread
265 */
266 void
267 machine_thread_destroy(
268 thread_t thread)
269 {
270 register savearea *pcb, *ppsv;
271 register savearea_vec *vsv, *vpsv;
272 register savearea_fpu *fsv, *fpsv;
273 register savearea *svp;
274 register int i;
275
276 /*
277 * This function will release all context.
278 */
279
280 machine_act_terminate(thread); /* Make sure all virtual machines are dead first */
281
282 /*
283 *
284 * Walk through and release all floating point and vector contexts. Also kill live context.
285 *
286 */
287
288 toss_live_vec(thread->mact.curctx); /* Dump live vectors */
289
290 vsv = thread->mact.curctx->VMXsave; /* Get the top vector savearea */
291
292 while(vsv) { /* Any VMX saved state? */
293 vpsv = vsv; /* Remember so we can toss this */
294 vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev); /* Get one underneath our's */
295 save_release((savearea *)vpsv); /* Release it */
296 }
297
298 thread->mact.curctx->VMXsave = 0; /* Kill chain */
299
300 toss_live_fpu(thread->mact.curctx); /* Dump live float */
301
302 fsv = thread->mact.curctx->FPUsave; /* Get the top float savearea */
303
304 while(fsv) { /* Any float saved state? */
305 fpsv = fsv; /* Remember so we can toss this */
306 fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev); /* Get one underneath our's */
307 save_release((savearea *)fpsv); /* Release it */
308 }
309
310 thread->mact.curctx->FPUsave = 0; /* Kill chain */
311
312 /*
313 * free all regular saveareas.
314 */
315
316 pcb = thread->mact.pcb; /* Get the general savearea */
317
318 while(pcb) { /* Any float saved state? */
319 ppsv = pcb; /* Remember so we can toss this */
320 pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev); /* Get one underneath our's */
321 save_release(ppsv); /* Release it */
322 }
323
324 hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4); /* Unaccount for the number of saveareas we think we "need" */
325 }
326
327 /*
328 * Number of times we needed to swap an activation back in before
329 * switching to it.
330 */
331 int switch_act_swapins = 0;
332
/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 *
 * NOTE(review): the 'thread' parameter is not referenced in this
 * function; 'old'/'new' are the outgoing and incoming activations.
 */
void
machine_switch_act( 
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new)
{
	pmap_t		new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	ppinfo = getPerProc();				/* Get our processor block */

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {				/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;		/* Cache this because it may change */
		if(fowner) {				/* Is there any live context? */
			if(fowner->facAct == old) {	/* Is it for us? */
				fpu_save(fowner);	/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;		/* Cache this because it may change */
		if(fowner) {				/* Is there any live context? */
			if(fowner->facAct == old) {	/* Is it for us? */
				vec_save(fowner);	/* Yes, save it */
			}
		}
	}

	old->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */

	ast_context(new, cpu_number());

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);		/* Switch if there is a change */
		}
	}

}
390
391 /*
392 * act_machine_sv_free
393 * release saveareas associated with an act. if flag is true, release
394 * user level savearea(s) too, else don't
395 *
396 * this code cannot block so we call the proper save area free routine
397 */
398 void
399 act_machine_sv_free(thread_act_t act)
400 {
401 register savearea *pcb, *userpcb;
402 register savearea_vec *vsv, *vpst, *vsvt;
403 register savearea_fpu *fsv, *fpst, *fsvt;
404 register savearea *svp;
405 register int i;
406
407 /*
408 * This function will release all non-user state context.
409 */
410
411 /*
412 *
413 * Walk through and release all floating point and vector contexts that are not
414 * user state. We will also blow away live context if it belongs to non-user state.
415 * Note that the level can not change while we are in this code. Nor can another
416 * context be pushed on the stack.
417 *
418 * We do nothing here if the current level is user. Otherwise,
419 * the live context is cleared. Then we find the user saved context.
420 * Next, we take the sync lock (to keep us from munging things in *_switch).
421 * The level is set to 0 and all stacked context other than user is dequeued.
422 * Then we unlock. Next, all of the old kernel contexts are released.
423 *
424 */
425
426 if(act->mact.curctx->VMXlevel) { /* Is the current level user state? */
427
428 toss_live_vec(act->mact.curctx); /* Dump live vectors if is not user */
429
430 vsv = act->mact.curctx->VMXsave; /* Get the top vector savearea */
431
432 while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */
433
434 if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) { /* Get the sync lock */
435 panic("act_machine_sv_free - timeout getting VMX sync lock\n"); /* Tell all and die */
436 }
437
438 vsvt = act->mact.curctx->VMXsave; /* Get the top of the chain */
439 act->mact.curctx->VMXsave = vsv; /* Point to the user context */
440 act->mact.curctx->VMXlevel = 0; /* Set the level to user */
441 hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync); /* Unlock */
442
443 while(vsvt) { /* Clear any VMX saved state */
444 if (vsvt == vsv) break; /* Done when hit user if any */
445 vpst = vsvt; /* Remember so we can toss this */
446 vsvt = (savearea_vec *)vsvt->save_hdr.save_prev; /* Get one underneath our's */
447 save_ret((savearea *)vpst); /* Release it */
448 }
449
450 }
451
452 if(act->mact.curctx->FPUlevel) { /* Is the current level user state? */
453
454 toss_live_fpu(act->mact.curctx); /* Dump live floats if is not user */
455
456 fsv = act->mact.curctx->FPUsave; /* Get the top floats savearea */
457
458 while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */
459
460 if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) { /* Get the sync lock */
461 panic("act_machine_sv_free - timeout getting FPU sync lock\n"); /* Tell all and die */
462 }
463
464 fsvt = act->mact.curctx->FPUsave; /* Get the top of the chain */
465 act->mact.curctx->FPUsave = fsv; /* Point to the user context */
466 act->mact.curctx->FPUlevel = 0; /* Set the level to user */
467 hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync); /* Unlock */
468
469 while(fsvt) { /* Clear any VMX saved state */
470 if (fsvt == fsv) break; /* Done when hit user if any */
471 fpst = fsvt; /* Remember so we can toss this */
472 fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev; /* Get one underneath our's */
473 save_ret((savearea *)fpst); /* Release it */
474 }
475
476 }
477
478 /*
479 * free all regular saveareas except a user savearea, if any
480 */
481
482 pcb = act->mact.pcb; /* Get the general savearea */
483 userpcb = 0; /* Assume no user context for now */
484
485 while(pcb) { /* Any float saved state? */
486 if (pcb->save_srr1 & MASK(MSR_PR)) { /* Is this a user savearea? */
487 userpcb = pcb; /* Remember so we can toss this */
488 break;
489 }
490 svp = pcb; /* Remember this */
491 pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev); /* Get one underneath our's */
492 save_ret(svp); /* Release it */
493 }
494
495 act->mact.pcb = userpcb; /* Chain in the user if there is one, or 0 if not */
496
497 }
498
/*
 * machine_thread_set_current: make the given thread's top activation
 * the machine-level current activation.
 */
void
machine_thread_set_current(thread_t	thread)
{
	set_machine_current_act(thread->top_act);
}
504
/*
 * machine_act_terminate: tear down machine-specific facilities tied to
 * an activation — the Blue Box (Classic) assist, if active, and any
 * virtual machine monitor contexts.
 */
void
machine_act_terminate(
	thread_act_t	act)
{
	if(act->mact.bbDescAddr) {		/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);	/* Kill off bluebox */
	}
	
	if(act->mact.vmmControl) {		/* Check if VMM is active */
		vmm_tear_down_all(act);		/* Kill off all VMM contexts */
	}
}
517
/*
 * machine_thread_terminate_self: terminate machine-dependent state for
 * the currently running activation (see machine_act_terminate).
 */
void
machine_thread_terminate_self(void)
{
	machine_act_terminate(current_act());
}
523
/*
 * machine_thread_init: one-time machine-dependent thread-layer setup.
 * Only sanity-checks the kernel stack size against the page size when
 * MACHINE_STACK is configured; otherwise a no-op.
 */
void
machine_thread_init(void)
{
#ifdef	MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}
533
534 #if MACH_ASSERT
535
/*
 * dump_thread: debug-only (MACH_ASSERT) helper that prints a thread's
 * address to the console.
 */
void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}
541
/*
 * dump_act: debug-only (MACH_ASSERT) helper that prints an activation's
 * reference counts, associated thread/task, and state flags.
 * Returns the activation pointer as an int, or 0 if none was given.
 */
int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\tsusp=%x active=%x hi=%x lo=%x\n",
	       0 /*thr_act->alerts*/, 0 /*thr_act->alert_mask*/,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}
560
561 #endif
562
/*
 * get_useraddr: return the user-mode PC (save_srr0 from the current
 * activation's user pcb).  Note the 64-bit save_srr0 is truncated to
 * unsigned int by the return type.
 */
unsigned int
get_useraddr()
{
	return(current_act()->mact.upcb->save_srr0);
}
568
/*
 * detach and return a kernel stack from a thread
 *
 * Frees the thread's non-user saveareas first (act_machine_sv_free),
 * then unhooks and returns the kernel stack base.
 */
vm_offset_t
machine_stack_detach(
	thread_t		thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}
590
/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 *
 */
void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack,
	void			(*start)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start,
		     0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);		/* Interrupt/kernel save state at top of stack */
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();			/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = (struct thread_activation *)thr_act;
		sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb);	/* Push onto the pcb chain */
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start;	/* Resume at the continuation */
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);	/* Initial stack pointer below the save state */
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;	/* Kernel MSR, interrupts disabled */
		sv->save_fpscr = 0;			/* Clear all floating point exceptions */
		sv->save_vrsave = 0;			/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;		/* Suppress java mode */
		*(CAST_DOWN(int *, sv->save_r1)) = 0;	/* Zero the frame back-chain word */
		thr_act->mact.ksp = 0;
	}

	return;
}
644
/*
 * move a stack from old to new thread
 *
 * Detaches the kernel stack from 'old', attaches it to 'new', saves any
 * live FPU/vector state for 'old' (SMP only), switches address space if
 * needed, and updates per-processor state for the incoming thread.
 */
void
machine_stack_handoff(
	thread_t	old,
	thread_t	new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping *mp;
	struct per_proc_info *ppinfo;
	
	assert(new->top_act);
	assert(old->top_act);

	if (old == new)
		panic("machine_stack_handoff");
	
	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->reserved_stack) {	/* Keep the reserved (emergency) stack with a thread */
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	ppinfo = getPerProc();			/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;		/* Turn off special branch trace */

	if(real_ncpus > 1) {			/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;	/* Cache this because it may change */
		if(fowner) {			/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);	/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;	/* Cache this because it may change */
		if(fowner) {			/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);	/* Yes, save it */
			}
		}
	}

	/*
	 * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags
	 * These bits can be modified in the per proc without updating the thread spcFlags
	 */
	if(old->top_act->mact.specFlags & runningVM) {	/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->top_act->mact.specFlags &= ~OnProc;
	new->top_act->mact.specFlags |= OnProc;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {					/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);	/* Switch if there is a change */
		}
	}

	machine_thread_set_current(new);
	ppinfo->Uassist = new->top_act->mact.cthread_self;

	ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	ppinfo->spcFlags = new->top_act->mact.specFlags;

	old->top_act->mact.cioSpace |= cioSwitchAway;	/* Show we switched away from this guy */
	mp = (mapping *)&ppinfo->ppCIOmp;
	mp->mpSpace = invalSpace;		/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;
	
	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Cut trace entry if tracing */

	return;
}
739
740 /*
741 * clean and initialize the current kernel stack and go to
742 * the given continuation routine
743 */
744
745 void
746 call_continuation(void (*continuation)(void) )
747 {
748
749 unsigned int *kss;
750 vm_offset_t tsp;
751
752 assert(current_thread()->kernel_stack);
753 kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
754 assert(continuation);
755
756 tsp = (vm_offset_t)((int)kss - KF_SIZE);
757 assert(tsp);
758 *((int *)tsp) = 0;
759
760 Call_continuation(continuation, tsp);
761
762 return;
763 }