/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */
#include <debug.h>
#include <cpus.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>
extern int real_ncpus;							/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */

void machine_act_terminate(thread_act_t act);
/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)
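/*
 * KF_SIZE is the space reserved at the top of a kernel stack for the
 * initial frame: the fixed frame header (FM_SIZE), the argument spill
 * area (ARG_SIZE), and the red zone (FM_REDZONE), all from ppc/asm.h.
 * machine_stack_attach() and call_continuation() below both set the
 * initial stack pointer to STACK_IKS(stack) - KF_SIZE.
 */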
/*
 * Debug counters for floating point and vector context traps and switches.
 */
#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif
/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}
/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 */
thread_t
machine_switch_context(
	thread_t			old,
	thread_continue_t	continuation,
	thread_t			new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register thread_t retval;
	pmap_t new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	if (old == new)
		panic("machine_switch_context");

	ppinfo = getPerProc();							/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;					/* disable branch tracing if on */

	check_simple_locks();
	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}
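	/*
	 * Note that floating point and vector state are switched lazily: the
	 * per_proc FPU_owner/VMX_owner fields name the context currently live
	 * in the hardware registers.  The explicit saves above are only needed
	 * on SMP, where another processor could pick this thread up before a
	 * lazy save would happen here.
	 */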
	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old_act->mact.specFlags &= ~OnProc;
	new_act->mact.specFlags |= OnProc;
	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	if(old_act->mact.cioSpace != invalSpace) {			/* Does our old guy have an active copyin/out? */
		old_act->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */
		hw_blow_seg(copyIOaddr);						/* Blow off the first segment */
		hw_blow_seg(copyIOaddr + 0x10000000ULL);		/* Blow off the second segment */
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);
	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();							/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;					/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	savearea		*sv;							/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;
	hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4);	/* Account for the number of saveareas we think we "need"
															   for this activation */
	assert(thread->mact.pcb == (savearea *)0);			/* Make sure there was no previous savearea */

	sv = save_alloc();									/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */
	sv->save_hdr.save_prev = 0;							/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = (struct thread_activation *)thread;	/* Set who owns it */
	thread->mact.pcb = sv;								/* Point to the save area */
	thread->mact.curctx = &thread->mact.facctx;			/* Initialize facility context */
	thread->mact.facctx.facAct = thread;				/* Initialize facility context pointer to activation */
	thread->mact.cioSpace = invalSpace;					/* Initialize copyin/out space to invalid */
	thread->mact.preemption_count = 0;					/* Initialize preemption counter */
	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see machine_stack_attach()).
	 */

	thread->mact.upcb = sv;								/* Set user pcb */
	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;		/* Set the default user MSR */
	sv->save_fpscr = 0;									/* Clear all floating point exceptions */
	sv->save_vrsave = 0;								/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;						/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
/*
 *	This function will release all context.
 */

	machine_act_terminate(thread);						/* Make sure all virtual machines are dead first */
/*
 *	Walk through and release all floating point and vector contexts. Also kill live context.
 */
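/*
 *	Each facility context keeps a singly linked chain of saveareas, newest
 *	first, linked through save_hdr.save_prev:
 *
 *		curctx->VMXsave -> [vec savearea] -> [vec savearea] -> 0
 *		curctx->FPUsave -> [fpu savearea] -> [fpu savearea] -> 0
 *
 *	The loops below walk each chain, releasing every savearea, and then
 *	clear the chain head.
 */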
	toss_live_vec(thread->mact.curctx);					/* Dump live vectors */

	vsv = thread->mact.curctx->VMXsave;					/* Get the top vector savearea */

	while(vsv) {										/* Any VMX saved state? */
		vpsv = vsv;										/* Remember so we can toss this */
		vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((savearea *)vpsv);					/* Release it */
	}

	thread->mact.curctx->VMXsave = 0;					/* Kill chain */
	toss_live_fpu(thread->mact.curctx);					/* Dump live float */

	fsv = thread->mact.curctx->FPUsave;					/* Get the top float savearea */

	while(fsv) {										/* Any float saved state? */
		fpsv = fsv;										/* Remember so we can toss this */
		fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((savearea *)fpsv);					/* Release it */
	}

	thread->mact.curctx->FPUsave = 0;					/* Kill chain */
/*
 * free all regular saveareas.
 */

	pcb = thread->mact.pcb;								/* Get the general savearea */

	while(pcb) {										/* Any general saved state? */
		ppsv = pcb;										/* Remember so we can toss this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release(ppsv);								/* Release it */
	}

	hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4);	/* Unaccount for the number of saveareas we think we "need" */
}
/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;
/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching.  Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t		thread,
	thread_act_t	old,
	thread_act_t	new)
{
	pmap_t new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	ppinfo = getPerProc();							/* Get our processor block */
	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}
	old->mact.cioSpace |= cioSwitchAway;			/* Show we switched away from this guy */

	ast_context(new, cpu_number());
	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}
}
/*
 * act_machine_sv_free
 * release the saveareas associated with an act.  All kernel-level
 * saveareas are released; the user-level savearea, if any, is kept.
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;
/*
 *	This function will release all non-user state context.
 */

/*
 *	Walk through and release all floating point and vector contexts that are not
 *	user state.  We will also blow away live context if it belongs to non-user state.
 *	Note that the level can not change while we are in this code.  Nor can another
 *	context be pushed on the stack.
 *
 *	We do nothing here if the current level is user.  Otherwise,
 *	the live context is cleared.  Then we find the user saved context.
 *	Next, we take the sync lock (to keep us from munging things in *_switch).
 *	The level is set to 0 and all stacked context other than user is dequeued.
 *	Then we unlock.  Next, all of the old kernel contexts are released.
 */
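/*
 *	A sketch of the trim performed below, assuming kernel levels are stacked
 *	above a user-level savearea (save_level == 0 marks user state):
 *
 *		before:	VMXsave -> [level 2] -> [level 1] -> [user] -> 0
 *		after:	VMXsave -> [user] -> 0,  VMXlevel = 0
 *
 *	The [level 2] and [level 1] saveareas are handed back via save_ret().
 *	The FPU chain is trimmed the same way.
 */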
	if(act->mact.curctx->VMXlevel) {					/* Is the current level user state? */

		toss_live_vec(act->mact.curctx);				/* Dump live vectors if not user */

		vsv = act->mact.curctx->VMXsave;				/* Get the top vector savearea */

		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsvt = act->mact.curctx->VMXsave;				/* Get the top of the chain */
		act->mact.curctx->VMXsave = vsv;				/* Point to the user context */
		act->mact.curctx->VMXlevel = 0;					/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);	/* Unlock */

		while(vsvt) {									/* Clear any VMX saved state */
			if (vsvt == vsv) break;						/* Done when we hit the user context, if any */
			vpst = vsvt;								/* Remember so we can toss this */
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;	/* Get the one underneath ours */
			save_ret((savearea *)vpst);					/* Release it */
		}
	}
	if(act->mact.curctx->FPUlevel) {					/* Is the current level user state? */

		toss_live_fpu(act->mact.curctx);				/* Dump live floats if not user */

		fsv = act->mact.curctx->FPUsave;				/* Get the top float savearea */

		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsvt = act->mact.curctx->FPUsave;				/* Get the top of the chain */
		act->mact.curctx->FPUsave = fsv;				/* Point to the user context */
		act->mact.curctx->FPUlevel = 0;					/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);	/* Unlock */

		while(fsvt) {									/* Clear any FPU saved state */
			if (fsvt == fsv) break;						/* Done when we hit the user context, if any */
			fpst = fsvt;								/* Remember so we can toss this */
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;	/* Get the one underneath ours */
			save_ret((savearea *)fpst);					/* Release it */
		}
	}
/*
 * free all regular saveareas except a user savearea, if any
 */

	pcb = act->mact.pcb;								/* Get the general savearea */
	userpcb = 0;										/* Assume no user context for now */

	while(pcb) {										/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {			/* Is this a user savearea? */
			userpcb = pcb;								/* Remember it so we can keep it */
			break;
		}
		svp = pcb;										/* Remember this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);	/* Get the one underneath ours */
		save_ret(svp);									/* Release it */
	}

	act->mact.pcb = userpcb;							/* Chain in the user if there is one, or 0 if not */
}
void
machine_thread_set_current(thread_t thread)
{
	set_machine_current_act(thread->top_act);
}
void
machine_act_terminate(
	thread_act_t	act)
{
	if(act->mact.bbDescAddr) {						/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);				/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {						/* Check if VMM is active */
		vmm_tear_down_all(act);						/* Kill off all VMM contexts */
	}
}
void
machine_thread_terminate_self(void)
{
	machine_act_terminate(current_act());
}
void
machine_thread_init(void)
{
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
}
#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}
int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
		   thr_act, thr_act->ref_count,
		   thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
		   thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\tsusp=%x active=%x hi=%x lo=%x\n",
		   0 /*thr_act->alerts*/, 0 /*thr_act->alert_mask*/,
		   thr_act->suspend_count, thr_act->active,
		   thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif /* MACH_ASSERT */
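/*
 * get_useraddr: return the user pc of the current activation.  save_srr0
 * in the user savearea holds the user-mode instruction address captured
 * when the thread last entered the kernel.
 */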
unsigned int
get_useraddr()
{
	return(current_act()->mact.upcb->save_srr0);
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(
	thread_t		thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
				 thread, thread->priority,
				 thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one.  the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 */
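/*
 * Resulting layout, sketched from the assignments below (high addresses
 * at the top):
 *
 *	STACK_IKS(stack)	interrupt/kernel save state
 *	kss - KF_SIZE		initial r1; the frame back chain word is zeroed
 *
 * save_srr0 is loaded with the continuation pc and save_srr1 with a
 * supervisor, interrupts-off MSR, so the thread resumes in start().
 */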
void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack,
	void			(*start)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
				 thread, thread->priority,
				 thread->sched_pri, start,
				 0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;
	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();							/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = (struct thread_activation *)thr_act;
		sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb);
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;							/* Clear all floating point exceptions */
		sv->save_vrsave = 0;						/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;				/* Suppress java mode */
		*(CAST_DOWN(int *, sv->save_r1)) = 0;		/* Zero the frame back chain */
		thr_act->mact.ksp = 0;
	}

	return;
}
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(
	thread_t		old,
	thread_t		new)
{
	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping *mp;
	struct per_proc_info *ppinfo;
	assert(new->top_act);
	assert(old->top_act);

	if (old == new)
		panic("machine_stack_handoff");

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	ppinfo = getPerProc();							/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;					/* Turn off special branch trace */

	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}
	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {	/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->top_act->mact.specFlags &= ~OnProc;
	new->top_act->mact.specFlags |= OnProc;
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);
	if(new->top_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}
	machine_thread_set_current(new);
	ppinfo->Uassist = new->top_act->mact.cthread_self;

	ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	ppinfo->spcFlags = new->top_act->mact.specFlags;

	old->top_act->mact.cioSpace |= cioSwitchAway;	/* Show we switched away from this guy */
	mp = (mapping *)&ppinfo->ppCIOmp;
	mp->mpSpace = invalSpace;						/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Cut trace entry if tracing */

	return;
}
/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */
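/*
 * The actual dispatch is done by Call_continuation (assembly, believed to
 * live in cswtch.s): it installs the new stack pointer and branches to
 * the continuation, which is not expected to return here.
 */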
void
call_continuation(void (*continuation)(void) )
{
	unsigned int *kss;
	vm_offset_t tsp;
	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;								/* Zero the frame back chain */

	Call_continuation(continuation, tsp);

	return;
}