/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>
extern int 		real_ncpus;						/* Number of actual CPUs */
extern struct	Saveanchor saveanchor;			/* Aligned savearea anchor */
/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)
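
/*
 * Note: KF_SIZE is simply the space reserved at the top of every kernel
 * stack -- the fixed PPC frame (FM_SIZE) plus the argument save area
 * (ARG_SIZE) plus the red zone (FM_REDZONE) -- which is why the initial
 * stack pointers set up below always sit KF_SIZE bytes under STACK_IKS().
 */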
int   fpu_trap_count = 0;
int   fpu_switch_count = 0;
int   vec_trap_count = 0;
int   vec_switch_count = 0;
extern struct thread_shuttle *Switch_context(
		struct thread_shuttle	*old,
		void			(*continuation)(void),
		struct thread_shuttle	*new);

#if	MACH_LDEBUG || MACH_KDB
void		log_thread_action (char *, long, long, long);
#endif
/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}
/*
 * machine_kernel_stack_init: Attach a kernel stack to a thread.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss, *stck;
	savearea	*sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

	kss = (unsigned int *)STACK_IKS(stack);
	sv = thread->top_act->mact.pcb;						/* This for the sake of C */

	sv->save_lr = (uint64_t) start_pos;					/* Set up the execution address */
	sv->save_srr0 = (uint64_t) start_pos;				/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;				/* Set the normal running MSR */
	stck = (unsigned int *)((unsigned int)kss - KF_SIZE);	/* Point to the top frame */
	sv->save_r1 = (uint64_t)stck;						/* Point to the top frame on the stack */
	sv->save_fpscr = 0;									/* Clear all floating point exceptions */
	sv->save_vrsave = 0;								/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;						/* Suppress java mode */

	*stck = 0;											/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;						/* Show that the kernel stack is in use already */
}
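
/*
 * Sketch of the resulting layout (derived from the assignments above, for
 * illustration only):
 *
 *	STACK_IKS(stack) -->	+------------------------------+   kss
 *	                  	|  KF_SIZE reserved frame /    |
 *	                  	|  args / red zone             |
 *	save_r1, stck ---->	+------------------------------+   backpointer zeroed
 *	                  	|  rest of the kernel stack    |
 *
 * srr0/lr hold the entry point and srr1 the supervisor MSR, so when this
 * savearea is restored the thread begins running start_pos on this stack.
 */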
/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;
#if	MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif

	ppinfo = getPerProc();								/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;						/* disable branch tracing if on */

	check_simple_locks();
	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}
	/*
	 * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags
	 * These bits can be modified in the per proc without updating the thread spcFlags
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	if(old_act->mact.cioSpace != invalSpace) {			/* Does our old guy have an active copyin/out? */
		old_act->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */
		hw_blow_seg(copyIOaddr);						/* Blow off the first segment */
		hw_blow_seg(copyIOaddr + 0x10000000ULL);		/* Blow off the second segment */
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	/* *********** SWITCH HERE **************/
	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle *)NULL);
	/* *********** SWITCH HERE **************/

	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();							/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;					/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}
/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t	retval)
{
	thread->top_act->mact.pcb->save_r3 = retval;
}
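
/*
 * Illustrative sketch, not part of the original file: how a machine-dependent
 * syscall path could use thread_set_syscall_return().  The handler name
 * handle_some_syscall and its wiring are hypothetical; only the pairing with
 * thread_exception_return() is taken from the comment above.
 */
#if 0	/* example only, never compiled */
static void
handle_some_syscall(struct thread_shuttle *thread)
{
	kern_return_t kr;

	kr = KERN_SUCCESS;						/* result computed by the handler (hypothetical) */
	thread_set_syscall_return(thread, kr);	/* lands in save_r3, the PPC syscall return register */
	thread_exception_return();				/* does not return; resumes user mode with that value */
}
#endif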
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
		      struct thread_shuttle *thread,
		      thread_act_t thr_act,
		      void (*start_pos)(thread_t))
{
	savearea		*sv;							/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;

	hw_atomic_add(&saveanchor.savetarget, 4);		/* Account for the number of saveareas we think we "need"
													   for this activation */
	assert(thr_act->mact.pcb == (savearea *)0);		/* Make sure there was no previous savearea */

	sv = save_alloc();								/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;						/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thr_act;				/* Set who owns it */
	thr_act->mact.pcb = sv;							/* Point to the save area */
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* Initialize facility context */
	thr_act->mact.facctx.facAct = thr_act;			/* Initialize facility context pointer to activation */
	thr_act->mact.cioSpace = invalSpace;			/* Initialize copyin/out space to invalid */
	thr_act->mact.preemption_count = 0;				/* Initialize preemption counter */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;	/* Set the default user MSR */
	sv->save_fpscr = 0;								/* Clear all floating point exceptions */
	sv->save_vrsave = 0;							/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;					/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}
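
/*
 * Illustrative sketch, not part of the original file: the save_flags
 * manipulation used above (and again in stack_attach() and
 * thread_swapin_mach_alloc()) is the same idiom each time -- clear the
 * SAVtype field, then stamp in the new type.  A hypothetical helper
 * (mark_savearea_general is not a real xnu routine) would read:
 */
#if 0	/* example only, never compiled */
static void
mark_savearea_general(savearea *sv)
{
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);
}
#endif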
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}
/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;
/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching.  Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	ppinfo = getPerProc();								/* Get our processor block */

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	old->mact.cioSpace |= cioSwitchAway;				/* Show we switched away from this guy */

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {				/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);		/* Switch to the VM's pmap */
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}
}
void
pcb_user_to_kernel(thread_act_t act)
{

	return;												/* Not needed, I hope... */
}
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;
/*
 *	This function will release all non-user state context.
 */

/*
 *	Walk through and release all floating point and vector contexts that are not
 *	user state.  We will also blow away live context if it belongs to non-user state.
 *	Note that the level can not change while we are in this code.  Nor can another
 *	context be pushed on the stack.
 *
 *	We do nothing here if the current level is user.  Otherwise,
 *	the live context is cleared.  Then we find the user saved context.
 *	Next, we take the sync lock (to keep us from munging things in *_switch).
 *	The level is set to 0 and all stacked context other than user is dequeued.
 *	Then we unlock.  Next, all of the old kernel contexts are released.
 */
	if(act->mact.curctx->VMXlevel) {					/* Is the current level user state? */

		toss_live_vec(act->mact.curctx);				/* Dump live vectors if is not user */

		vsv = act->mact.curctx->VMXsave;				/* Get the top vector savearea */

		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsvt = act->mact.curctx->VMXsave;				/* Get the top of the chain */
		act->mact.curctx->VMXsave = vsv;				/* Point to the user context */
		act->mact.curctx->VMXlevel = 0;					/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);	/* Unlock */

		while(vsvt) {									/* Clear any VMX saved state */
			if (vsvt == vsv) break;						/* Done when hit user if any */
			vpst = vsvt;								/* Remember so we can toss this */
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)vpst);					/* Release it */
		}
	}
	if(act->mact.curctx->FPUlevel) {					/* Is the current level user state? */

		toss_live_fpu(act->mact.curctx);				/* Dump live floats if is not user */

		fsv = act->mact.curctx->FPUsave;				/* Get the top floats savearea */

		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Find user context if any */

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsvt = act->mact.curctx->FPUsave;				/* Get the top of the chain */
		act->mact.curctx->FPUsave = fsv;				/* Point to the user context */
		act->mact.curctx->FPUlevel = 0;					/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);	/* Unlock */

		while(fsvt) {									/* Clear any FPU saved state */
			if (fsvt == fsv) break;						/* Done when hit user if any */
			fpst = fsvt;								/* Remember so we can toss this */
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;	/* Get one underneath ours */
			save_ret((savearea *)fpst);					/* Release it */
		}
	}
	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;								/* Get the general savearea */
	userpcb = 0;										/* Assume no user context for now */

	while(pcb) {										/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {			/* Is this a user savearea? */
			userpcb = pcb;								/* Remember the user savearea */
			break;										/* Done once we hit user state */
		}
		svp = pcb;										/* Remember this */
		pcb = pcb->save_hdr.save_prev;					/* Get one underneath ours */
		save_ret(svp);									/* Release it */
	}

	act->mact.pcb = userpcb;							/* Chain in the user if there is one, or 0 if not */
}
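
/*
 * Illustrative sketch, not part of the original file: the general, FPU, and
 * vector saveareas released above all hang off a save_prev chain, newest
 * context first, with any user-level savearea deeper in the chain.  A
 * hypothetical walker (count_saveareas is not a real xnu routine) over a
 * general-register chain:
 */
#if 0	/* example only, never compiled */
static int
count_saveareas(savearea *pcb)
{
	int n = 0;

	while (pcb) {									/* Newest savearea is at the head */
		n++;
		pcb = pcb->save_hdr.save_prev;				/* Older saveareas hang off save_prev */
	}
	return n;
}
#endif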
/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {							/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);					/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {							/* Check if VMM is active */
		vmm_tear_down_all(act);							/* Kill off all VMM contexts */
	}
}
/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;

/*
 *	This function will release all context.
 */

	act_virtual_machine_destroy(act);					/* Make sure all virtual machines are dead first */

/*
 *	Walk through and release all floating point and vector contexts. Also kill live context.
 */

	toss_live_vec(act->mact.curctx);					/* Dump live vectors */

	vsv = act->mact.curctx->VMXsave;					/* Get the top vector savearea */

	while(vsv) {										/* Any VMX saved state? */
		vpsv = vsv;										/* Remember so we can toss this */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)vpsv);					/* Release it */
	}

	act->mact.curctx->VMXsave = 0;						/* Kill chain */

	toss_live_fpu(act->mact.curctx);					/* Dump live float */

	fsv = act->mact.curctx->FPUsave;					/* Get the top float savearea */

	while(fsv) {										/* Any float saved state? */
		fpsv = fsv;										/* Remember so we can toss this */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)fpsv);					/* Release it */
	}

	act->mact.curctx->FPUsave = 0;						/* Kill chain */

/*
 * free all regular saveareas.
 */

	pcb = act->mact.pcb;								/* Get the general savearea */

	while(pcb) {										/* Any general saved state? */
		ppsv = pcb;										/* Remember so we can toss this */
		pcb = pcb->save_hdr.save_prev;					/* Get one underneath ours */
		save_release(ppsv);								/* Release it */
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);			/* Unaccount for the number of saveareas we think we "need" */
}
kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	return KERN_SUCCESS;
}

void act_machine_init()
{

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

	/*
	 * If we start using kernel activations, we would normally
	 * create kernel_thread_pool here, populating it from the act_zone
	 */
}
void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act->thread->top_act == thr_act );

	/* This is the only activation attached to the shuttle... */

	thread_terminate_self();

	panic("act_machine_return: TALKING ZOMBIE! (1)");
}
void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int	my_cpu = cpu_number();

	set_machine_current_thread(thread);
	set_machine_current_act(thread->top_act);

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}
void
thread_machine_init(void)
{
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
}
void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}
unsigned int
get_useraddr(void)
{
	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->save_srr0);
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one.  the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);

	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();								/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = thr_act;
		sv->save_hdr.save_prev = thr_act->mact.pcb;
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;								/* Clear all floating point exceptions */
		sv->save_vrsave = 0;							/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;					/* Suppress java mode */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}
/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
	      thread_t new)
{
	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping *mp;
	struct per_proc_info *ppinfo;
	int my_cpu;

	assert(new->top_act);
	assert(old->top_act);

	my_cpu = cpu_number();
	stack = stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->stack_privilege) {
		assert(new->stack_privilege);
		old->stack_privilege = new->stack_privilege;
		new->stack_privilege = stack;
	}

	ppinfo = getPerProc();								/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;						/* Turn off special branch trace */

	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;						/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	/*
	 * If old thread is running VM, save per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags
	 * These bits can be modified in the per proc without updating the thread spcFlags
	 */
	if(old->top_act->mact.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	if(new->top_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[my_cpu] = new->kernel_stack;
	ppinfo->Uassist = new->top_act->mact.cthread_self;

	ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	ppinfo->spcFlags = new->top_act->mact.specFlags;

	old->top_act->mact.cioSpace |= cioSwitchAway;		/* Show we switched away from this guy */
	mp = (mapping *)&ppinfo->ppCIOmp;
	mp->mpSpace = invalSpace;							/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Cut trace entry if tracing */

	return;
}
/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void) )
{
	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);

	Call_continuation(continuation, tsp);

	return;
}
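
/*
 * Illustrative sketch, not part of the original file: a caller hands
 * call_continuation() a void routine to run on the freshly reset kernel
 * stack.  my_continuation is hypothetical; the one visible requirement is
 * that it never returns to its caller, since the old frames are gone.
 */
#if 0	/* example only, never compiled */
static void
my_continuation(void)
{
	/* ... finish whatever work was deferred, then block or terminate ... */
	thread_block(my_continuation);
}

/* elsewhere, running on the current thread's kernel stack: */
/*	call_continuation(my_continuation);	*/
#endif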
void
thread_swapin_mach_alloc(thread_t thread)
{
	savearea	*sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();									/* Get a savearea */
	sv->save_hdr.save_prev = 0;							/* Initialize back chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread->top_act;			/* Initialize owner */
	thread->top_act->mact.pcb = sv;
}