/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>
extern int		real_ncpus;			/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)
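/* KF_SIZE is the space reserved at the top of a fresh kernel stack: one stack
   frame plus the argument save area plus the red zone. New stacks point r1 at
   (stack top - KF_SIZE); see machine_kernel_stack_init and stack_attach below. */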
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
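/* Statistics: how often the floating-point and vector facilities trapped and
   how often their saved state had to be switched. */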
extern struct thread_shuttle *Switch_context(
	struct thread_shuttle	*old,
	void			(*continuation)(void),
	struct thread_shuttle	*new);
#if	MACH_LDEBUG || MACH_KDB
void		log_thread_action(char *, long, long, long);
#endif
/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
consider_machine_collect()
{
	/* none currently available */
}

consider_machine_adjust()
{
	consider_mapping_adjust();
}
/*
 * machine_kernel_stack_init: set up the initial save state for a kernel
 * stack being attached to a thread (see stack_attach below).
 */
machine_kernel_stack_init(
	struct thread_shuttle	*thread,
	void			(*start_pos)(thread_t))

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif	/* MACH_ASSERT */
	kss = (unsigned int *)STACK_IKS(stack);
	sv = thread->top_act->mact.pcb;			/* This is for the sake of C */

	sv->save_lr = (unsigned int) start_pos;		/* Set up the execution address */
	sv->save_srr0 = (unsigned int) start_pos;	/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;		/* Set the normal running MSR */
	sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);	/* Point to the top frame on the stack */
	sv->save_fpscr = 0;				/* Clear all floating point exceptions */
	sv->save_vrsave = 0;				/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;			/* Suppress java mode */

	*((int *)sv->save_r1) = 0;			/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;			/* Show that the kernel stack is in use already */
/*
 * switch_context: Switch from one thread to another.
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle	*old,
	void			(*continuation)(void),
	struct thread_shuttle	*new)

	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle *retval;
	facility_context *fowner;

#if	MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif
	my_cpu = cpu_number();
	per_proc_info[my_cpu].old_thread = (unsigned int)old;
	per_proc_info[my_cpu].cpu_flags &= ~traceBE;	/* disable branch tracing if on */
	assert(old_act->kernel_loaded ||
	       active_stacks[my_cpu] == old_act->thread->kernel_stack);

	check_simple_locks();
	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other processor if needed
	 */
	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old, continuation, new);
	}
	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}
	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);			/* Switch if there is a change */
		}
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle *)NULL);

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;	/* restore branch tracing */
	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */
/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
thread_set_syscall_return(
	struct thread_shuttle	*thread,
	kern_return_t		retval)

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif	/* MACH_ASSERT */

	thread->top_act->mact.pcb->save_r3 = retval;
/*
 * Initialize the machine-dependent state for a new thread.
 */
thread_machine_create(
	struct thread_shuttle	*thread,
	thread_act_t		thr_act,
	void			(*start_pos)(thread_t))

	savearea	*sv;				/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */
	hw_atomic_add(&saveanchor.savetarget, 4);	/* Account for the number of saveareas we think we "need"
							   for this activation */
	assert(thr_act->mact.pcb == (savearea *)0);	/* Make sure there was no previous savearea */

	sv = save_alloc();				/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;			/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thr_act;		/* Set who owns it */
	sv->save_vscr[3] = 0x00010000;			/* Suppress java mode */
	thr_act->mact.pcb = sv;				/* Point to the save area */
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* Initialize facility context */
	thr_act->mact.facctx.facAct = thr_act;		/* Initialize facility context pointer to activation */

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif	/* MACH_ASSERT */
	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = MSR_EXPORT_MASK_SET;		/* Set the default user MSR */

	CIsTooLimited = (unsigned int *)(&sv->save_sr0);	/* Make a pointer 'cause C can't cast on the left */
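	/* Each SR value below combines SEG_REG_PROT (the default protection bits),
	 * the segment number shifted into bits 20-23, and the task's address space
	 * number taken from its pmap. */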
	for(i = 0; i < 16; i++) {			/* Initialize all SRs */
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;	/* Set the SR value */
	}

	return(KERN_SUCCESS);
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
thread_machine_destroy( thread_t thread )

	if (thread->kernel_stack) {

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
thread_machine_flush( thread_act_t cur_act )
/*
 * Number of times we needed to swap an activation back in
 */
int	switch_act_swapins = 0;
/*
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */

	facility_context *fowner;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other processor if needed
	 */
	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[cpu_number()].FPU_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = per_proc_info[cpu_number()].VMX_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);
	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}
pcb_user_to_kernel(thread_act_t act)

	return;						/* Not needed, I hope... */
/*
 * act_machine_sv_free
 *	Release the saveareas associated with an act that hold non-user state;
 *	the user-level savearea, if any, is kept.
 *
 *	This code cannot block, so we call the proper save area free routine.
 */
act_machine_sv_free(thread_act_t act)

	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;

	/*
	 * This function will release all non-user state context.
	 */
	/*
	 * Walk through and release all floating point and vector contexts that are not
	 * user state. We will also blow away live context if it belongs to non-user state.
	 */

	if(act->mact.curctx->VMXlevel) {			/* Is the current level user state? */
		toss_live_vec(act->mact.curctx);		/* Dump live vectors if not user state */
		act->mact.curctx->VMXlevel = 0;			/* Mark as user state */
	}

	vsv = act->mact.curctx->VMXsave;			/* Get the top vector savearea */

	while(vsv) {						/* Any VMX saved state? */
		vpsv = vsv;					/* Remember so we can toss this */
		if (!vsv->save_hdr.save_level) break;		/* Done when we hit user state, if any */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_ret((savearea *)vpsv);			/* Release it */
	}

	act->mact.curctx->VMXsave = vsv;			/* Queue the user context to the top */
	if(act->mact.curctx->FPUlevel) {			/* Is the current level user state? */
		toss_live_fpu(act->mact.curctx);		/* Dump live float if not user state */
		act->mact.curctx->FPUlevel = 0;			/* Mark as user state */
	}

	fsv = act->mact.curctx->FPUsave;			/* Get the top float savearea */

	while(fsv) {						/* Any float saved state? */
		fpsv = fsv;					/* Remember so we can toss this */
		if (!fsv->save_hdr.save_level) break;		/* Done when we hit user state, if any */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_ret((savearea *)fpsv);			/* Release it */
	}

	act->mact.curctx->FPUsave = fsv;			/* Queue the user context to the top */
	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;					/* Get the general savearea */
	userpcb = 0;						/* Assume no user context for now */

	while(pcb) {						/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {		/* Is this a user savearea? */
			userpcb = pcb;				/* Remember it; the user savearea is kept */
			break;
		}
		svp = pcb;					/* Remember this */
		pcb = pcb->save_hdr.save_prev;			/* Get the one underneath ours */
		save_ret(svp);					/* Release it */
	}

	act->mact.pcb = userpcb;				/* Chain in the user if there is one, or 0 if not */
/*
 * act_virtual_machine_destroy:
 *	Shutdown any virtual machines associated with a thread
 */
act_virtual_machine_destroy(thread_act_t act)

	if(act->mact.bbDescAddr) {				/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);			/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {				/* Check if VMM is active */
		vmm_tear_down_all(act);				/* Kill off all VMM contexts */
	}
/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
act_machine_destroy(thread_act_t act)

	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif	/* MACH_ASSERT */

	/*
	 * This function will release all context.
	 */

	act_virtual_machine_destroy(act);			/* Make sure all virtual machines are dead first */
	/*
	 * Walk through and release all floating point and vector contexts. Also kill live context.
	 */

	toss_live_vec(act->mact.curctx);			/* Dump live vectors */

	vsv = act->mact.curctx->VMXsave;			/* Get the top vector savearea */

	while(vsv) {						/* Any VMX saved state? */
		vpsv = vsv;					/* Remember so we can toss this */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_release((savearea *)vpsv);			/* Release it */
	}

	act->mact.curctx->VMXsave = 0;				/* Kill chain */
	toss_live_fpu(act->mact.curctx);			/* Dump live float */

	fsv = act->mact.curctx->FPUsave;			/* Get the top float savearea */

	while(fsv) {						/* Any float saved state? */
		fpsv = fsv;					/* Remember so we can toss this */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_release((savearea *)fpsv);			/* Release it */
	}

	act->mact.curctx->FPUsave = 0;				/* Kill chain */
	/*
	 * free all regular saveareas.
	 */

	pcb = act->mact.pcb;					/* Get the general savearea */

	while(pcb) {						/* Any general saved state? */
		ppsv = pcb;					/* Remember so we can toss this */
		pcb = pcb->save_hdr.save_prev;			/* Get the one underneath ours */
		save_release(ppsv);				/* Release it */
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);		/* Unaccount for the number of saveareas we think we "need" */
act_machine_create(task_t task, thread_act_t thr_act)

	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */
void act_machine_init()

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

	/*
	 * If we start using kernel activations, we
	 * would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
act_machine_return(int code)

	thread_act_t thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
		       code, thr_act, thr_act->ref_count,
		       thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */

	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	assert( thr_act->thread->top_act == thr_act );

	/* This is the only activation attached to the shuttle... */

	thread_terminate_self();

	panic("act_machine_return: TALKING ZOMBIE! (1)");
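/*
 * thread_machine_set_current: make 'thread' the machine-level current thread on
 * this processor; active_kloaded gets the top activation if it is kernel-loaded,
 * THR_ACT_NULL otherwise.
 */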
thread_machine_set_current(struct thread_shuttle *thread)

	register int	my_cpu = cpu_number();

	set_machine_current_thread(thread);
	set_machine_current_act(thread->top_act);

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
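/*
 * thread_machine_init: machine-dependent initialization; the preprocessor check
 * below panics at startup if the kernel stack does not fit in a single PPC page.
 */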
thread_machine_init(void)

#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
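/*
 * Debugging helpers: dump_thread prints a thread shuttle, dump_act prints an
 * activation along with its reference counts.
 */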
dump_thread(thread_t th)

	printf(" thread @ 0x%x:\n", th);

dump_act(thread_act_t thr_act)

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
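/*
 * Return the user PC, i.e. the SRR0 value saved in the current activation's pcb.
 */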
	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->save_srr0);
/*
 * detach and return a kernel stack from a thread
 */
stack_detach(thread_t thread)

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen.
 */
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))

	thread_act_t thr_act;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();			/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = thr_act;
		sv->save_hdr.save_prev = thr_act->mact.pcb;
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;			/* Clear all floating point exceptions */
		sv->save_vrsave = 0;			/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;		/* Suppress java mode */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}
/*
 * move a stack from old to new thread
 */
stack_handoff(thread_t old,
	      thread_t new)

	facility_context *fowner;

	assert(new->top_act);
	assert(old->top_act);

	my_cpu = cpu_number();
	stack = stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->stack_privilege) {
		assert(new->stack_privilege);
		old->stack_privilege = new->stack_privilege;
		new->stack_privilege = stack;
	}

	per_proc_info[my_cpu].cpu_flags &= ~traceBE;
	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;	/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old->top_act) {	/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}
	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
	if(new->top_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new->top_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}
	thread_machine_set_current(new);
	active_stacks[my_cpu] = new->kernel_stack;
	per_proc_info[my_cpu].Uassist = new->top_act->mact.cthread_self;

	per_proc_info[my_cpu].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	per_proc_info[my_cpu].spcFlags = new->top_act->mact.specFlags;

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);	/* Cut trace entry if tracing */
/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */
call_continuation(void (*continuation)(void))

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);

	Call_continuation(continuation, tsp);
thread_swapin_mach_alloc(thread_t thread)

	assert(thread->top_act->mact.pcb == 0);

	sv->save_hdr.save_prev = 0;			/* Initialize back chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread->top_act;	/* Initialize owner */
	thread->top_act->mact.pcb = sv;