2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Copyright (c) 1990,1991,1992 The University of Utah and
27 * the Center for Software Science (CSS). All rights reserved.
29 * Permission to use, copy, modify and distribute this software is hereby
30 * granted provided that (1) source code retains these copyright, permission,
31 * and disclaimer notices, and (2) redistributions including binaries
32 * reproduce the notices in supporting documentation, and (3) all advertising
33 * materials mentioning features or use of this software display the following
34 * acknowledgement: ``This product includes software developed by the Center
35 * for Software Science at the University of Utah.''
37 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
38 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
39 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
41 * CSS requests users of this software to return to css-dist@cs.utah.edu any
42 * improvements that they make and grant CSS redistribution rights.
44 * Utah $Hdr: pcb.c 1.23 92/06/27$
51 #include <kern/task.h>
52 #include <kern/thread.h>
53 #include <kern/thread_act.h>
54 #include <kern/thread_swap.h>
55 #include <mach/thread_status.h>
56 #include <vm/vm_kern.h>
57 #include <kern/mach_param.h>
59 #include <kern/misc_protos.h>
60 #include <ppc/misc_protos.h>
61 #include <ppc/fpu_protos.h>
62 #include <ppc/exception.h>
63 #include <ppc/proc_reg.h>
67 #include <ppc/mappings.h>
68 #include <ppc/savearea.h>
69 #include <ppc/Firmware.h>
71 #include <ppc/thread_act.h>
72 #include <ppc/vmachmon.h>
73 #include <ppc/low_trace.h>
75 #include <sys/kdebug.h>
/* Globals defined elsewhere in the kernel; declared here for the
   SMP checks and savearea accounting done in this file. */
77 extern int real_ncpus
; /* Number of actual CPUs */
78 extern struct Saveanchor saveanchor
; /* Aligned savearea anchor */
81 * These constants are dumb. They should not be in asm.h!
/* KF_SIZE: bytes reserved for the top kernel stack frame (fixed frame
   + argument area + red zone); subtracted from the stack top when
   initializing save_r1 below. */
84 #define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
/* Event counters for floating-point and vector (AltiVec) context
   traps/switches.
   NOTE(review): incremented elsewhere (likely in the low-level trap
   handlers); no visible code in this file updates them — confirm. */
87 int fpu_trap_count
= 0;
88 int fpu_switch_count
= 0;
89 int vec_trap_count
= 0;
90 int vec_switch_count
= 0;
93 extern struct thread_shuttle
*Switch_context(
94 struct thread_shuttle
*old
,
96 struct thread_shuttle
*new);
99 #if MACH_LDEBUG || MACH_KDB
100 void log_thread_action (char *, long, long, long);
105 * consider_machine_collect: try to collect machine-dependent pages
108 consider_machine_collect()
111 * none currently available
117 consider_machine_adjust()
119 consider_mapping_adjust();
124 * stack_attach: Attach a kernel stack to a thread.
127 machine_kernel_stack_init(
128 struct thread_shuttle
*thread
,
129 void (*start_pos
)(thread_t
))
135 assert(thread
->top_act
->mact
.pcb
);
136 assert(thread
->kernel_stack
);
137 stack
= thread
->kernel_stack
;
140 if (watchacts
& WA_PCB
)
141 printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread
,stack
,start_pos
);
142 #endif /* MACH_ASSERT */
144 kss
= (unsigned int *)STACK_IKS(stack
);
145 sv
=(savearea
*)(thread
->top_act
->mact
.pcb
); /* This for the sake of C */
147 sv
->save_lr
= (unsigned int) start_pos
; /* Set up the execution address */
148 sv
->save_srr0
= (unsigned int) start_pos
; /* Here too */
149 sv
->save_srr1
= MSR_SUPERVISOR_INT_OFF
; /* Set the normal running MSR */
150 sv
->save_r1
= (vm_offset_t
) ((int)kss
- KF_SIZE
); /* Point to the top frame on the stack */
151 sv
->save_xfpscrpad
= 0; /* Start with a clear fpscr */
152 sv
->save_xfpscr
= 0; /* Start with a clear fpscr */
154 *((int *)sv
->save_r1
) = 0; /* Zero the frame backpointer */
155 thread
->top_act
->mact
.ksp
= 0; /* Show that the kernel stack is in use already */
160 * switch_context: Switch from one thread to another, needed for
164 struct thread_shuttle
*
166 struct thread_shuttle
*old
,
167 void (*continuation
)(void),
168 struct thread_shuttle
*new)
170 register thread_act_t old_act
= old
->top_act
, new_act
= new->top_act
;
171 register struct thread_shuttle
* retval
;
173 #if MACH_LDEBUG || MACH_KDB
174 log_thread_action("switch",
177 (long)__builtin_return_address(0));
179 per_proc_info
[cpu_number()].old_thread
= old
;
180 per_proc_info
[cpu_number()].cpu_flags
&= ~traceBE
; /* disable branch tracing if on */
181 assert(old_act
->kernel_loaded
||
182 active_stacks
[cpu_number()] == old_act
->thread
->kernel_stack
);
184 check_simple_locks();
186 /* Our context might wake up on another processor, so we must
187 * not keep hot state in our FPU, it must go back to the pcb
188 * so that it can be found by the other if needed
190 if(real_ncpus
> 1) { /* This is potentially slow, so only do when actually SMP */
191 fpu_save(old_act
); /* Save floating point if used */
192 vec_save(old_act
); /* Save vector if used */
196 if (watchacts
& WA_PCB
) {
197 printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
198 old
,continuation
,new);
203 * We do not have to worry about the PMAP module, so switch.
205 * We must not use top_act->map since this may not be the actual
206 * task map, but the map being used for a klcopyin/out.
209 if(new_act
->mact
.specFlags
& runningVM
) { /* Is the new guy running a VM? */
210 pmap_switch(new_act
->mact
.vmmCEntry
->vmmPmap
); /* Switch to the VM's pmap */
212 else { /* otherwise, we use the task's pmap */
213 new_pmap
= new_act
->task
->map
->pmap
;
214 if ((old_act
->task
->map
->pmap
!= new_pmap
) || (old_act
->mact
.specFlags
& runningVM
)) {
215 pmap_switch(new_pmap
); /* Switch if there is a change */
219 /* Sanity check - is the stack pointer inside the stack that
220 * we're about to switch to? Is the execution address within
221 * the kernel's VM space??
224 printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n",
225 new->kernel_stack
, new_act
->mact
.pcb
->ss
.r1
,
226 new_act
->mact
.pcb
->ss
.lr
, old
, continuation
, new); /* (TEST/DEBUG) */
227 assert((new->kernel_stack
< new_act
->mact
.pcb
->ss
.r1
) &&
228 ((unsigned int)STACK_IKS(new->kernel_stack
) >
229 new_act
->mact
.pcb
->ss
.r1
));
230 assert(new_act
->mact
.pcb
->ss
.lr
< VM_MAX_KERNEL_ADDRESS
);
234 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_SCHED
) | DBG_FUNC_NONE
,
235 (int)old
, (int)new, old
->sched_pri
, new->sched_pri
, 0);
238 retval
= Switch_context(old
, continuation
, new);
239 assert(retval
!= (struct thread_shuttle
*)NULL
);
241 if (branch_tracing_enabled())
242 per_proc_info
[cpu_number()].cpu_flags
|= traceBE
; /* restore branch tracing */
244 /* We've returned from having switched context, so we should be
245 * back in the original context.
252 * Alter the thread's state so that a following thread_exception_return
253 * will make the thread return 'retval' from a syscall.
256 thread_set_syscall_return(
257 struct thread_shuttle
*thread
,
258 kern_return_t retval
)
260 struct ppc_saved_state
*ssp
= &thread
->top_act
->mact
.pcb
->ss
;
263 if (watchacts
& WA_PCB
)
264 printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread
,retval
);
265 #endif /* MACH_ASSERT */
271 * Initialize the machine-dependent state for a new thread.
274 thread_machine_create(
275 struct thread_shuttle
*thread
,
276 thread_act_t thr_act
,
277 void (*start_pos
)(thread_t
))
280 savearea
*sv
; /* Pointer to newly allocated savearea */
281 unsigned int *CIsTooLimited
, i
;
285 if (watchacts
& WA_PCB
)
286 printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread
, thr_act
, start_pos
);
287 #endif /* MACH_ASSERT */
289 hw_atomic_add(&saveanchor
.saveneed
, 4); /* Account for the number of saveareas we think we "need"
290 for this activation */
291 assert(thr_act
->mact
.pcb
== (pcb_t
)0); /* Make sure there was no previous savearea */
293 sv
= save_alloc(); /* Go get us a savearea */
295 bzero((char *) sv
, sizeof(struct pcb
)); /* Clear out the whole shebang */
297 sv
->save_act
= thr_act
; /* Set who owns it */
299 thr_act
->mact
.pcb
= (pcb_t
)sv
; /* Point to the save area */
302 if (watchacts
& WA_PCB
)
303 printf("pcb_init(%x) pcb=%x\n", thr_act
, sv
);
304 #endif /* MACH_ASSERT */
306 * User threads will pull their context from the pcb when first
307 * returning to user mode, so fill in all the necessary values.
308 * Kernel threads are initialized from the save state structure
309 * at the base of the kernel stack (see stack_attach()).
312 sv
->save_srr1
= MSR_EXPORT_MASK_SET
; /* Set the default user MSR */
314 CIsTooLimited
= (unsigned int *)(&sv
->save_sr0
); /* Make a pointer 'cause C can't cast on the left */
315 for(i
=0; i
<16; i
++) { /* Initialize all SRs */
316 CIsTooLimited
[i
] = SEG_REG_PROT
| (i
<< 20) | thr_act
->task
->map
->pmap
->space
; /* Set the SR value */
318 sv
->save_sr_copyin
= SEG_REG_PROT
| (SR_COPYIN_NUM
<<20) | thr_act
->task
->map
->pmap
->space
; /* Default the copyin */
320 return(KERN_SUCCESS
);
324 * Machine-dependent cleanup prior to destroying a thread
327 thread_machine_destroy( thread_t thread
)
331 if (thread
->kernel_stack
) {
339 * flush out any lazily evaluated HW state in the
340 * owning thread's context, before termination.
343 thread_machine_flush( thread_act_t cur_act
)
348 * Number of times we needed to swap an activation back in before
351 int switch_act_swapins
= 0;
356 * Machine-dependent details of activation switching. Called with
357 * RPC locks held and preemption disabled.
368 /* Our context might wake up on another processor, so we must
369 * not keep hot state in our FPU, it must go back to the pcb
370 * so that it can be found by the other if needed
372 if(real_ncpus
> 1) { /* This is potentially slow, so only do when actually SMP */
373 fpu_save(old
); /* Save floating point if used */
374 vec_save(old
); /* Save vector if used */
377 active_stacks
[cpu
] = thread
->kernel_stack
;
379 ast_context(new, cpu
);
381 /* Activations might have different pmaps
382 * (process->kernel->server, for example).
383 * Change space if needed
386 if(new->mact
.specFlags
& runningVM
) { /* Is the new guy running a VM? */
387 pmap_switch(new->mact
.vmmCEntry
->vmmPmap
); /* Switch to the VM's pmap */
389 else { /* otherwise, we use the task's pmap */
390 new_pmap
= new->task
->map
->pmap
;
391 if ((old
->task
->map
->pmap
!= new_pmap
) || (old
->mact
.specFlags
& runningVM
)) {
392 pmap_switch(new_pmap
);
399 pcb_user_to_kernel(thread_act_t act
)
402 return; /* Not needed, I hope... */
407 * act_machine_sv_free
408 * release saveareas associated with an act. if flag is true, release
409 * user level savearea(s) too, else don't
411 * this code cannot block so we call the proper save area free routine
414 act_machine_sv_free(thread_act_t act
)
416 register pcb_t pcb
,userpcb
,npcb
;
417 register savearea
*svp
;
421 * This next bit insures that any live facility context for this thread is discarded on every processor
422 * that may have it. We go through all per-processor blocks and zero the facility owner if
423 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
424 * some other processor could change the owner while we are clearing it. It turns out that
425 * this is the only place where we need the interlock, normal use of the owner field is cpu-local
426 * and doesn't need the interlock. Because we are called during termination, and a thread
427 * terminates itself, the context on other processors has been saved (because we save it as
428 * part of the context switch), even if it is still considered live. Since the dead thread is
429 * not running elsewhere, and the context is saved, any other processor looking at the owner
430 * field will not attempt to save context again, meaning that it doesn't matter if the owner
431 * changes out from under it.
435 * free VMX and FPU saveareas. do not free user save areas.
436 * user VMX and FPU saveareas, if any, i'm told are last in
437 * the chain so we just stop if we find them
438 * we identify user VMX and FPU saveareas when we find a pcb
439 * with a save level of 0. we identify user regular save
440 * areas when we find one with MSR_PR set
443 pcb
= act
->mact
.VMX_pcb
; /* Get the top vector savearea */
444 while(pcb
) { /* Any VMX saved state? */
445 svp
= (savearea
*)pcb
; /* save lots of casting later */
446 if (svp
->save_level_vec
== 0) break; /* done when hit user if any */
447 pcb
= (pcb_t
)svp
->save_prev_vector
; /* Get one underneath our's */
448 svp
->save_flags
&= ~SAVvmxvalid
; /* Clear the VMX flag */
449 if(!(svp
->save_flags
& SAVinuse
)) { /* Anyone left with this one? */
451 save_ret(svp
); /* release it */
454 act
->mact
.VMX_pcb
= pcb
;
455 if (act
->mact
.VMX_lvl
!= 0) {
456 for(i
=0; i
< real_ncpus
; i
++) { /* Cycle through processors */
457 (void)hw_compare_and_store((unsigned int)act
, 0, &per_proc_info
[i
].VMX_thread
); /* Clear if ours */
461 pcb
= act
->mact
.FPU_pcb
; /* Get the top floating point savearea */
462 while(pcb
) { /* Any floating point saved state? */
463 svp
= (savearea
*)pcb
;
464 if (svp
->save_level_fp
== 0) break; /* done when hit user if any */
465 pcb
= (pcb_t
)svp
->save_prev_float
; /* Get one underneath our's */
466 svp
->save_flags
&= ~SAVfpuvalid
; /* Clear the floating point flag */
467 if(!(svp
->save_flags
& SAVinuse
)) { /* Anyone left with this one? */
468 save_ret(svp
); /* Nope, release it */
471 act
->mact
.FPU_pcb
= pcb
;
472 if (act
->mact
.FPU_lvl
!= 0) {
473 for(i
=0; i
< real_ncpus
; i
++) { /* Cycle through processors */
474 (void)hw_compare_and_store((unsigned int)act
, 0, &per_proc_info
[i
].FPU_thread
); /* Clear if ours */
479 * free all regular saveareas except a user savearea, if any
485 svp
= (savearea
*)pcb
;
486 if ((svp
->save_srr1
& MASK(MSR_PR
))) {
487 assert(userpcb
== (pcb_t
)0);
489 svp
= (savearea
*)userpcb
;
490 npcb
= (pcb_t
)svp
->save_prev
;
491 svp
->save_prev
= (struct savearea
*)0;
493 svp
->save_flags
&= ~SAVattach
; /* Clear the attached flag */
494 npcb
= (pcb_t
)svp
->save_prev
;
495 if(!(svp
->save_flags
& SAVinuse
)) /* Anyone left with this one? */
500 act
->mact
.pcb
= userpcb
;
506 * act_virtual_machine_destroy:
507 * Shutdown any virtual machines associated with a thread
510 act_virtual_machine_destroy(thread_act_t act
)
512 if(act
->mact
.bbDescAddr
) { /* Check if the Blue box assist is active */
513 disable_bluebox_internal(act
); /* Kill off bluebox */
516 if(act
->mact
.vmmControl
) { /* Check if VMM is active */
517 vmm_tear_down_all(act
); /* Kill off all VMM contexts */
522 * act_machine_destroy: Shutdown any state associated with a thread pcb.
525 act_machine_destroy(thread_act_t act
)
527 register pcb_t pcb
, opcb
;
531 if (watchacts
& WA_PCB
)
532 printf("act_machine_destroy(0x%x)\n", act
);
533 #endif /* MACH_ASSERT */
535 act_virtual_machine_destroy(act
);
538 * This next bit insures that any live facility context for this thread is discarded on every processor
539 * that may have it. We go through all per-processor blocks and zero the facility owner if
540 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
541 * some other processor could change the owner while we are clearing it. It turns out that
542 * this is the only place where we need the interlock, normal use of the owner field is cpu-local
543 * and doesn't need the interlock. Because we are called during termination, and a thread
544 * terminates itself, the context on other processors has been saved (because we save it as
545 * part of the context switch), even if it is still considered live. Since the dead thread is
546 * not running elsewhere, and the context is saved, any other processor looking at the owner
547 * field will not attempt to save context again, meaning that it doesn't matter if the owner
548 * changes out from under it.
551 for(i
=0; i
< real_ncpus
; i
++) { /* Cycle through processors */
552 (void)hw_compare_and_store((unsigned int)act
, 0, &per_proc_info
[i
].FPU_thread
); /* Clear if ours */
553 (void)hw_compare_and_store((unsigned int)act
, 0, &per_proc_info
[i
].VMX_thread
); /* Clear if ours */
556 pcb
= act
->mact
.VMX_pcb
; /* Get the top vector savearea */
557 while(pcb
) { /* Any VMX saved state? */
558 opcb
= pcb
; /* Save current savearea address */
559 pcb
= (pcb_t
)(((savearea
*)pcb
)->save_prev_vector
); /* Get one underneath our's */
560 ((savearea
*)opcb
)->save_flags
&= ~SAVvmxvalid
; /* Clear the VMX flag */
562 if(!(((savearea
*)opcb
)->save_flags
& SAVinuse
)) { /* Anyone left with this one? */
563 save_release((savearea
*)opcb
); /* Nope, release it */
566 act
->mact
.VMX_pcb
= (pcb_t
)0; /* Clear pointer */
568 pcb
= act
->mact
.FPU_pcb
; /* Get the top floating point savearea */
569 while(pcb
) { /* Any floating point saved state? */
570 opcb
= pcb
; /* Save current savearea address */
571 pcb
= (pcb_t
)(((savearea
*)pcb
)->save_prev_float
); /* Get one underneath our's */
572 ((savearea
*)opcb
)->save_flags
&= ~SAVfpuvalid
; /* Clear the floating point flag */
574 if(!(((savearea
*)opcb
)->save_flags
& SAVinuse
)) { /* Anyone left with this one? */
575 save_release((savearea
*)opcb
); /* Nope, release it */
578 act
->mact
.FPU_pcb
= (pcb_t
)0; /* Clear pointer */
580 pcb
= act
->mact
.pcb
; /* Get the top normal savearea */
581 act
->mact
.pcb
= (pcb_t
)0; /* Clear pointer */
583 while(pcb
) { /* Any normal saved state left? */
584 opcb
= pcb
; /* Keep track of what we're working on */
585 pcb
= (pcb_t
)(((savearea
*)pcb
)->save_prev
); /* Get one underneath our's */
587 ((savearea
*)opcb
)->save_flags
= 0; /* Clear all flags since we release this in any case */
588 save_release((savearea
*)opcb
); /* Release this one */
591 hw_atomic_sub(&saveanchor
.saveneed
, 4); /* Unaccount for the number of saveareas we think we "need"
592 for this activation */
596 act_machine_create(task_t task
, thread_act_t thr_act
)
599 * Clear & Init the pcb (sets up user-mode s regs)
600 * We don't use this anymore.
605 unsigned int *CIsTooLimited
;
611 void act_machine_init()
614 if (watchacts
& WA_PCB
)
615 printf("act_machine_init()\n");
616 #endif /* MACH_ASSERT */
618 /* Good to verify these once */
619 assert( THREAD_MACHINE_STATE_MAX
<= THREAD_STATE_MAX
);
621 assert( THREAD_STATE_MAX
>= PPC_THREAD_STATE_COUNT
);
622 assert( THREAD_STATE_MAX
>= PPC_EXCEPTION_STATE_COUNT
);
623 assert( THREAD_STATE_MAX
>= PPC_FLOAT_STATE_COUNT
);
624 assert( THREAD_STATE_MAX
>= sizeof(struct ppc_saved_state
)/sizeof(int));
627 * If we start using kernel activations,
628 * would normally create kernel_thread_pool here,
629 * populating it from the act_zone
634 act_machine_return(int code
)
636 thread_act_t thr_act
= current_act();
639 if (watchacts
& WA_EXIT
)
640 printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
641 code
, thr_act
, thr_act
->ref_count
,
642 thr_act
->thread
, thr_act
->thread
->ref_count
);
643 #endif /* MACH_ASSERT */
647 * This code is called with nothing locked.
648 * It also returns with nothing locked, if it returns.
650 * This routine terminates the current thread activation.
651 * If this is the only activation associated with its
652 * thread shuttle, then the entire thread (shuttle plus
653 * activation) is terminated.
655 assert( code
== KERN_TERMINATED
);
658 act_lock_thread(thr_act
);
660 #ifdef CALLOUT_RPC_MODEL
662 * JMM - This needs to get cleaned up to work under the much simpler
663 * return (instead of callout model).
665 if (thr_act
->thread
->top_act
!= thr_act
) {
667 * this is not the top activation;
668 * if possible, we should clone the shuttle so that
669 * both the root RPC-chain and the soon-to-be-orphaned
670 * RPC-chain have shuttles
672 * JMM - Cloning is a horrible idea! Instead we should alert
673 * the pieces upstream to return the shuttle. We will use
676 act_unlock_thread(thr_act
);
677 panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
680 if (thr_act
->lower
!= THR_ACT_NULL
) {
681 thread_t cur_thread
= current_thread();
682 thread_act_t cur_act
;
683 struct ipc_port
*iplock
;
685 /* terminate the entire thread (shuttle plus activation) */
686 /* terminate only this activation, send an appropriate */
687 /* return code back to the activation that invoked us. */
688 iplock
= thr_act
->pool_port
; /* remember for unlock call */
689 thr_act
->lower
->alerts
|= SERVER_TERMINATED
;
690 install_special_handler(thr_act
->lower
);
692 /* Return to previous act with error code */
694 act_locked_act_reference(thr_act
); /* keep it around */
695 act_switch_swapcheck(cur_thread
, (ipc_port_t
)0);
697 (void) switch_act(THR_ACT_NULL
);
698 /* assert(thr_act->ref_count == 0); */ /* XXX */
699 cur_act
= cur_thread
->top_act
;
700 MACH_RPC_RET(cur_act
) = KERN_RPC_SERVER_TERMINATED
;
701 machine_kernel_stack_init(cur_thread
, mach_rpc_return_error
);
703 * The following unlocks must be done separately since fields
704 * used by `act_unlock_thread()' have been cleared, meaning
705 * that it would not release all of the appropriate locks.
707 rpc_unlock(cur_thread
);
708 if (iplock
) ip_unlock(iplock
); /* must be done separately */
710 act_deallocate(thr_act
); /* free it */
711 Load_context(cur_thread
);
714 panic("act_machine_return: TALKING ZOMBIE! (2)");
717 #endif /* CALLOUT_RPC_MODEL */
719 /* This is the only activation attached to the shuttle... */
721 assert(thr_act
->thread
->top_act
== thr_act
);
722 act_unlock_thread(thr_act
);
723 thread_terminate_self();
726 panic("act_machine_return: TALKING ZOMBIE! (1)");
730 thread_machine_set_current(struct thread_shuttle
*thread
)
732 register int my_cpu
= cpu_number();
734 cpu_data
[my_cpu
].active_thread
= thread
;
736 active_kloaded
[my_cpu
] = thread
->top_act
->kernel_loaded
? thread
->top_act
: THR_ACT_NULL
;
740 thread_machine_init(void)
743 #if KERNEL_STACK_SIZE > PPC_PGBYTES
744 panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
753 printf("pcb @ %8.8x:\n", pcb
);
760 dump_thread(thread_t th
)
762 printf(" thread @ 0x%x:\n", th
);
766 dump_act(thread_act_t thr_act
)
771 printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
772 thr_act
, thr_act
->ref_count
,
773 thr_act
->thread
, thr_act
->thread
? thr_act
->thread
->ref_count
:0,
774 thr_act
->task
, thr_act
->task
? thr_act
->task
->ref_count
: 0);
776 printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
777 thr_act
->alerts
, thr_act
->alert_mask
,
778 thr_act
->suspend_count
, thr_act
->active
,
779 thr_act
->higher
, thr_act
->lower
);
781 return((int)thr_act
);
790 thread_act_t thr_act
= current_act();
792 return(thr_act
->mact
.pcb
->ss
.srr0
);
796 * detach and return a kernel stack from a thread
800 stack_detach(thread_t thread
)
804 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_DETACH
),
805 thread
, thread
->priority
,
806 thread
->sched_pri
, 0,
809 stack
= thread
->kernel_stack
;
810 thread
->kernel_stack
= 0;
815 * attach a kernel stack to a thread and initialize it
817 * attaches a stack to a thread. if there is no save
818 * area we allocate one. the top save area is then
819 * loaded with the pc (continuation address), the initial
820 * stack pointer, and a std kernel MSR. if the top
821 * save area is the user save area bad things will
827 stack_attach(struct thread_shuttle
*thread
,
829 void (*start_pos
)(thread_t
))
831 thread_act_t thr_act
;
835 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_ATTACH
),
836 thread
, thread
->priority
,
837 thread
->sched_pri
, start_pos
,
841 kss
= (unsigned int *)STACK_IKS(stack
);
842 thread
->kernel_stack
= stack
;
844 /* during initialization we sometimes do not have an
845 activation. in that case do not do anything */
846 if ((thr_act
= thread
->top_act
) != 0) {
847 sv
= save_get(); /* cannot block */
848 // bzero((char *) sv, sizeof(struct pcb));
849 sv
->save_act
= thr_act
;
850 sv
->save_prev
= (struct savearea
*)thr_act
->mact
.pcb
;
851 thr_act
->mact
.pcb
= (pcb_t
)sv
;
853 sv
->save_srr0
= (unsigned int) start_pos
;
854 /* sv->save_r3 = ARG ? */
855 sv
->save_r1
= (vm_offset_t
)((int)kss
- KF_SIZE
);
856 sv
->save_srr1
= MSR_SUPERVISOR_INT_OFF
;
857 sv
->save_xfpscrpad
= 0; /* Start with a clear fpscr */
858 sv
->save_xfpscr
= 0; /* Start with a clear fpscr */
859 *((int *)sv
->save_r1
) = 0;
860 thr_act
->mact
.ksp
= 0;
867 * move a stack from old to new thread
871 stack_handoff(thread_t old
,
878 assert(new->top_act
);
879 assert(old
->top_act
);
881 stack
= stack_detach(old
);
882 new->kernel_stack
= stack
;
884 per_proc_info
[cpu_number()].cpu_flags
&= ~traceBE
;
887 if (real_ncpus
> 1) {
888 fpu_save(old
->top_act
);
889 vec_save(old
->top_act
);
893 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_HANDOFF
) | DBG_FUNC_NONE
,
894 (int)old
, (int)new, old
->sched_pri
, new->sched_pri
, 0);
897 if(new->top_act
->mact
.specFlags
& runningVM
) { /* Is the new guy running a VM? */
898 pmap_switch(new->top_act
->mact
.vmmCEntry
->vmmPmap
); /* Switch to the VM's pmap */
900 else { /* otherwise, we use the task's pmap */
901 new_pmap
= new->top_act
->task
->map
->pmap
;
902 if ((old
->top_act
->task
->map
->pmap
!= new_pmap
) || (old
->top_act
->mact
.specFlags
& runningVM
)) {
903 pmap_switch(new_pmap
);
907 thread_machine_set_current(new);
908 active_stacks
[cpu_number()] = new->kernel_stack
;
909 per_proc_info
[cpu_number()].Uassist
= new->top_act
->mact
.cthread_self
;
911 per_proc_info
[cpu_number()].ppbbTaskEnv
= new->top_act
->mact
.bbTaskEnv
;
912 per_proc_info
[cpu_number()].spcFlags
= new->top_act
->mact
.specFlags
;
914 if (branch_tracing_enabled())
915 per_proc_info
[cpu_number()].cpu_flags
|= traceBE
;
917 if(trcWork
.traceMask
) dbgTrace(0x12345678, (unsigned int)old
->top_act
, (unsigned int)new->top_act
); /* Cut trace entry if tracing */
923 * clean and initialize the current kernel stack and go to
924 * the given continuation routine
928 call_continuation(void (*continuation
)(void) )
934 assert(current_thread()->kernel_stack
);
935 kss
= (unsigned int *)STACK_IKS(current_thread()->kernel_stack
);
936 assert(continuation
);
938 tsp
= (vm_offset_t
)((int)kss
- KF_SIZE
);
942 Call_continuation(continuation
, tsp
);
948 thread_swapin_mach_alloc(thread_t thread
)
952 assert(thread
->top_act
->mact
.pcb
== 0);
956 // bzero((char *) sv, sizeof(struct pcb));
957 sv
->save_act
= thread
->top_act
;
958 thread
->top_act
->mact
.pcb
= (pcb_t
)sv
;