/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>

#include <sys/kdebug.h>

extern int real_ncpus;					/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)
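
/*
 * For reference, a minimal sketch of how the routines below carve the
 * top frame out of a kernel stack using KF_SIZE.  This is illustrative
 * only (the helper name is made up); it mirrors what
 * machine_kernel_stack_init(), stack_attach(), and call_continuation()
 * actually do with save_r1.
 */
#if 0
static vm_offset_t
example_top_frame(vm_offset_t stack)		/* (EXAMPLE) hypothetical helper */
{
	unsigned int	*kss;
	vm_offset_t	tsp;

	kss = (unsigned int *)STACK_IKS(stack);		/* Find the interrupt stack base */
	tsp = (vm_offset_t)((int)kss - KF_SIZE);	/* Leave room for frame, args, and red zone */
	*((int *)tsp) = 0;				/* Zero the frame backpointer */
	return tsp;
}
#endif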

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
	struct thread_shuttle	*old,
	void			(*cont)(void),
	struct thread_shuttle	*new);


#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif


/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}


/*
 * machine_kernel_stack_init: Initialize the kernel stack and savearea of a thread.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss;
	struct savearea	*sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif	/* MACH_ASSERT */

	kss = (unsigned int *)STACK_IKS(stack);
	sv = (savearea *)(thread->top_act->mact.pcb);		/* This for the sake of C */

	sv->save_lr = (unsigned int) start_pos;			/* Set up the execution address */
	sv->save_srr0 = (unsigned int) start_pos;		/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;			/* Set the normal running MSR */
	sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);	/* Point to the top frame on the stack */
	sv->save_xfpscrpad = 0;					/* Start with a clear fpscr */
	sv->save_xfpscr = 0;					/* Start with a clear fpscr */

	*((int *)sv->save_r1) = 0;				/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;				/* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 *
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle *retval;
	pmap_t	new_pmap;
#if MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif
	per_proc_info[cpu_number()].old_thread = old;
	assert(old_act->kernel_loaded ||
	       active_stacks[cpu_number()] == old_act->thread->kernel_stack);
	if(get_preemption_level() != 1) {		/* Make sure we are not at the wrong preemption level */
		panic("switch_context: Invalid preemption level (%d); old = %08X, cont = %08X, new = %08X\n",
			get_preemption_level(), old, continuation, new);
	}
	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU; it must go back to the pcb
	 * so that it can be found by the other processor if needed.
	 */
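	/* (A descriptive note: fpu_save() and vec_save() spill any live
	 * floating point or AltiVec register state into the owning
	 * thread's savearea chain, which is what lets another processor
	 * find and reload that context later.)
	 */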
	if(real_ncpus > 1) {				/* This is potentially slow, so only do when actually SMP */
		fpu_save();				/* Save floating point if used */
		vec_save();				/* Save vector if used */
	}

#if DEBUG
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old, continuation, new);
	}
#endif	/* DEBUG */

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);		/* Switch if there is a change */
		}
	}

	/* Sanity check - is the stack pointer inside the stack that
	 * we're about to switch to? Is the execution address within
	 * the kernel's VM space?
	 */
#if 0
	printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n",
	       new->kernel_stack, new_act->mact.pcb->ss.r1,
	       new_act->mact.pcb->ss.lr, old, continuation, new);	/* (TEST/DEBUG) */
	assert((new->kernel_stack < new_act->mact.pcb->ss.r1) &&
	       ((unsigned int)STACK_IKS(new->kernel_stack) >
		new_act->mact.pcb->ss.r1));
	assert(new_act->mact.pcb->ss.lr < VM_MAX_KERNEL_ADDRESS);
#endif


	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle *)NULL);

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t	retval)
{
	struct ppc_saved_state *ssp = &thread->top_act->mact.pcb->ss;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif	/* MACH_ASSERT */

	ssp->r3 = retval;
}
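
/*
 * A hedged usage sketch (EXAMPLE only; the continuation name is made
 * up): a syscall handler that must block can record its eventual
 * return value here before giving up the processor, so that when the
 * thread later runs thread_exception_return, r3 already holds the
 * result.
 */
#if 0
	thread_set_syscall_return(current_thread(), KERN_SUCCESS);
	thread_block(some_syscall_continuation);	/* (EXAMPLE) hypothetical continuation */
#endif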

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
		      struct thread_shuttle *thread,
		      thread_act_t thr_act,
		      void (*start_pos)(thread_t))
{

	savearea	*sv;			/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;


#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */

	hw_atomic_add(&saveanchor.saveneed, 4);	/* Account for the number of saveareas we think we "need"
						   for this activation */
	assert(thr_act->mact.pcb == (pcb_t)0);	/* Make sure there was no previous savearea */

	sv = save_alloc();			/* Go get us a savearea */

	bzero((char *) sv, sizeof(struct pcb));	/* Clear out the whole shebang */

	sv->save_act = thr_act;			/* Set who owns it */
	sv->save_vrsave = 0;
	thr_act->mact.pcb = (pcb_t)sv;		/* Point to the save area */

	thread->kernel_stack = (int)stack_alloc(thread, start_pos);	/* Allocate our kernel stack */
	assert(thread->kernel_stack);		/* Make sure we got it */

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif	/* MACH_ASSERT */
	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = MSR_EXPORT_MASK_SET;	/* Set the default user MSR */

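	/* (A descriptive note on the loop below: each of the 16 segment
	 * registers gets the protection bits from SEG_REG_PROT, a
	 * per-segment component (i << 20), and the address space ID
	 * from the task's pmap, so each task sees its own set of
	 * virtual segment IDs.)
	 */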
	CIsTooLimited = (unsigned int *)(&sv->save_sr0);	/* Make a pointer 'cause C can't cast on the left */
	for(i = 0; i < 16; i++) {				/* Initialize all SRs */
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;	/* Set the SR value */
	}
	sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM << 20) | thr_act->task->map->pmap->space;	/* Default the copyin */

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU; it must go back to the pcb
	 * so that it can be found by the other processor if needed.
	 */
	if(real_ncpus > 1) {				/* This is potentially slow, so only do when actually SMP */
		fpu_save();				/* Save floating point if used */
		vec_save();				/* Save vector if used */
	}

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

}

void
pcb_user_to_kernel(thread_act_t act)
{

	return;					/* Not needed, I hope... */
}


/*
 * act_machine_sv_free
 * release the saveareas associated with an act; the user-level
 * savearea(s), if any, are preserved rather than released
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register pcb_t		pcb, userpcb, npcb;
	register savearea	*svp;
	register int		i;

	/*
	 * This next bit ensures that any live facility context for this thread is discarded on every processor
	 * that may have it. We go through all per-processor blocks and zero the facility owner if
	 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
	 * some other processor could change the owner while we are clearing it. It turns out that
	 * this is the only place where we need the interlock; normal use of the owner field is cpu-local
	 * and doesn't need the interlock. Because we are called during termination, and a thread
	 * terminates itself, the context on other processors has been saved (because we save it as
	 * part of the context switch), even if it is still considered live. Since the dead thread is
	 * not running elsewhere, and the context is saved, any other processor looking at the owner
	 * field will not attempt to save context again, meaning that it doesn't matter if the owner
	 * changes out from under it.
	 */
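
	/* (A descriptive note, stated as an assumption about the primitive:
	 * hw_compare_and_store(old, new, addr) atomically stores new at
	 * addr only when *addr still equals old, so a concurrent owner
	 * change simply turns our store into a no-op instead of
	 * clobbering another thread's ownership.)
	 */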

	/*
	 * free VMX and FPU saveareas. do not free user save areas.
	 * user VMX and FPU saveareas, if any, i'm told are last in
	 * the chain so we just stop if we find them
	 * we identify user VMX and FPU saveareas when we find a pcb
	 * with a save level of 0. we identify user regular save
	 * areas when we find one with MSR_PR set
	 */

	pcb = act->mact.VMX_pcb;			/* Get the top vector savearea */
	while(pcb) {					/* Any VMX saved state? */
		svp = (savearea *)pcb;			/* save lots of casting later */
		if (svp->save_level_vec == 0) break;	/* done when hit user if any */
		pcb = (pcb_t)svp->save_prev_vector;	/* Get the one underneath ours */
		svp->save_flags &= ~SAVvmxvalid;	/* Clear the VMX flag */
		if(!(svp->save_flags & SAVinuse)) {	/* Anyone left with this one? */

			save_ret(svp);			/* release it */
		}
	}
	act->mact.VMX_pcb = pcb;
	if (act->mact.VMX_lvl != 0) {
		for(i = 0; i < real_ncpus; i++) {	/* Cycle through processors */
			(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);	/* Clear if ours */
		}
	}

	pcb = act->mact.FPU_pcb;			/* Get the top floating point savearea */
	while(pcb) {					/* Any floating point saved state? */
		svp = (savearea *)pcb;
		if (svp->save_level_fp == 0) break;	/* done when hit user if any */
		pcb = (pcb_t)svp->save_prev_float;	/* Get the one underneath ours */
		svp->save_flags &= ~SAVfpuvalid;	/* Clear the floating point flag */
		if(!(svp->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_ret(svp);			/* Nope, release it */
		}
	}
	act->mact.FPU_pcb = pcb;
	if (act->mact.FPU_lvl != 0) {
		for(i = 0; i < real_ncpus; i++) {	/* Cycle through processors */
			(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);	/* Clear if ours */
		}
	}

	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;
	userpcb = (pcb_t)0;
	while(pcb) {
		svp = (savearea *)pcb;
		if ((svp->save_srr1 & MASK(MSR_PR))) {
			assert(userpcb == (pcb_t)0);
			userpcb = pcb;
			svp = (savearea *)userpcb;
			npcb = (pcb_t)svp->save_prev;
			svp->save_prev = (struct savearea *)0;
		} else {
			svp->save_flags &= ~SAVattach;	/* Clear the attached flag */
			npcb = (pcb_t)svp->save_prev;
			if(!(svp->save_flags & SAVinuse))	/* Anyone left with this one? */
				save_ret(svp);
		}
		pcb = npcb;
	}
	act->mact.pcb = userpcb;

}


/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {		/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);	/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {		/* Check if VMM is active */
		vmm_tear_down_all(act);		/* Kill off all VMM contexts */
	}
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{
	register pcb_t	pcb, opcb;
	int		i;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif	/* MACH_ASSERT */

	act_virtual_machine_destroy(act);

	/*
	 * This next bit ensures that any live facility context for this thread is discarded on every processor
	 * that may have it. We go through all per-processor blocks and zero the facility owner if
	 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
	 * some other processor could change the owner while we are clearing it. It turns out that
	 * this is the only place where we need the interlock; normal use of the owner field is cpu-local
	 * and doesn't need the interlock. Because we are called during termination, and a thread
	 * terminates itself, the context on other processors has been saved (because we save it as
	 * part of the context switch), even if it is still considered live. Since the dead thread is
	 * not running elsewhere, and the context is saved, any other processor looking at the owner
	 * field will not attempt to save context again, meaning that it doesn't matter if the owner
	 * changes out from under it.
	 */

	for(i = 0; i < real_ncpus; i++) {			/* Cycle through processors */
		(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);	/* Clear if ours */
		(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);	/* Clear if ours */
	}

	pcb = act->mact.VMX_pcb;				/* Get the top vector savearea */
	while(pcb) {						/* Any VMX saved state? */
		opcb = pcb;					/* Save current savearea address */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev_vector);	/* Get the one underneath ours */
		((savearea *)opcb)->save_flags &= ~SAVvmxvalid;	/* Clear the VMX flag */

		if(!(((savearea *)opcb)->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_release((savearea *)opcb);		/* Nope, release it */
		}
	}
	act->mact.VMX_pcb = (pcb_t)0;				/* Clear pointer */

	pcb = act->mact.FPU_pcb;				/* Get the top floating point savearea */
	while(pcb) {						/* Any floating point saved state? */
		opcb = pcb;					/* Save current savearea address */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev_float);	/* Get the one underneath ours */
		((savearea *)opcb)->save_flags &= ~SAVfpuvalid;	/* Clear the floating point flag */

		if(!(((savearea *)opcb)->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_release((savearea *)opcb);		/* Nope, release it */
		}
	}
	act->mact.FPU_pcb = (pcb_t)0;				/* Clear pointer */

	pcb = act->mact.pcb;					/* Get the top normal savearea */
	act->mact.pcb = (pcb_t)0;				/* Clear pointer */

	while(pcb) {						/* Any normal saved state left? */
		opcb = pcb;					/* Keep track of what we're working on */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev);	/* Get the one underneath ours */

		((savearea *)opcb)->save_flags = 0;		/* Clear all flags since we release this in any case */
		save_release((savearea *)opcb);			/* Release this one */
	}

	hw_atomic_sub(&saveanchor.saveneed, 4);			/* Unaccount for the number of saveareas we think we "need"
								   for this activation */
}

kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	register pcb_t	pcb;
	register int	i;
	unsigned int	*CIsTooLimited;
	pmap_t		pmap;

	return KERN_SUCCESS;
}

void act_machine_init()
{
#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
	assert( THREAD_STATE_MAX >= sizeof(struct ppc_saved_state)/sizeof(int));

	/*
	 * If we start using kernel activations,
	 * we would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}

void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

#if MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
		       code, thr_act, thr_act->ref_count,
		       thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */


	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );

	act_lock_thread(thr_act);

#ifdef CALLOUT_RPC_MODEL
	/*
	 * JMM - This needs to get cleaned up to work under the much simpler
	 * return model (instead of the callout model).
	 */
	if (thr_act->thread->top_act != thr_act) {
		/*
		 * this is not the top activation;
		 * if possible, we should clone the shuttle so that
		 * both the root RPC-chain and the soon-to-be-orphaned
		 * RPC-chain have shuttles
		 *
		 * JMM - Cloning is a horrible idea! Instead we should alert
		 * the pieces upstream to return the shuttle. We will use
		 * alerts for this.
		 */
		act_unlock_thread(thr_act);
		panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
	}

	if (thr_act->lower != THR_ACT_NULL) {
		thread_t	cur_thread = current_thread();
		thread_act_t	cur_act;
		struct ipc_port	*iplock;

		/* terminate only this activation; send an appropriate */
		/* return code back to the activation that invoked us. */
		iplock = thr_act->pool_port;		/* remember for unlock call */
		thr_act->lower->alerts |= SERVER_TERMINATED;
		install_special_handler(thr_act->lower);

		/* Return to previous act with error code */

		act_locked_act_reference(thr_act);	/* keep it around */
		act_switch_swapcheck(cur_thread, (ipc_port_t)0);

		(void) switch_act(THR_ACT_NULL);
		/* assert(thr_act->ref_count == 0); */	/* XXX */
		cur_act = cur_thread->top_act;
		MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;
		machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
		/*
		 * The following unlocks must be done separately since fields
		 * used by `act_unlock_thread()' have been cleared, meaning
		 * that it would not release all of the appropriate locks.
		 */
		rpc_unlock(cur_thread);
		if (iplock) ip_unlock(iplock);		/* must be done separately */
		act_unlock(thr_act);
		act_deallocate(thr_act);		/* free it */
		Load_context(cur_thread);
		/*NOTREACHED*/

		panic("act_machine_return: TALKING ZOMBIE! (2)");
	}

#endif /* CALLOUT_RPC_MODEL */

	/* This is the only activation attached to the shuttle... */

	assert(thr_act->thread->top_act == thr_act);
	act_unlock_thread(thr_act);
	thread_terminate_self();

	/*NOTREACHED*/
	panic("act_machine_return: TALKING ZOMBIE! (1)");
}

void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int	my_cpu = cpu_number();

	cpu_data[my_cpu].active_thread = thread;

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT
void
dump_pcb(pcb_t pcb)
{
	printf("pcb @ %8.8x:\n", pcb);
#if DEBUG
	regDump(&pcb->ss);
#endif	/* DEBUG */
}

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

unsigned int
get_useraddr()
{

	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->ss.srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a standard kernel MSR. if the top
 * save area is the user save area, bad things will
 * happen.
 *
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t	thr_act;
	unsigned int	*kss;
	struct savearea	*sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();		/* cannot block */
		// bzero((char *) sv, sizeof(struct pcb));
		sv->save_act = thr_act;
		sv->save_prev = (struct savearea *)thr_act->mact.pcb;
		thr_act->mact.pcb = (pcb_t)sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_xfpscrpad = 0;		/* Start with a clear fpscr */
		sv->save_xfpscr = 0;		/* Start with a clear fpscr */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}

/*
 * move a stack from old to new thread
 */
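
/*
 * (A descriptive note: stack_handoff() lets the new thread take over
 * the old thread's kernel stack directly, so the scheduler can hand
 * the processor off without the full savearea switch that
 * switch_context()/Switch_context() performs.  After the handoff the
 * old thread has no kernel stack, so it cannot run again until one
 * is attached to it.)
 */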

void
stack_handoff(thread_t old,
	      thread_t new)
{

	vm_offset_t	stack;
	pmap_t		new_pmap;

	assert(new->top_act);
	assert(old->top_act);

	stack = stack_detach(old);
	new->kernel_stack = stack;

#if NCPUS > 1
	if (real_ncpus > 1) {
		fpu_save();
		vec_save();
	}
#endif

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {	/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[cpu_number()] = new->kernel_stack;
	per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self;
	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void))
{

	unsigned int	*kss;
	vm_offset_t	tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}
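
/*
 * A hedged usage sketch (EXAMPLE only; the continuation name is made
 * up): a thread whose stack contents are no longer needed can reset
 * its frame and restart in a continuation rather than unwinding.
 * Call_continuation() switches to the fresh frame and never returns
 * here.
 */
#if 0
	call_continuation(some_idle_continuation);	/* (EXAMPLE) hypothetical continuation */
	/*NOTREACHED*/
#endif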

void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();
	assert(sv);
	// bzero((char *) sv, sizeof(struct pcb));
	sv->save_act = thread->top_act;
	thread->top_act->mact.pcb = (pcb_t)sv;

}