/* apple/xnu (xnu-344.49) - osfmk/ppc/pcb.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int real_ncpus;							/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
		struct thread_shuttle	*old,
		void			(*cont)(void),
		struct thread_shuttle	*new);


#if MACH_LDEBUG || MACH_KDB
void		log_thread_action (char *, long, long, long);
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

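/*
 * consider_machine_adjust: hand the request off to the pmap layer's
 * mapping adjustment code
 */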
void
consider_machine_adjust()
{
	consider_mapping_adjust();
}

/*
 * machine_kernel_stack_init: initialize the save state on a kernel stack
 * that has already been attached to a thread.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss;
	struct savearea *sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif	/* MACH_ASSERT */

	kss = (unsigned int *)STACK_IKS(stack);
	sv = thread->top_act->mact.pcb;						/* This for the sake of C */

	sv->save_lr = (unsigned int) start_pos;				/* Set up the execution address */
	sv->save_srr0 = (unsigned int) start_pos;			/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;				/* Set the normal running MSR */
	sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);	/* Point to the top frame on the stack */
	sv->save_fpscr = 0;									/* Clear all floating point exceptions */
	sv->save_vrsave = 0;								/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;						/* Suppress java mode */

	*((int *)sv->save_r1) = 0;							/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;						/* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 *
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	int	my_cpu;

#if MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif

	my_cpu = cpu_number();
	per_proc_info[my_cpu].old_thread = (unsigned int)old;
	per_proc_info[my_cpu].cpu_flags &= ~traceBE;		/* disable branch tracing if on */
	assert(old_act->kernel_loaded ||
	       active_stacks[my_cpu] == old_act->thread->kernel_stack);

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other processor if needed
	 */
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

#if DEBUG
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old, continuation, new);
	}
#endif /* DEBUG */

	/*
	 * If the old thread is running a VM, save the per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;		/* restore branch tracing */

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t	retval)
{

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif	/* MACH_ASSERT */

	thread->top_act->mact.pcb->save_r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
		      struct thread_shuttle *thread,
		      thread_act_t thr_act,
		      void (*start_pos)(thread_t))
{

	savearea		*sv;							/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;


#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */

	hw_atomic_add(&saveanchor.savetarget, 4);		/* Account for the number of saveareas we think we "need"
													   for this activation */
	assert(thr_act->mact.pcb == (savearea *)0);		/* Make sure there was no previous savearea */

	sv = save_alloc();								/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;						/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thr_act;				/* Set who owns it */
	sv->save_vscr[3] = 0x00010000;					/* Suppress java mode */
	thr_act->mact.pcb = sv;							/* Point to the save area */
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* Initialize facility context */
	thr_act->mact.facctx.facAct = thr_act;			/* Initialize facility context pointer to activation */

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif	/* MACH_ASSERT */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = MSR_EXPORT_MASK_SET;			/* Set the default user MSR */

	CIsTooLimited = (unsigned int *)(&sv->save_sr0);	/* Make a pointer 'cause C can't cast on the left */
	for(i=0; i<16; i++) {								/* Initialize all SRs */
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;	/* Set the SR value */
	}

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;
	facility_context *fowner;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other processor if needed
	 */
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[cpu_number()].FPU_owner;	/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[cpu_number()].VMX_owner;	/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

}

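/*
 * pcb_user_to_kernel: no machine-dependent work is needed here on PPC,
 * so this is a no-op
 */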
void
pcb_user_to_kernel(thread_act_t act)
{

	return;											/* Not needed, I hope... */
}

/*
 * act_machine_sv_free
 * release the non-user saveareas associated with an act; the user-level
 * savearea(s), if any, are kept
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int i;

/*
 *	This function will release all non-user state context.
 */

/*
 *
 *	Walk through and release all floating point and vector contexts that are not
 *	user state.  We will also blow away live context if it belongs to non-user state.
 *
 */

	if(act->mact.curctx->VMXlevel) {						/* Is the current level user state? */
		toss_live_vec(act->mact.curctx);					/* Dump live vectors if is not user */
		act->mact.curctx->VMXlevel = 0;						/* Mark as user state */
	}

	vsv = act->mact.curctx->VMXsave;						/* Get the top vector savearea */

	while(vsv) {											/* Any VMX saved state? */
		vpsv = vsv;											/* Remember so we can toss this */
		if (!vsv->save_hdr.save_level) break;				/* Done when hit user if any */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;		/* Get one underneath ours */
		save_ret((savearea *)vpsv);							/* Release it */
	}

	act->mact.curctx->VMXsave = vsv;						/* Queue the user context to the top */

	if(act->mact.curctx->FPUlevel) {						/* Is the current level user state? */
		toss_live_fpu(act->mact.curctx);					/* Dump live float if is not user */
		act->mact.curctx->FPUlevel = 0;						/* Mark as user state */
	}

	fsv = act->mact.curctx->FPUsave;						/* Get the top float savearea */

	while(fsv) {											/* Any float saved state? */
		fpsv = fsv;											/* Remember so we can toss this */
		if (!fsv->save_hdr.save_level) break;				/* Done when hit user if any */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;		/* Get one underneath ours */
		save_ret((savearea *)fpsv);							/* Release it */
	}

	act->mact.curctx->FPUsave = fsv;						/* Queue the user context to the top */

/*
 * free all regular saveareas except a user savearea, if any
 */

	pcb = act->mact.pcb;									/* Get the general savearea */
	userpcb = 0;											/* Assume no user context for now */

	while(pcb) {											/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {				/* Is this a user savearea? */
			userpcb = pcb;									/* Remember so we can keep this */
			break;
		}
		svp = pcb;											/* Remember this */
		pcb = pcb->save_hdr.save_prev;						/* Get one underneath ours */
		save_ret(svp);										/* Release it */
	}

	act->mact.pcb = userpcb;								/* Chain in the user if there is one, or 0 if not */

}

/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {							/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);					/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {							/* Check if VMM is active */
		vmm_tear_down_all(act);							/* Kill off all VMM contexts */
	}
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{

	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int i;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif	/* MACH_ASSERT */

/*
 *	This function will release all context.
 */

	act_virtual_machine_destroy(act);					/* Make sure all virtual machines are dead first */

/*
 *
 *	Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */

	toss_live_vec(act->mact.curctx);					/* Dump live vectors */

	vsv = act->mact.curctx->VMXsave;					/* Get the top vector savearea */

	while(vsv) {										/* Any VMX saved state? */
		vpsv = vsv;										/* Remember so we can toss this */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)vpsv);					/* Release it */
	}

	act->mact.curctx->VMXsave = 0;						/* Kill chain */

	toss_live_fpu(act->mact.curctx);					/* Dump live float */

	fsv = act->mact.curctx->FPUsave;					/* Get the top float savearea */

	while(fsv) {										/* Any float saved state? */
		fpsv = fsv;										/* Remember so we can toss this */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get one underneath ours */
		save_release((savearea *)fpsv);					/* Release it */
	}

	act->mact.curctx->FPUsave = 0;						/* Kill chain */

/*
 * free all regular saveareas.
 */

	pcb = act->mact.pcb;								/* Get the general savearea */

	while(pcb) {										/* Any general saved state? */
		ppsv = pcb;										/* Remember so we can toss this */
		pcb = pcb->save_hdr.save_prev;					/* Get one underneath ours */
		save_release(ppsv);								/* Release it */
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);			/* Unaccount for the number of saveareas we think we "need" */

}

kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb  (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	return KERN_SUCCESS;
}

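/*
 * act_machine_init: one-time sanity checks on the machine-dependent
 * thread state sizes; no per-activation setup is done here
 */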
void act_machine_init()
{
#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

	/*
	 * If we start using kernel activations,
	 * we would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}

void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
		       code, thr_act, thr_act->ref_count,
		       thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */


	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );
	assert(thr_act->thread->top_act == thr_act);

	/* This is the only activation attached to the shuttle... */

	thread_terminate_self();

	/*NOTREACHED*/
	panic("act_machine_return: TALKING ZOMBIE! (1)");
}

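/*
 * thread_machine_set_current: make the given shuttle the current thread
 * and activation on this processor, and note whether the top activation
 * is kernel-loaded
 */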
void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int	my_cpu = cpu_number();

	set_machine_current_thread(thread);
	set_machine_current_act(thread->top_act);

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

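/*
 * thread_machine_init: sanity check that KERNEL_STACK_SIZE does not
 * exceed a single page (PPC_PGBYTES)
 */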
void
thread_machine_init(void)
{
#ifdef	MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

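/*
 * Debugging helpers: print a short summary of a thread shuttle or a
 * thread activation
 */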
#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

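/*
 * get_useraddr: return the user-mode PC (SRR0) saved in the current
 * activation's pcb
 */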
unsigned int
get_useraddr()
{

	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
			thread, thread->priority,
			thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one.  the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 *
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
			thread, thread->priority,
			thread->sched_pri, start_pos,
			0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();								/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = thr_act;
		sv->save_hdr.save_prev = thr_act->mact.pcb;
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;								/* Clear all floating point exceptions */
		sv->save_vrsave = 0;							/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;					/* Suppress java mode */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}

/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
	      thread_t new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	int	my_cpu;

	assert(new->top_act);
	assert(old->top_act);

	my_cpu = cpu_number();
	stack = stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->stack_privilege) {
		assert(new->stack_privilege);
		old->stack_privilege = new->stack_privilege;
		new->stack_privilege = stack;
	}

	per_proc_info[my_cpu].cpu_flags &= ~traceBE;

	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new->top_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[my_cpu] = new->kernel_stack;
	per_proc_info[my_cpu].Uassist = new->top_act->mact.cthread_self;

	per_proc_info[my_cpu].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	per_proc_info[my_cpu].spcFlags = new->top_act->mact.specFlags;

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);	/* Cut trace entry if tracing */

	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void) )
{

	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}

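/*
 * thread_swapin_mach_alloc: allocate and chain a fresh general savearea
 * for a thread being swapped back in; assumes the activation currently
 * has no pcb
 */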
void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();
	assert(sv);
	sv->save_hdr.save_prev = 0;						/* Initialize back chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread->top_act;		/* Initialize owner */
	thread->top_act->mact.pcb = sv;

}