/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int real_ncpus;							/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)
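/*
 * (KF_SIZE appears to be the space reserved for an empty kernel stack frame:
 * the fixed frame header (FM_SIZE), the argument save area (ARG_SIZE), and
 * the red zone kept below the stack pointer (FM_REDZONE), all of which are
 * defined in ppc/asm.h.)
 */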

#if DEBUG
int   fpu_trap_count = 0;
int   fpu_switch_count = 0;
int   vec_trap_count = 0;
int   vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
		struct thread_shuttle	*old,
		void			(*cont)(void),
		struct thread_shuttle	*new);


#if	MACH_LDEBUG || MACH_KDB
void		log_thread_action (char *, long, long, long);
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}

/*
 * machine_kernel_stack_init: Initialize the kernel stack and savearea of a thread.
 */
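/*
 * (As read from the code below: the thread's top savearea is seeded so that
 * execution begins at start_pos the first time the thread is switched to --
 * both the saved LR and SRR0 point at start_pos, SRR1 holds a supervisor MSR
 * with interrupts off, and R1 points at a zeroed top frame carved KF_SIZE
 * bytes below the top of the kernel stack.)
 */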
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss;
	struct savearea *sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread,stack,start_pos);
#endif /* MACH_ASSERT */

	kss = (unsigned int *)STACK_IKS(stack);
	sv = thread->top_act->mact.pcb;						/* This for the sake of C */

	sv->save_lr = (unsigned int) start_pos;				/* Set up the execution address */
	sv->save_srr0 = (unsigned int) start_pos;			/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;				/* Set the normal running MSR */
	sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);	/* Point to the top frame on the stack */
	sv->save_fpscr = 0;									/* Clear all floating point exceptions */
	sv->save_vrsave = 0;								/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;						/* Suppress Java mode */

	*((int *)sv->save_r1) = 0;							/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;						/* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, switching address
 *		   spaces as needed.
 *
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	int	my_cpu;

#if MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif

	my_cpu = cpu_number();
	per_proc_info[my_cpu].old_thread = (unsigned int)old;
	per_proc_info[my_cpu].cpu_flags &= ~traceBE;		/* disable branch tracing if on */
	assert(old_act->kernel_loaded ||
	       active_stacks[my_cpu] == old_act->thread->kernel_stack);

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
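	/*
	 * (per_proc_info[cpu].FPU_owner and .VMX_owner appear to track which
	 * facility context is currently live in this CPU's registers; saving
	 * it back to its savearea here is what lets the thread resume with
	 * correct floating point/vector state on a different processor.)
	 */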
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

#if DEBUG
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old,continuation,new);
	}
#endif /* DEBUG */

	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old_act->mact.specFlags & runningVM) {
		old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}

	/*
	 *	We do not have to worry about the PMAP module, so switch.
	 *
	 *	We must not use top_act->map since this may not be the actual
	 *	task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;		/* restore branch tracing */

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
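/*
 * (Note, inferred from the PowerPC calling convention: r3 is the return-value
 * register, so writing retval into save_r3 of the user savearea is what the
 * user-mode caller eventually sees once thread_exception_return restores its
 * state.)
 */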
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t	retval)
{

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread,retval);
#endif	/* MACH_ASSERT */

	thread->top_act->mact.pcb->save_r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
		      struct thread_shuttle *thread,
		      thread_act_t thr_act,
		      void (*start_pos)(thread_t))
{

	savearea		*sv;							/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;


#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */

	hw_atomic_add(&saveanchor.savetarget, 4);		/* Account for the number of saveareas we think we "need"
													   for this activation */
	assert(thr_act->mact.pcb == (savearea *)0);		/* Make sure there was no previous savearea */

	sv = save_alloc();								/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;						/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thr_act;				/* Set who owns it */
	sv->save_vscr[3] = 0x00010000;					/* Suppress Java mode */
	thr_act->mact.pcb = sv;							/* Point to the save area */
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* Initialize facility context */
	thr_act->mact.facctx.facAct = thr_act;			/* Initialize facility context pointer to activation */

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif	/* MACH_ASSERT */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = MSR_EXPORT_MASK_SET;			/* Set the default user MSR */

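	/*
	 * (Inferred from the constants used below, not from documentation: each
	 * of the 16 segment registers gets the protection bits from SEG_REG_PROT,
	 * the segment index shifted into the upper bits of the VSID field, and
	 * the task's pmap space ID in the low bits, so every user segment maps
	 * into this task's own VSID range.)
	 */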
	CIsTooLimited = (unsigned int *)(&sv->save_sr0);	/* Make a pointer 'cause C can't cast on the left */
	for(i=0; i<16; i++) {								/* Initialize all SRs */
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;	/* Set the SR value */
	}

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;
	facility_context *fowner;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[cpu_number()].FPU_owner;	/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[cpu_number()].VMX_owner;	/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old) {					/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {				/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);		/* Switch to the VM's pmap */
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

}

void
pcb_user_to_kernel(thread_act_t act)
{

	return;												/* Not needed, I hope... */
}

/*
 * act_machine_sv_free
 *	release the saveareas associated with an act; any user level
 *	savearea is kept and requeued, everything else is freed
 *
 *	this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int  i;

/*
 *	This function will release all non-user state context.
 */

/*
 *
 * 	Walk through and release all floating point and vector contexts that are not
 *	user state. We will also blow away live context if it belongs to non-user state.
 *
 */

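	/*
	 * (In these chains a facility level / save_level of zero appears to
	 * denote user state: the loops below pop and free saveareas until a
	 * user-level entry is reached, which is kept and becomes the new top
	 * of the chain.)
	 */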
	if(act->mact.curctx->VMXlevel) {						/* Is the current level user state? */
		toss_live_vec(act->mact.curctx);					/* Dump live vectors if it is not user */
		act->mact.curctx->VMXlevel = 0;						/* Mark as user state */
	}

	vsv = act->mact.curctx->VMXsave;						/* Get the top vector savearea */

	while(vsv) {											/* Any VMX saved state? */
		vpsv = vsv;											/* Remember so we can toss this */
		if (!vsv->save_hdr.save_level) break;				/* Stop when we hit user state, if any */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;		/* Get the one underneath ours */
		save_ret((savearea *)vpsv);							/* Release it */
	}

	act->mact.curctx->VMXsave = vsv;						/* Queue the user context to the top */

	if(act->mact.curctx->FPUlevel) {						/* Is the current level user state? */
		toss_live_fpu(act->mact.curctx);					/* Dump live float if it is not user */
		act->mact.curctx->FPUlevel = 0;						/* Mark as user state */
	}

	fsv = act->mact.curctx->FPUsave;						/* Get the top float savearea */

	while(fsv) {											/* Any float saved state? */
		fpsv = fsv;											/* Remember so we can toss this */
		if (!fsv->save_hdr.save_level) break;				/* Stop when we hit user state, if any */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;		/* Get the one underneath ours */
		save_ret((savearea *)fpsv);							/* Release it */
	}

	act->mact.curctx->FPUsave = fsv;						/* Queue the user context to the top */

/*
 * free all regular saveareas except a user savearea, if any
 */

	pcb = act->mact.pcb;									/* Get the general savearea */
	userpcb = 0;											/* Assume no user context for now */

	while(pcb) {											/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {				/* Is this a user savearea? */
			userpcb = pcb;									/* Remember so we can keep this */
			break;
		}
		svp = pcb;											/* Remember this */
		pcb = pcb->save_hdr.save_prev;						/* Get the one underneath ours */
		save_ret(svp);										/* Release it */
	}

	act->mact.pcb = userpcb;								/* Chain in the user if there is one, or 0 if not */

}


/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {							/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);					/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {							/* Check if VMM is active */
		vmm_tear_down_all(act);							/* Kill off all VMM contexts */
	}
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{

	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int  i;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif	/* MACH_ASSERT */

/*
 *	This function will release all context.
 */

	act_virtual_machine_destroy(act);					/* Make sure all virtual machines are dead first */

/*
 *
 * 	Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */

	toss_live_vec(act->mact.curctx);					/* Dump live vectors */

	vsv = act->mact.curctx->VMXsave;					/* Get the top vector savearea */

	while(vsv) {										/* Any VMX saved state? */
		vpsv = vsv;										/* Remember so we can toss this */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_release((savearea *)vpsv);					/* Release it */
	}

	act->mact.curctx->VMXsave = 0;						/* Kill chain */

	toss_live_fpu(act->mact.curctx);					/* Dump live float */

	fsv = act->mact.curctx->FPUsave;					/* Get the top float savearea */

	while(fsv) {										/* Any float saved state? */
		fpsv = fsv;										/* Remember so we can toss this */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Get the one underneath ours */
		save_release((savearea *)fpsv);					/* Release it */
	}

	act->mact.curctx->FPUsave = 0;						/* Kill chain */

/*
 * free all regular saveareas.
 */

	pcb = act->mact.pcb;								/* Get the general savearea */

	while(pcb) {										/* Any general saved state? */
		ppsv = pcb;										/* Remember so we can toss this */
		pcb = pcb->save_hdr.save_prev;					/* Get the one underneath ours */
		save_release(ppsv);								/* Release it */
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);			/* Unaccount for the number of saveareas we think we "need" */

}


kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	return KERN_SUCCESS;
}

void act_machine_init()
{
#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

	/*
	 * If we start using kernel activations,
	 * would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}

void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
		       code, thr_act, thr_act->ref_count,
		       thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */


	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );
	assert(thr_act->thread->top_act == thr_act);

	/* This is the only activation attached to the shuttle... */

	thread_terminate_self();

	/*NOTREACHED*/
	panic("act_machine_return: TALKING ZOMBIE! (1)");
}

void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int	my_cpu = cpu_number();

	set_machine_current_thread(thread);
	set_machine_current_act(thread->top_act);

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef	MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

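/*
 * (get_useraddr below returns save_srr0 from the user savearea; on an
 * exception or system call SRR0 holds the user-mode instruction address,
 * so this is effectively the thread's user PC as of the last time it
 * entered the kernel.)
 */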
unsigned int
get_useraddr()
{

	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	if (thread->top_act)
		act_machine_sv_free(thread->top_act);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * Attaches a stack to a thread. If there is no save
 * area we allocate one. The top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a standard kernel MSR. If the top
 * save area is the user save area, bad things will
 * happen.
 *
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();								/* cannot block */
		sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
		sv->save_hdr.save_act = thr_act;
		sv->save_hdr.save_prev = thr_act->mact.pcb;
		thr_act->mact.pcb = sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_fpscr = 0;								/* Clear all floating point exceptions */
		sv->save_vrsave = 0;							/* Set the vector save state */
		sv->save_vscr[3] = 0x00010000;					/* Suppress Java mode */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}

/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
	      thread_t new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	int	my_cpu;

	assert(new->top_act);
	assert(old->top_act);

	my_cpu = cpu_number();
	stack = stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->stack_privilege) {
		assert(new->stack_privilege);
		old->stack_privilege = new->stack_privilege;
		new->stack_privilege = stack;
	}
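	/*
	 * (As far as can be read from the swap above: if the stack being handed
	 * off was old's privileged stack, old's privilege marker is repointed at
	 * new's previous privileged stack and new's marker follows the stack it
	 * just received, so each thread still has a privileged stack afterwards.)
	 */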

	per_proc_info[my_cpu].cpu_flags &= ~traceBE;

	if(real_ncpus > 1) {								/* This is potentially slow, so only do when actually SMP */
		fowner = per_proc_info[my_cpu].FPU_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				fpu_save(fowner);						/* Yes, save it */
			}
		}
		fowner = per_proc_info[my_cpu].VMX_owner;		/* Cache this because it may change */
		if(fowner) {									/* Is there any live context? */
			if(fowner->facAct == old->top_act) {		/* Is it for us? */
				vec_save(fowner);						/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per_proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per_proc without updating the thread spcFlags.
	 */
	if(old->top_act->mact.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
		old->top_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new->top_act->mact.vmmCEntry->vmmContextPhys;
		per_proc_info[my_cpu].FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[my_cpu] = new->kernel_stack;
	per_proc_info[my_cpu].Uassist = new->top_act->mact.cthread_self;

	per_proc_info[my_cpu].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	per_proc_info[my_cpu].spcFlags = new->top_act->mact.specFlags;

	if (branch_tracing_enabled())
		per_proc_info[my_cpu].cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);	/* Cut trace entry if tracing */

	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

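/*
 * (As read from the body below: a fresh top frame is carved KF_SIZE bytes
 * below the top of the current kernel stack, its backchain word is zeroed,
 * and Call_continuation then transfers control to the continuation on that
 * frame.)
 */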
void
call_continuation(void (*continuation)(void) )
{

	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}

void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();
	assert(sv);
	sv->save_hdr.save_prev = 0;							/* Initialize back chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread->top_act;			/* Initialize owner */
	thread->top_act->mact.pcb = sv;

}