/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *      Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int real_ncpus;                          /* Number of actual CPUs */
extern struct Saveanchor saveanchor;            /* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
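/*
 * KF_SIZE is the space carved out at the top of a kernel stack for the
 * initial frame: a fixed stack frame (FM_SIZE) plus the argument save
 * area (ARG_SIZE) plus the red zone (FM_REDZONE).  Everywhere below that
 * builds a fresh kernel frame backs save_r1 off by this amount from the
 * top of the stack.
 */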

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
        struct thread_shuttle   *old,
        void                    (*cont)(void),
        struct thread_shuttle   *new);


#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif


/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
    /*
     * none currently available
     */
    return;
}

void
consider_machine_adjust()
{
    consider_mapping_adjust();
}


/*
 * machine_kernel_stack_init: initialize the save state at the top of a
 * thread's kernel stack so that it will begin execution at start_pos.
 */
void
machine_kernel_stack_init(
    struct thread_shuttle *thread,
    void (*start_pos)(thread_t))
{
    vm_offset_t     stack;
    unsigned int    *kss;
    struct savearea *sv;

    assert(thread->top_act->mact.pcb);
    assert(thread->kernel_stack);
    stack = thread->kernel_stack;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif /* MACH_ASSERT */

    kss = (unsigned int *)STACK_IKS(stack);
    sv = thread->top_act->mact.pcb;                     /* This for the sake of C */

    sv->save_lr = (unsigned int) start_pos;             /* Set up the execution address */
    sv->save_srr0 = (unsigned int) start_pos;           /* Here too */
    sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;             /* Set the normal running MSR */
    sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);   /* Point to the top frame on the stack */
    sv->save_fpscr = 0;                                 /* Clear all floating point exceptions */
    sv->save_vrsave = 0;                                /* Set the vector save state */
    sv->save_vscr[3] = 0x00010000;                      /* Suppress java mode */

    *((int *)sv->save_r1) = 0;                          /* Zero the frame backpointer */
    thread->top_act->mact.ksp = 0;                      /* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, needed for
 *                 switching of space
 *
 */
struct thread_shuttle*
switch_context(
    struct thread_shuttle *old,
    void (*continuation)(void),
    struct thread_shuttle *new)
{
    register thread_act_t old_act = old->top_act, new_act = new->top_act;
    register struct thread_shuttle* retval;
    pmap_t new_pmap;
    facility_context *fowner;

#if MACH_LDEBUG || MACH_KDB
    log_thread_action("switch",
                      (long)old,
                      (long)new,
                      (long)__builtin_return_address(0));
#endif

    per_proc_info[cpu_number()].old_thread = (unsigned int)old;
    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;  /* disable branch tracing if on */
    assert(old_act->kernel_loaded ||
           active_stacks[cpu_number()] == old_act->thread->kernel_stack);

    check_simple_locks();

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU; it must go back to the pcb
     * so that the other processor can find it if needed.
     */
    if(real_ncpus > 1) {                                /* This is potentially slow, so only do when actually SMP */
        fowner = per_proc_info[cpu_number()].FPU_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old->top_act) {        /* Is it for us? */
                fpu_save(fowner);                       /* Yes, save it */
            }
        }
        fowner = per_proc_info[cpu_number()].VMX_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old->top_act) {        /* Is it for us? */
                vec_save(fowner);                       /* Yes, save it */
            }
        }
    }

#if DEBUG
    if (watchacts & WA_PCB) {
        printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
               old, continuation, new);
    }
#endif /* DEBUG */

    /*
     * We do not have to worry about the PMAP module, so switch.
     *
     * We must not use top_act->map since this may not be the actual
     * task map, but the map being used for a klcopyin/out.
     */

    if(new_act->mact.specFlags & runningVM) {           /* Is the new guy running a VM? */
        pmap_switch(new_act->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
    }
    else {                                              /* otherwise, we use the task's pmap */
        new_pmap = new_act->task->map->pmap;
        if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);                      /* Switch if there is a change */
        }
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
                          (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    retval = Switch_context(old, continuation, new);
    assert(retval != (struct thread_shuttle*)NULL);

    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;   /* restore branch tracing */

    /* We've returned from having switched context, so we should be
     * back in the original context.
     */

    return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
    struct thread_shuttle *thread,
    kern_return_t retval)
{

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif /* MACH_ASSERT */

    thread->top_act->mact.pcb->save_r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
    struct thread_shuttle *thread,
    thread_act_t thr_act,
    void (*start_pos)(thread_t))
{

    savearea        *sv;                                /* Pointer to newly allocated savearea */
    unsigned int    *CIsTooLimited, i;


#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif /* MACH_ASSERT */

    hw_atomic_add(&saveanchor.savetarget, 4);           /* Account for the number of saveareas we think we "need"
                                                           for this activation */
    assert(thr_act->mact.pcb == (savearea *)0);         /* Make sure there was no previous savearea */

    sv = save_alloc();                                  /* Go get us a savearea */

    bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));  /* Clear it */

    sv->save_hdr.save_prev = 0;                         /* Clear the back pointer */
    sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);   /* Mark as in use */
    sv->save_hdr.save_act = thr_act;                    /* Set who owns it */
    sv->save_vscr[3] = 0x00010000;                      /* Suppress java mode */
    thr_act->mact.pcb = sv;                             /* Point to the save area */
    thr_act->mact.curctx = &thr_act->mact.facctx;       /* Initialize facility context */
    thr_act->mact.facctx.facAct = thr_act;              /* Initialize facility context pointer to activation */

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif /* MACH_ASSERT */
    /*
     * User threads will pull their context from the pcb when first
     * returning to user mode, so fill in all the necessary values.
     * Kernel threads are initialized from the save state structure
     * at the base of the kernel stack (see stack_attach()).
     */

    sv->save_srr1 = MSR_EXPORT_MASK_SET;                /* Set the default user MSR */

    CIsTooLimited = (unsigned int *)(&sv->save_sr0);    /* Make a pointer 'cause C can't cast on the left */
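    /*
     * Each segment register value below combines the kernel protection
     * bits (SEG_REG_PROT), the segment number folded into the upper VSID
     * bits (i << 20), and the address space ID taken from the task's
     * pmap, so the new thread starts out mapped into its own task's space.
     */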
    for(i=0; i<16; i++) {                               /* Initialize all SRs */
        CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;  /* Set the SR value */
    }

    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
    spl_t s;

    if (thread->kernel_stack) {
        s = splsched();
        stack_free(thread);
        splx(s);
    }
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
    thread_t        thread,
    thread_act_t    old,
    thread_act_t    new,
    int             cpu)
{
    pmap_t new_pmap;
    facility_context *fowner;

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU; it must go back to the pcb
     * so that the other processor can find it if needed.
     */
    if(real_ncpus > 1) {                                /* This is potentially slow, so only do when actually SMP */
        fowner = per_proc_info[cpu_number()].FPU_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old) {                 /* Is it for us? */
                fpu_save(fowner);                       /* Yes, save it */
            }
        }
        fowner = per_proc_info[cpu_number()].VMX_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old) {                 /* Is it for us? */
                vec_save(fowner);                       /* Yes, save it */
            }
        }
    }

    active_stacks[cpu] = thread->kernel_stack;

    ast_context(new, cpu);

    /* Activations might have different pmaps
     * (process->kernel->server, for example).
     * Change space if needed
     */

    if(new->mact.specFlags & runningVM) {               /* Is the new guy running a VM? */
        pmap_switch(new->mact.vmmCEntry->vmmPmap);      /* Switch to the VM's pmap */
    }
    else {                                              /* otherwise, we use the task's pmap */
        new_pmap = new->task->map->pmap;
        if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

}

void
pcb_user_to_kernel(thread_act_t act)
{

    return;                                             /* Not needed, I hope... */
}


/*
 * act_machine_sv_free
 *      release all non-user saveareas associated with an act; the user
 *      level savearea, if any, is kept
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
    register savearea *pcb, *userpcb;
    register savearea_vec *vsv, *vpsv;
    register savearea_fpu *fsv, *fpsv;
    register savearea *svp;
    register int i;

/*
 * This function will release all non-user state context.
 */

/*
 *
 * Walk through and release all floating point and vector contexts that are not
 * user state. We will also blow away live context if it belongs to non-user state.
 *
 */

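    /*
     * Convention used below: a facility savearea whose save_level is zero
     * holds user-level state; non-zero levels are non-user state.  The
     * walks therefore stop at the first zero-level savearea and leave it
     * queued as the user context.
     */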
    if(act->mact.curctx->VMXlevel) {                    /* Is the current level user state? */
        toss_live_vec(act->mact.curctx);                /* Dump live vectors if not user state */
        act->mact.curctx->VMXlevel = 0;                 /* Mark as user state */
    }

    vsv = act->mact.curctx->VMXsave;                    /* Get the top vector savearea */

    while(vsv) {                                        /* Any VMX saved state? */
        vpsv = vsv;                                     /* Remember so we can toss this */
        if (!vsv->save_hdr.save_level) break;           /* Done when hit user if any */
        vsv = (savearea_vec *)vsv->save_hdr.save_prev;  /* Get the one underneath ours */
        save_ret((savearea *)vpsv);                     /* Release it */
    }

    act->mact.curctx->VMXsave = vsv;                    /* Queue the user context to the top */

    if(act->mact.curctx->FPUlevel) {                    /* Is the current level user state? */
        toss_live_fpu(act->mact.curctx);                /* Dump live float if not user state */
        act->mact.curctx->FPUlevel = 0;                 /* Mark as user state */
    }

    fsv = act->mact.curctx->FPUsave;                    /* Get the top float savearea */

    while(fsv) {                                        /* Any float saved state? */
        fpsv = fsv;                                     /* Remember so we can toss this */
        if (!fsv->save_hdr.save_level) break;           /* Done when hit user if any */
        fsv = (savearea_fpu *)fsv->save_hdr.save_prev;  /* Get the one underneath ours */
        save_ret((savearea *)fpsv);                     /* Release it */
    }

    act->mact.curctx->FPUsave = fsv;                    /* Queue the user context to the top */

/*
 * free all regular saveareas except a user savearea, if any
 */

    pcb = act->mact.pcb;                                /* Get the general savearea */
    userpcb = 0;                                        /* Assume no user context for now */

    while(pcb) {                                        /* Any general saved state? */
        if (pcb->save_srr1 & MASK(MSR_PR)) {            /* Is this a user savearea? */
            userpcb = pcb;                              /* Remember it so we can keep it */
            break;
        }
        svp = pcb;                                      /* Remember this */
        pcb = pcb->save_hdr.save_prev;                  /* Get the one underneath ours */
        save_ret(svp);                                  /* Release it */
    }

    act->mact.pcb = userpcb;                            /* Chain in the user if there is one, or 0 if not */

}


/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
    if(act->mact.bbDescAddr) {                          /* Check if the Blue box assist is active */
        disable_bluebox_internal(act);                  /* Kill off bluebox */
    }

    if(act->mact.vmmControl) {                          /* Check if VMM is active */
        vmm_tear_down_all(act);                         /* Kill off all VMM contexts */
    }
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{

    register savearea *pcb, *ppsv;
    register savearea_vec *vsv, *vpsv;
    register savearea_fpu *fsv, *fpsv;
    register savearea *svp;
    register int i;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_destroy(0x%x)\n", act);
#endif /* MACH_ASSERT */

/*
 * This function will release all context.
 */

    act_virtual_machine_destroy(act);                   /* Make sure all virtual machines are dead first */

/*
 *
 * Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */

    toss_live_vec(act->mact.curctx);                    /* Dump live vectors */

    vsv = act->mact.curctx->VMXsave;                    /* Get the top vector savearea */

    while(vsv) {                                        /* Any VMX saved state? */
        vpsv = vsv;                                     /* Remember so we can toss this */
        vsv = (savearea_vec *)vsv->save_hdr.save_prev;  /* Get the one underneath ours */
        save_release((savearea *)vpsv);                 /* Release it */
    }

    act->mact.curctx->VMXsave = 0;                      /* Kill chain */

    toss_live_fpu(act->mact.curctx);                    /* Dump live float */

    fsv = act->mact.curctx->FPUsave;                    /* Get the top float savearea */

    while(fsv) {                                        /* Any float saved state? */
        fpsv = fsv;                                     /* Remember so we can toss this */
        fsv = (savearea_fpu *)fsv->save_hdr.save_prev;  /* Get the one underneath ours */
        save_release((savearea *)fpsv);                 /* Release it */
    }

    act->mact.curctx->FPUsave = 0;                      /* Kill chain */

/*
 * free all regular saveareas.
 */

    pcb = act->mact.pcb;                                /* Get the general savearea */

    while(pcb) {                                        /* Any general saved state? */
        ppsv = pcb;                                     /* Remember so we can toss this */
        pcb = pcb->save_hdr.save_prev;                  /* Get the one underneath ours */
        save_release(ppsv);                             /* Release it */
    }

    hw_atomic_sub(&saveanchor.savetarget, 4);           /* Unaccount for the number of saveareas we think we "need" */

}


kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
    /*
     * Clear & Init the pcb (sets up user-mode s regs)
     * We don't use this anymore.
     */

    return KERN_SUCCESS;
}

void act_machine_init()
{
#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_init()\n");
#endif /* MACH_ASSERT */

    /* Good to verify these once */
    assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

    assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );

    /*
     * If we start using kernel activations,
     * would normally create kernel_thread_pool here,
     * populating it from the act_zone
     */
}

void
act_machine_return(int code)
{
    thread_act_t thr_act = current_act();

#if MACH_ASSERT
    if (watchacts & WA_EXIT)
        printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
               code, thr_act, thr_act->ref_count,
               thr_act->thread, thr_act->thread->ref_count);
#endif /* MACH_ASSERT */


    /*
     * This code is called with nothing locked.
     * It also returns with nothing locked, if it returns.
     *
     * This routine terminates the current thread activation.
     * If this is the only activation associated with its
     * thread shuttle, then the entire thread (shuttle plus
     * activation) is terminated.
     */
    assert( code == KERN_TERMINATED );
    assert( thr_act );
    assert(thr_act->thread->top_act == thr_act);

    /* This is the only activation attached to the shuttle... */

    thread_terminate_self();

    /*NOTREACHED*/
    panic("act_machine_return: TALKING ZOMBIE! (1)");
}

void
thread_machine_set_current(struct thread_shuttle *thread)
{
    register int my_cpu = cpu_number();

    set_machine_current_thread(thread);
    set_machine_current_act(thread->top_act);

    active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
    panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
    printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
           thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
           thr_act->alerts, thr_act->alert_mask,
           thr_act->suspend_count, thr_act->active,
           thr_act->higher, thr_act->lower);

    return((int)thr_act);
}

#endif

unsigned int
get_useraddr()
{

    thread_act_t thr_act = current_act();

    return(thr_act->mact.pcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
                 thread, thread->priority,
                 thread->sched_pri, 0, 0);

    if (thread->top_act)
        act_machine_sv_free(thread->top_act);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area, bad things will
 * happen
 *
 */

void
stack_attach(struct thread_shuttle *thread,
             vm_offset_t stack,
             void (*start_pos)(thread_t))
{
    thread_act_t thr_act;
    unsigned int *kss;
    struct savearea *sv;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
                 thread, thread->priority,
                 thread->sched_pri, start_pos,
                 0);

    assert(stack);
    kss = (unsigned int *)STACK_IKS(stack);
    thread->kernel_stack = stack;

    /* during initialization we sometimes do not have an
       activation. in that case do not do anything */
    if ((thr_act = thread->top_act) != 0) {
        sv = save_get();                                /* cannot block */
        sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);   /* Mark as in use */
        sv->save_hdr.save_act = thr_act;
        sv->save_hdr.save_prev = thr_act->mact.pcb;
        thr_act->mact.pcb = sv;

        sv->save_srr0 = (unsigned int) start_pos;
        /* sv->save_r3 = ARG ? */
        sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
        sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
        sv->save_fpscr = 0;                             /* Clear all floating point exceptions */
        sv->save_vrsave = 0;                            /* Set the vector save state */
        sv->save_vscr[3] = 0x00010000;                  /* Suppress java mode */
        *((int *)sv->save_r1) = 0;
        thr_act->mact.ksp = 0;
    }

    return;
}

/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
              thread_t new)
{

    vm_offset_t stack;
    pmap_t new_pmap;
    facility_context *fowner;

    assert(new->top_act);
    assert(old->top_act);

    stack = stack_detach(old);
    new->kernel_stack = stack;
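    /*
     * If the stack we just handed over was old's reserved (privileged)
     * stack, trade reservations so that new now owns it and old keeps
     * new's reserved stack instead.
     */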
    if (stack == old->stack_privilege) {
        assert(new->stack_privilege);
        old->stack_privilege = new->stack_privilege;
        new->stack_privilege = stack;
    }

    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;

    if(real_ncpus > 1) {                                /* This is potentially slow, so only do when actually SMP */
        fowner = per_proc_info[cpu_number()].FPU_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old->top_act) {        /* Is it for us? */
                fpu_save(fowner);                       /* Yes, save it */
            }
        }
        fowner = per_proc_info[cpu_number()].VMX_owner; /* Cache this because it may change */
        if(fowner) {                                    /* Is there any live context? */
            if(fowner->facAct == old->top_act) {        /* Is it for us? */
                vec_save(fowner);                       /* Yes, save it */
            }
        }
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
                          (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


    if(new->top_act->mact.specFlags & runningVM) {      /* Is the new guy running a VM? */
        pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);   /* Switch to the VM's pmap */
    }
    else {                                              /* otherwise, we use the task's pmap */
        new_pmap = new->top_act->task->map->pmap;
        if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

    thread_machine_set_current(new);
    active_stacks[cpu_number()] = new->kernel_stack;
    per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self;

    per_proc_info[cpu_number()].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
    per_proc_info[cpu_number()].spcFlags = new->top_act->mact.specFlags;

    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;

    if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);   /* Cut trace entry if tracing */

    return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void) )
{

    unsigned int *kss;
    vm_offset_t tsp;

    assert(current_thread()->kernel_stack);
    kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
    assert(continuation);

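    /*
     * Carve a fresh initial frame at the top of the kernel stack and zero
     * its backchain, just as machine_kernel_stack_init does for a new
     * thread, before jumping to the continuation.
     */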
    tsp = (vm_offset_t)((int)kss - KF_SIZE);
    assert(tsp);
    *((int *)tsp) = 0;

    Call_continuation(continuation, tsp);

    return;
}

void
thread_swapin_mach_alloc(thread_t thread)
{
    struct savearea *sv;

    assert(thread->top_act->mact.pcb == 0);

    sv = save_alloc();
    assert(sv);
    sv->save_hdr.save_prev = 0;                         /* Initialize back chain */
    sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);   /* Mark as in use */
    sv->save_hdr.save_act = thread->top_act;            /* Initialize owner */
    thread->top_act->mact.pcb = sv;

}