/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 *	Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <ppc/misc_protos.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <ppc/lowglobals.h>

#include <sys/kdebug.h>

void	machine_act_terminate(thread_t);

/*
 * These constants are dumb. They should not be in asm.h!
 */

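/* Kernel stack frame size: the fixed frame area plus the argument save area plus the red zone (all from asm.h) */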
#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)

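/* Debug-only counters tracking floating point and vector facility traps and context switches */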
#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}

/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 *
 */
thread_t
machine_switch_context(
	thread_t			old,
	thread_continue_t	continuation,
	thread_t			new)
{
	register thread_t retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	if (old == new)
		panic("machine_switch_context");

	ppinfo = getPerProc();						/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;
	ppinfo->cpu_flags &= ~traceBE;				/* disable branch tracing if on */

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {						/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;				/* Cache this because it may change */
		if(fowner) {							/* Is there any live context? */
			if(fowner->facAct == old) {			/* Is it for us? */
				fpu_save(fowner);				/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;				/* Cache this because it may change */
		if(fowner) {							/* Is there any live context? */
			if(fowner->facAct == old) {			/* Is it for us? */
				vec_save(fowner);				/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per-proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per-proc without updating the thread spcFlags.
	 */
	if(old->machine.specFlags & runningVM) {
		old->machine.specFlags &= ~(userProtKey|FamVMmode);
		old->machine.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use thread->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new->machine.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->machine.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->machine.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->machine.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->machine.vmmCEntry->vmmFAMintercept;
	}
	else {												/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->machine.specFlags & runningVM)) {
			pmap_switch(new_pmap);						/* Switch if there is a change */
		}
	}

	if(old->machine.umwSpace != invalSpace) {			/* Does our old guy have an active window? */
		old->machine.umwSpace |= umwSwitchAway;			/* Show we switched away from this guy */
		hw_blow_seg(lowGlo.lgUMWvaddr);					/* Blow off the first segment */
		hw_blow_seg(lowGlo.lgUMWvaddr + 0x10000000ULL);	/* Blow off the second segment */
	}

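	/* Log the context switch in the kernel trace buffer */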
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	if (branch_tracing_enabled()) {
		ppinfo = getPerProc();							/* Get our processor block */
		ppinfo->cpu_flags |= traceBE;					/* restore branch tracing */
	}

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	savearea		*sv;									/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;

	hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4);	/* Account for the number of saveareas we think we "need"
															   for this activation */
	assert(thread->machine.pcb == (savearea *)0);			/* Make sure there was no previous savearea */

	sv = save_alloc();										/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;								/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread;							/* Set who owns it */
	thread->machine.pcb = sv;								/* Point to the save area */
	thread->machine.curctx = &thread->machine.facctx;		/* Initialize facility context */
	thread->machine.facctx.facAct = thread;					/* Initialize facility context pointer to activation */
	thread->machine.umwSpace = invalSpace;					/* Initialize user memory window space to invalid */
	thread->machine.preemption_count = 0;					/* Initialize preemption counter */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	thread->machine.upcb = sv;								/* Set user pcb */
	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;			/* Set the default user MSR */
	if(task_has_64BitAddr(task)) sv->save_srr1 |= (uint64_t)MASK32(MSR_SF) << 32;	/* If 64-bit task, force 64-bit mode */
	sv->save_fpscr = 0;										/* Clear all floating point exceptions */
	sv->save_vrsave = 0;									/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;							/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;
	register int i;
	boolean_t intr;

/*
 *	This function will release all context.
 */

	machine_act_terminate(thread);					/* Make sure all virtual machines are dead first */

/*
 *
 *	Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */

	intr = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */

	toss_live_vec(thread->machine.curctx);			/* Dump live vectors */

	vsv = thread->machine.curctx->VMXsave;			/* Get the top vector savearea */

	while(vsv) {									/* Any VMX saved state? */
		vpsv = vsv;									/* Remember so we can toss this */
		vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((savearea *)vpsv);				/* Release it */
	}

	thread->machine.curctx->VMXsave = 0;			/* Kill chain */

	toss_live_fpu(thread->machine.curctx);			/* Dump live float */

	fsv = thread->machine.curctx->FPUsave;			/* Get the top float savearea */

	while(fsv) {									/* Any float saved state? */
		fpsv = fsv;									/* Remember so we can toss this */
		fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((savearea *)fpsv);				/* Release it */
	}

	thread->machine.curctx->FPUsave = 0;			/* Kill chain */

/*
 * free all regular saveareas.
 */

	pcb = thread->machine.pcb;						/* Get the general savearea */

	while(pcb) {									/* Any general saved state? */
		ppsv = pcb;									/* Remember so we can toss this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);		/* Get the one underneath ours */
		save_release(ppsv);							/* Release it */
	}

	hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4);	/* Unaccount for the number of saveareas we think we "need" */

	(void) ml_set_interrupts_enabled(intr);			/* Restore interrupts if enabled */

}

/*
 * act_machine_sv_free
 * Release all non-user saveareas associated with an act; the user
 * savearea, if any, is kept.
 *
 * This code must run with interruptions disabled because an interrupt handler could use
 * floating point and/or vectors.  If this happens and the thread we are blowing off owns
 * the facility, we can deadlock.
 */
void
act_machine_sv_free(thread_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;
	register int i;
	boolean_t intr;

/*
 *	This function will release all non-user state context.
 */

/*
 *
 *	Walk through and release all floating point and vector contexts that are not
 *	user state.  We will also blow away live context if it belongs to non-user state.
 *	Note that the level cannot change while we are in this code.  Nor can another
 *	context be pushed on the stack.
 *
 *	We do nothing here if the current level is user.  Otherwise,
 *	the live context is cleared.  Then we find the user saved context.
 *	Next, we take the sync lock (to keep us from munging things in *_switch).
 *	The level is set to 0 and all stacked context other than user is dequeued.
 *	Then we unlock.  Next, all of the old kernel contexts are released.
 *
 */

	intr = ml_set_interrupts_enabled(FALSE);	/* Disable interruptions */

	if(act->machine.curctx->VMXlevel) {			/* Is the current level user state? */

		toss_live_vec(act->machine.curctx);		/* Dump live vectors if not user state */

		if(!hw_lock_to((hw_lock_t)&act->machine.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsv = act->machine.curctx->VMXsave;		/* Get the top vector savearea */
		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;	/* Find user context if any */

		vsvt = act->machine.curctx->VMXsave;	/* Get the top of the chain */
		act->machine.curctx->VMXsave = vsv;		/* Point to the user context */
		act->machine.curctx->VMXlevel = 0;		/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->machine.curctx->VMXsync);	/* Unlock */

		while(vsvt) {							/* Clear any VMX saved state */
			if (vsvt == vsv) break;				/* Done when hit user if any */
			vpst = vsvt;						/* Remember so we can toss this */
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;	/* Get the one underneath ours */
			save_ret((savearea *)vpst);			/* Release it */
		}

	}

	if(act->machine.curctx->FPUlevel) {			/* Is the current level user state? */

		toss_live_fpu(act->machine.curctx);		/* Dump live floats if not user state */

		if(!hw_lock_to((hw_lock_t)&act->machine.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsv = act->machine.curctx->FPUsave;		/* Get the top float savearea */
		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;	/* Find user context if any */

		fsvt = act->machine.curctx->FPUsave;	/* Get the top of the chain */
		act->machine.curctx->FPUsave = fsv;		/* Point to the user context */
		act->machine.curctx->FPUlevel = 0;		/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->machine.curctx->FPUsync);	/* Unlock */

		while(fsvt) {							/* Clear any FPU saved state */
			if (fsvt == fsv) break;				/* Done when hit user if any */
			fpst = fsvt;						/* Remember so we can toss this */
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;	/* Get the one underneath ours */
			save_ret((savearea *)fpst);			/* Release it */
		}

	}

/*
 * free all regular saveareas except a user savearea, if any
 */

	pcb = act->machine.pcb;						/* Get the general savearea */
	userpcb = 0;								/* Assume no user context for now */

	while(pcb) {								/* Any general saved state? */
		if (pcb->save_srr1 & MASK(MSR_PR)) {	/* Is this a user savearea? */
			userpcb = pcb;						/* Remember the user savearea so we can keep it */
			break;
		}
		svp = pcb;								/* Remember this */
		pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);	/* Get the one underneath ours */
		save_ret(svp);							/* Release it */
	}

	act->machine.pcb = userpcb;					/* Chain in the user if there is one, or 0 if not */

	(void) ml_set_interrupts_enabled(intr);		/* Restore interrupts if enabled */

}

void
machine_act_terminate(
	thread_t	act)
{
	if(act->machine.bbDescAddr) {				/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);			/* Kill off bluebox */
	}

	if(act->machine.vmmControl) {				/* Check if VMM is active */
		vmm_tear_down_all(act);					/* Kill off all VMM contexts */
	}
}

void
machine_thread_terminate_self(void)
{
	machine_act_terminate(current_thread());
}

void
machine_thread_init(void)
{
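	/* Sanity check: the PPC stack code assumes a kernel stack fits within a single page */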
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(0x%x)(%d): task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%x active=%x\n",
	       thr_act->suspend_count, thr_act->active);

	return((int)thr_act);
}

#endif

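/*
 * get_useraddr: return the user PC, i.e. the SRR0 value saved in the
 * current thread's user savearea.
 */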
user_addr_t
get_useraddr()
{
	return(current_thread()->machine.upcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(
	thread_t		thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
				 thread, thread->priority,
				 thread->sched_pri, 0, 0);

	act_machine_sv_free(thread);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * Attaches a stack to a thread.  If there is no save
 * area we allocate one.  The top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR.  If the top
 * save area is the user save area, bad things will
 * happen.
 *
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
				 thread, thread->priority,
				 thread->sched_pri, 0, 0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* During initialization we sometimes do not have an
	   activation; in that case do not do anything. */
	sv = save_get();										/* cannot block */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread;
	sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thread->machine.pcb);
	thread->machine.pcb = sv;

	sv->save_srr0 = (unsigned int)thread_continue;
	/* sv->save_r3 = ARG ? */
	sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
	sv->save_fpscr = 0;										/* Clear all floating point exceptions */
	sv->save_vrsave = 0;									/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;							/* Suppress java mode */
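	/* Zero the back-chain word at the base of the new frame so the stack frame chain terminates here */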
	*(CAST_DOWN(int *, sv->save_r1)) = 0;

	thread->machine.ksp = 0;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(
	thread_t		old,
	thread_t		new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping_t *mp;
	struct per_proc_info *ppinfo;

	assert(new);
	assert(old);

	if (old == new)
		panic("machine_stack_handoff");

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
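	/* If the old thread was using its reserved stack, swap reserved stacks so each thread still owns one */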
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	ppinfo = getPerProc();							/* Get our processor block */

	ppinfo->cpu_flags &= ~traceBE;					/* Turn off special branch trace */

	if(real_ncpus > 1) {							/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				fpu_save(fowner);					/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;					/* Cache this because it may change */
		if(fowner) {								/* Is there any live context? */
			if(fowner->facAct == old) {				/* Is it for us? */
				vec_save(fowner);					/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per-proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per-proc without updating the thread spcFlags.
	 */
	if(old->machine.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->machine.specFlags &= ~(userProtKey|FamVMmode);
		old->machine.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	if(new->machine.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->machine.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->machine.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->machine.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->machine.vmmCEntry->vmmFAMintercept;
	}
	else {											/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->machine.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	machine_set_current_thread(new);
	ppinfo->Uassist = new->machine.cthread_self;

	ppinfo->ppbbTaskEnv = new->machine.bbTaskEnv;
	ppinfo->spcFlags = new->machine.specFlags;

	old->machine.umwSpace |= umwSwitchAway;			/* Show we switched away from this guy */
	mp = (mapping_t *)&ppinfo->ppUMWmp;
	mp->mpSpace = invalSpace;						/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if (branch_tracing_enabled())
		ppinfo->cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x9903, (unsigned int)old, (unsigned int)new, 0, 0);	/* Cut trace entry if tracing */

	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(
	thread_continue_t	continuation,
	void				*parameter,
	wait_result_t		wresult)
{
	thread_t	self = current_thread();
	unsigned int *kss;
	vm_offset_t tsp;

	assert(self->kernel_stack);
	kss = (unsigned int *)STACK_IKS(self->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, parameter, wresult, tsp);
}