/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <ppc/misc_protos.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <ppc/lowglobals.h>
#include <ppc/fpu_protos.h>

#include <sys/kdebug.h>

void machine_act_terminate(thread_t);

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
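
/*
 * Illustrative note (not in the original source): KF_SIZE is the room
 * reserved below the top of the kernel stack for the initial kernel frame,
 * built from the pieces pulled in from asm.h (presumably the frame linkage
 * area, the argument area, and the red zone of the 32-bit PowerPC ABI).
 * machine_stack_attach() and call_continuation() both carve out the initial
 * stack pointer the same way:
 *
 *	kss = STACK_IKS(stack);			-- top of the usable kernel stack
 *	sp  = (vm_offset_t)kss - KF_SIZE;	-- leave room for one frame
 *	*(int *)sp = 0;				-- terminate the back chain
 */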

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
	/*
	 * XXX none currently available
	 */
}

void
consider_machine_adjust(void)
{
	consider_mapping_adjust();
}

/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 *
 */
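/*
 * Descriptive note, summarizing the code below: on a switch we
 *   1) flush any live FPU/AltiVec state owned by the outgoing thread back to
 *      its saveareas (only when SMP, since the context may resume elsewhere),
 *   2) fold the per_proc userProtKey/FamVMmode bits back into the outgoing
 *      thread's specFlags if it was running a VM, and move the OnProc flag,
 *   3) switch to the VM's pmap or to the new task's pmap as appropriate,
 *   4) invalidate the outgoing thread's user memory window segments, and
 *   5) call the assembly Switch_context() to do the actual register switch.
 */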
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	register thread_t retval;
	pmap_t	new_pmap;
	facility_context *fowner;
	struct per_proc_info *ppinfo;

	if (old == new)
		panic("machine_switch_context");

	ppinfo = getPerProc();						/* Get our processor block */

	ppinfo->old_thread = (unsigned int)old;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
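	/*
	 * Descriptive note: the FPU and vector facilities appear to be switched
	 * lazily; the per_proc FPU_owner/VMX_owner fields track whose context is
	 * currently live in the hardware registers.  The explicit fpu_save() and
	 * vec_save() below are only done when SMP, where the thread could next
	 * run on a different CPU that cannot reach the state left in this CPU's
	 * registers.
	 */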
	if(real_ncpus > 1) {						/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;				/* Cache this because it may change */
		if(fowner) {						/* Is there any live context? */
			if(fowner->facAct == old) {			/* Is it for us? */
				fpu_save(fowner);			/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;				/* Cache this because it may change */
		if(fowner) {						/* Is there any live context? */
			if(fowner->facAct == old) {			/* Is it for us? */
				vec_save(fowner);			/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per proc without updating the thread spcFlags.
	 */
	if(old->machine.specFlags & runningVM) {
		old->machine.specFlags &=  ~(userProtKey|FamVMmode);
		old->machine.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use thread->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new->machine.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->machine.vmmCEntry->vmmPmap);		/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->machine.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->machine.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->machine.vmmCEntry->vmmFAMintercept;
	}
	else {								/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->machine.specFlags & runningVM)) {
			pmap_switch(new_pmap);				/* Switch if there is a change */
		}
	}

	if(old->machine.umwSpace != invalSpace) {			/* Does our old guy have an active window? */
		old->machine.umwSpace |= umwSwitchAway;			/* Show we switched away from this guy */
		hw_blow_seg(lowGlo.lgUMWvaddr);				/* Blow off the first segment */
		hw_blow_seg(lowGlo.lgUMWvaddr + 0x10000000ULL);		/* Blow off the second segment */
	}

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	struct savearea		*sv;				/* Pointer to newly allocated savearea */

	(void)hw_atomic_add(&saveanchor.savetarget, 4);		/* Account for the number of saveareas we think we "need"
								   for this activation */
	assert(thread->machine.pcb == (struct savearea *)0);	/* Make sure there was no previous savearea */

	sv = save_alloc();					/* Go get us a savearea */

	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(struct savearea) - sizeof(savearea_comm)));	/* Clear it */

	sv->save_hdr.save_prev = 0;				/* Clear the back pointer */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread;				/* Set who owns it */
	thread->machine.pcb = sv;				/* Point to the save area */
	thread->machine.curctx = &thread->machine.facctx;	/* Initialize facility context */
	thread->machine.facctx.facAct = thread;			/* Initialize facility context pointer to activation */
	thread->machine.umwSpace = invalSpace;			/* Initialize user memory window space to invalid */
	thread->machine.preemption_count = 0;			/* Initialize preemption counter */

	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	thread->machine.upcb = sv;				/* Set user pcb */
	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;		/* Set the default user MSR */
	if(task_has_64BitAddr(task)) sv->save_srr1 |= (uint64_t)MASK32(MSR_SF) << 32;	/* If 64-bit task, force 64-bit mode */
	sv->save_fpscr = 0;					/* Clear all floating point exceptions */
	sv->save_vrsave = 0;					/* Set the vector save state */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;				/* Disable java mode and clear saturated */

	return(KERN_SUCCESS);
}
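
/*
 * Descriptive note: machine_thread_create() bumps saveanchor.savetarget by 4
 * as a hint of how many saveareas this thread is expected to need, and
 * machine_thread_destroy() takes the same 4 back off.  Judging by the
 * comments, the target is a sizing hint for the savearea pool ("the number
 * of saveareas we think we 'need'"), not a hard limit.
 */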

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	struct savearea *local_pcb, *ppsv;
	savearea_vec *vsv, *vpsv;
	savearea_fpu *fsv, *fpsv;
	boolean_t intr;

/*
 *	This function will release all context.
 */

	machine_act_terminate(thread);				/* Make sure all virtual machines are dead first */

/*
 *
 *	Walk through and release all floating point and vector contexts. Also kill live context.
 *
 */
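/*
 *	Descriptive note: each facility keeps its saved state as a singly linked
 *	stack of saveareas threaded through save_hdr.save_prev, newest first.
 *	The release pattern below is the same for the vector, float, and general
 *	chains: toss any live (in-register) state, then pop saveareas one at a
 *	time and hand each back to the savearea pool, roughly:
 *
 *		while (sv) {
 *			prev = sv;
 *			sv = CAST_DOWN(type *, sv->save_hdr.save_prev);
 *			save_release((struct savearea *)prev);
 *		}
 */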

	intr = ml_set_interrupts_enabled(FALSE);		/* Disable for interruptions */

	toss_live_vec(thread->machine.curctx);			/* Dump live vectors */

	vsv = thread->machine.curctx->VMXsave;			/* Get the top vector savearea */

	while(vsv) {						/* Any VMX saved state? */
		vpsv = vsv;					/* Remember so we can toss this */
		/* XXX save_prev should be a void * 4425537 */
		vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((struct savearea *)vpsv);		/* Release it */
	}

	thread->machine.curctx->VMXsave = NULL;			/* Kill chain */

	toss_live_fpu(thread->machine.curctx);			/* Dump live float */

	fsv = thread->machine.curctx->FPUsave;			/* Get the top float savearea */

	while(fsv) {						/* Any float saved state? */
		fpsv = fsv;					/* Remember so we can toss this */
		/* XXX save_prev should be a void * 4425537 */
		fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release((struct savearea *)fpsv);		/* Release it */
	}

	thread->machine.curctx->FPUsave = NULL;			/* Kill chain */

/*
 *	free all regular saveareas.
 */

	local_pcb = thread->machine.pcb;			/* Get the general savearea */

	while(local_pcb) {					/* Any general saved state? */
		ppsv = local_pcb;				/* Remember so we can toss this */
		/* XXX save_prev should be a void * 4425537 */
		local_pcb = CAST_DOWN(struct savearea *, local_pcb->save_hdr.save_prev);	/* Get the one underneath ours */
		save_release(ppsv);				/* Release it */
	}

	(void)hw_atomic_sub(&saveanchor.savetarget, 4);		/* Unaccount for the number of saveareas we think we "need" */

	(void) ml_set_interrupts_enabled(intr);			/* Restore interrupts if enabled */

}

/*
 * act_machine_sv_free
 * release saveareas associated with a thread. if flag is true, release
 * user level savearea(s) too, else don't
 *
 * This code must run with interruptions disabled because an interrupt handler
 * could use floating point and/or vectors. If this happens and the thread we
 * are blowing off owns the facility, we can deadlock.
 */
void
act_machine_sv_free(thread_t act, __unused int flag)
{
	struct savearea *local_pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	struct savearea *svp;
	boolean_t intr;

/*
 *	This function will release all non-user state context.
 */

/*
 *
 *	Walk through and release all floating point and vector contexts that are not
 *	user state. We will also blow away live context if it belongs to non-user state.
 *	Note that the level can not change while we are in this code. Nor can another
 *	context be pushed on the stack.
 *
 *	We do nothing here if the current level is user. Otherwise,
 *	the live context is cleared. Then we take the sync lock (to keep us from
 *	munging things in *_switch) and find the user saved context.
 *	The level is set to 0 and all stacked context other than user is dequeued.
 *	Then we unlock. Next, all of the old kernel contexts are released.
 *
 */
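/*
 *	Illustrative sketch (not in the original): for each facility the code
 *	below (a) finds the user-level savearea, i.e. the one whose
 *	save_hdr.save_level is 0, (b) makes it the new top of the chain and
 *	resets the facility level to user, and (c) hands every kernel-level
 *	savearea that sat above it back via save_ret().  Roughly, for the
 *	vector case:
 *
 *		user = top;
 *		while (user && user->save_hdr.save_level)	-- skip kernel levels
 *			user = CAST_DOWN(savearea_vec *, user->save_hdr.save_prev);
 *		curctx->VMXsave = user;
 *		curctx->VMXlevel = NULL;
 *		sv = top;
 *		while (sv && sv != user) {			-- free what was skipped
 *			nxt = CAST_DOWN(savearea_vec *, sv->save_hdr.save_prev);
 *			save_ret((struct savearea *)sv);
 *			sv = nxt;
 *		}
 */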

	intr = ml_set_interrupts_enabled(FALSE);		/* Disable for interruptions */

	if(act->machine.curctx->VMXlevel) {			/* Is the current level user state? */

		toss_live_vec(act->machine.curctx);		/* Dump live vectors if is not user */

		if(!hw_lock_to((hw_lock_t)&act->machine.curctx->VMXsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");	/* Tell all and die */
		}

		vsv = act->machine.curctx->VMXsave;		/* Get the top vector savearea */
		while(vsv && vsv->save_hdr.save_level)		/* Find user context if any */
			/* XXX save_prev should be a void * 4425537 */
			vsv = CAST_DOWN(savearea_vec *,
					vsv->save_hdr.save_prev);

		vsvt = act->machine.curctx->VMXsave;		/* Get the top of the chain */
		act->machine.curctx->VMXsave = vsv;		/* Point to the user context */
		act->machine.curctx->VMXlevel = NULL;		/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->machine.curctx->VMXsync);	/* Unlock */

		while(vsvt) {					/* Clear any VMX saved state */
			if (vsvt == vsv) break;			/* Done when hit user if any */
			vpst = vsvt;				/* Remember so we can toss this */
			/* XXX save_prev should be a void * 4425537 */
			vsvt = CAST_DOWN(savearea_vec *, vsvt->save_hdr.save_prev);	/* Get the one underneath ours */
			save_ret((struct savearea *)vpst);	/* Release it */
		}

	}

	if(act->machine.curctx->FPUlevel) {			/* Is the current level user state? */

		toss_live_fpu(act->machine.curctx);		/* Dump live floats if is not user */

		if(!hw_lock_to((hw_lock_t)&act->machine.curctx->FPUsync, LockTimeOut)) {	/* Get the sync lock */
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");	/* Tell all and die */
		}

		fsv = act->machine.curctx->FPUsave;		/* Get the top floats savearea */
		while(fsv && fsv->save_hdr.save_level)		/* Find user context if any */
			/* XXX save_prev should be a void * */
			fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);

		fsvt = act->machine.curctx->FPUsave;		/* Get the top of the chain */
		act->machine.curctx->FPUsave = fsv;		/* Point to the user context */
		act->machine.curctx->FPUlevel = NULL;		/* Set the level to user */
		hw_lock_unlock((hw_lock_t)&act->machine.curctx->FPUsync);	/* Unlock */

		while(fsvt) {					/* Clear any FPU saved state */
			if (fsvt == fsv) break;			/* Done when hit user if any */
			fpst = fsvt;				/* Remember so we can toss this */
			/* XXX save_prev should be a void * 4425537 */
			fsvt = CAST_DOWN(savearea_fpu *, fsvt->save_hdr.save_prev);	/* Get the one underneath ours */
			save_ret((struct savearea *)fpst);	/* Release it */
		}

	}

/*
 * free all regular saveareas except a user savearea, if any
 */

	local_pcb = act->machine.pcb;				/* Get the general savearea */
	userpcb = NULL;						/* Assume no user context for now */

	while(local_pcb) {					/* Any general saved state? */
		if (local_pcb->save_srr1 & MASK(MSR_PR)) {	/* Is this a user savearea? */
			userpcb = local_pcb;			/* Remember so we can keep this one */
			break;
		}
		svp = local_pcb;				/* Remember this */
		/* XXX save_prev should be a void * 4425537 */
		local_pcb = CAST_DOWN(struct savearea *, local_pcb->save_hdr.save_prev);	/* Get the one underneath ours */
		save_ret(svp);					/* Release it */
	}

	act->machine.pcb = userpcb;				/* Chain in the user if there is one, or 0 if not */
	(void) ml_set_interrupts_enabled(intr);			/* Restore interrupts if enabled */

}

void
machine_act_terminate(
	thread_t	act)
{
	if(act->machine.bbDescAddr) {				/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);			/* Kill off bluebox */
	}

	if(act->machine.vmmControl) {				/* Check if VMM is active */
		vmm_tear_down_all(act);				/* Kill off all VMM contexts */
	}
}

void
machine_thread_terminate_self(void)
{
	machine_act_terminate(current_thread());
}

void
machine_thread_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT
void
dump_thread(thread_t th)
{
	printf(" thread @ %p:\n", th);
}
#endif /* MACH_ASSERT */

user_addr_t
get_useraddr(void)
{
	return(current_thread()->machine.upcb->save_srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(
	thread_t		thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
				 thread, thread->priority,
				 thread->sched_pri, 0, 0);

	act_machine_sv_free(thread, 0);	/* XXX flag == 0 OK? */

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 *
 */
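/*
 * Illustrative sketch (not in the original): after machine_stack_attach()
 * the new savearea sits on top of the thread's pcb chain and describes a
 * thread that will resume in thread_continue on its own kernel stack:
 *
 *	sv->save_srr0 = thread_continue;		-- resume pc
 *	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;		-- a std kernel MSR
 *	sv->save_r1   = STACK_IKS(stack) - KF_SIZE;	-- initial stack pointer
 *	*(int *)sv->save_r1 = 0;			-- presumably terminates
 *							   the frame back chain
 */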

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
			thread, thread->priority,
			thread->sched_pri, 0, 0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	sv = save_get();					/* cannot block */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as in use */
	sv->save_hdr.save_act = thread;
	sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thread->machine.pcb);
	thread->machine.pcb = sv;

	sv->save_srr0 = (unsigned int)thread_continue;
	/* sv->save_r3 = ARG ? */
	sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
	sv->save_fpscr = 0;					/* Clear all floating point exceptions */
	sv->save_vrsave = 0;					/* Set the vector save state */
	sv->save_vscr[3] = 0x00010000;				/* Suppress Java mode */
	*(CAST_DOWN(int *, sv->save_r1)) = 0;

	thread->machine.ksp = 0;
}

/*
 * move a stack from old to new thread
 */
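/*
 * Descriptive note, summarizing machine_stack_handoff() below: the kernel
 * stack is detached from the old thread and given directly to the new one
 * (swapping reserved stacks if needed), live FPU/AltiVec state is flushed on
 * SMP just as in machine_switch_context(), the pmap and the per_proc fields
 * (Uassist, ppbbTaskEnv, spcFlags) are switched over to the new thread, and
 * the user memory window mapping is invalidated since a copyin/out cannot be
 * carried across the handoff.
 */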

void
machine_stack_handoff(
	thread_t		old,
	thread_t		new)
{

	vm_offset_t stack;
	pmap_t new_pmap;
	facility_context *fowner;
	mapping_t *mp;
	struct per_proc_info *ppinfo;

	assert(new);
	assert(old);

	if (old == new)
		panic("machine_stack_handoff");

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	ppinfo = getPerProc();					/* Get our processor block */

	if(real_ncpus > 1) {					/* This is potentially slow, so only do when actually SMP */
		fowner = ppinfo->FPU_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				fpu_save(fowner);		/* Yes, save it */
			}
		}
		fowner = ppinfo->VMX_owner;			/* Cache this because it may change */
		if(fowner) {					/* Is there any live context? */
			if(fowner->facAct == old) {		/* Is it for us? */
				vec_save(fowner);		/* Yes, save it */
			}
		}
	}

	/*
	 * If the old thread is running a VM, save the per proc userProtKey and FamVMmode spcFlags bits in the thread spcFlags.
	 * These bits can be modified in the per proc without updating the thread spcFlags.
	 */
	if(old->machine.specFlags & runningVM) {		/* Is the current thread running a VM? */
		old->machine.specFlags &= ~(userProtKey|FamVMmode);
		old->machine.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
	}
	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	if(new->machine.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->machine.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
		ppinfo->VMMareaPhys = new->machine.vmmCEntry->vmmContextPhys;
		ppinfo->VMMXAFlgs = new->machine.vmmCEntry->vmmXAFlgs;
		ppinfo->FAMintercept = new->machine.vmmCEntry->vmmFAMintercept;
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->machine.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	machine_set_current_thread(new);
	ppinfo->Uassist = new->machine.cthread_self;

	ppinfo->ppbbTaskEnv = new->machine.bbTaskEnv;
	ppinfo->spcFlags = new->machine.specFlags;

	old->machine.umwSpace |= umwSwitchAway;			/* Show we switched away from this guy */
	mp = (mapping_t *)&ppinfo->ppUMWmp;
	mp->mpSpace = invalSpace;				/* Since we can't handoff in the middle of copy in/out, just invalidate */

	if(trcWork.traceMask) dbgTrace(0x9903, (unsigned int)old, (unsigned int)new, 0, 0);	/* Cut trace entry if tracing */

	return;
}

void Call_continuation(thread_continue_t, void *, wait_result_t, vm_offset_t);

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */
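/*
 * Descriptive note: call_continuation() resets the current kernel stack to a
 * single empty frame (tsp = STACK_IKS(stack) - KF_SIZE with a zeroed back
 * chain) and then passes control to the Call_continuation() helper declared
 * above (implemented outside this file), which switches to that stack pointer
 * and invokes the continuation; it is not expected to return here.
 */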

void
call_continuation(
	thread_continue_t	continuation,
	void			*parameter,
	wait_result_t		wresult)
{
	thread_t		self = current_thread();
	unsigned int		*kss;
	vm_offset_t		tsp;

	assert(self->kernel_stack);
	kss = (unsigned int *)STACK_IKS(self->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, parameter, wresult, tsp);
}