/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>

#include <sys/kdebug.h>

extern int real_ncpus;                          /* Number of actual CPUs */
extern struct Saveanchor saveanchor;            /* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE     (FM_SIZE+ARG_SIZE+FM_REDZONE)
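
/*
 * KF_SIZE is the size of the initial (top) kernel stack frame: a fixed
 * frame (FM_SIZE) plus an argument save area (ARG_SIZE) plus the PowerPC
 * ABI red zone (FM_REDZONE); the constants themselves live in ppc/asm.h.
 */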

#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
        struct thread_shuttle   *old,
        void                    (*cont)(void),
        struct thread_shuttle   *new);


#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif


/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
    /*
     * none currently available
     */
    return;
}

void
consider_machine_adjust()
{
    consider_mapping_adjust();
}


/*
 * machine_kernel_stack_init: initialize a thread's kernel stack and
 * savearea so that execution resumes at start_pos.
 */
void
machine_kernel_stack_init(
    struct thread_shuttle *thread,
    void (*start_pos)(thread_t))
{
    vm_offset_t     stack;
    unsigned int    *kss;
    struct savearea *sv;

    assert(thread->top_act->mact.pcb);
    assert(thread->kernel_stack);
    stack = thread->kernel_stack;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif /* MACH_ASSERT */

    kss = (unsigned int *)STACK_IKS(stack);
    sv = (savearea *)(thread->top_act->mact.pcb);   /* This for the sake of C */

    sv->save_lr = (unsigned int) start_pos;         /* Set up the execution address */
    sv->save_srr0 = (unsigned int) start_pos;       /* Here too */
    sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;         /* Set the normal running MSR */
    sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);   /* Point to the top frame on the stack */
    sv->save_xfpscrpad = 0;                         /* Start with a clear fpscr */
    sv->save_xfpscr = 0;                            /* Start with a clear fpscr */

    *((int *)sv->save_r1) = 0;                      /* Zero the frame backpointer */
    thread->top_act->mact.ksp = 0;                  /* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, switching
 *                 address spaces as needed.
 *
 */
struct thread_shuttle*
switch_context(
    struct thread_shuttle *old,
    void (*continuation)(void),
    struct thread_shuttle *new)
{
    register thread_act_t old_act = old->top_act, new_act = new->top_act;
    register struct thread_shuttle* retval;
    pmap_t new_pmap;
#if MACH_LDEBUG || MACH_KDB
    log_thread_action("switch",
                      (long)old,
                      (long)new,
                      (long)__builtin_return_address(0));
#endif
    per_proc_info[cpu_number()].old_thread = old;
    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;  /* disable branch tracing if on */
    assert(old_act->kernel_loaded ||
           active_stacks[cpu_number()] == old_act->thread->kernel_stack);

    check_simple_locks();

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU; it must go back to the pcb
     * so that the other processor can find it if needed.
     */
    if(real_ncpus > 1) {        /* This is potentially slow, so only do when actually SMP */
        fpu_save(old_act);      /* Save floating point if used */
        vec_save(old_act);      /* Save vector if used */
    }

#if DEBUG
    if (watchacts & WA_PCB) {
        printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
               old, continuation, new);
    }
#endif /* DEBUG */

    /*
     * We do not have to worry about the PMAP module, so switch.
     *
     * We must not use top_act->map since this may not be the actual
     * task map, but the map being used for a klcopyin/out.
     */

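    /* Note that a switch is also forced when the old activation was
     * running a VM: the hardware would still be using the VM's pmap
     * even if old and new share the same task pmap. */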
    if(new_act->mact.specFlags & runningVM) {           /* Is the new guy running a VM? */
        pmap_switch(new_act->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
    }
    else {                                              /* otherwise, we use the task's pmap */
        new_pmap = new_act->task->map->pmap;
        if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);                      /* Switch if there is a change */
        }
    }

    /* Sanity check - is the stack pointer inside the stack that
     * we're about to switch to? Is the execution address within
     * the kernel's VM space??
     */
#if 0
    printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n",
           new->kernel_stack, new_act->mact.pcb->ss.r1,
           new_act->mact.pcb->ss.lr, old, continuation, new);   /* (TEST/DEBUG) */
    assert((new->kernel_stack < new_act->mact.pcb->ss.r1) &&
           ((unsigned int)STACK_IKS(new->kernel_stack) >
            new_act->mact.pcb->ss.r1));
    assert(new_act->mact.pcb->ss.lr < VM_MAX_KERNEL_ADDRESS);
#endif


    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
                          (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


    retval = Switch_context(old, continuation, new);
    assert(retval != (struct thread_shuttle*)NULL);

    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;   /* restore branch tracing */

    /* We've returned from having switched context, so we should be
     * back in the original context.
     */

    return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
    struct thread_shuttle *thread,
    kern_return_t retval)
{
    struct ppc_saved_state *ssp = &thread->top_act->mact.pcb->ss;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif /* MACH_ASSERT */

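    /* The PowerPC calling convention returns values in r3, so a later
     * thread_exception_return hands 'retval' back to user mode as the
     * syscall's return value. */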
    ssp->r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
    struct thread_shuttle *thread,
    thread_act_t thr_act,
    void (*start_pos)(thread_t))
{

    savearea     *sv;               /* Pointer to newly allocated savearea */
    unsigned int *CIsTooLimited, i;


#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif /* MACH_ASSERT */

    hw_atomic_add(&saveanchor.saveneed, 4);     /* Account for the number of saveareas we think we "need"
                                                   for this activation */
    assert(thr_act->mact.pcb == (pcb_t)0);      /* Make sure there was no previous savearea */

    sv = save_alloc();                          /* Go get us a savearea */

    bzero((char *) sv, sizeof(struct pcb));     /* Clear out the whole shebang */

    sv->save_act = thr_act;                     /* Set who owns it */
    sv->save_vrsave = 0;
    thr_act->mact.pcb = (pcb_t)sv;              /* Point to the save area */

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif /* MACH_ASSERT */
    /*
     * User threads will pull their context from the pcb when first
     * returning to user mode, so fill in all the necessary values.
     * Kernel threads are initialized from the save state structure
     * at the base of the kernel stack (see stack_attach()).
     */

    sv->save_srr1 = MSR_EXPORT_MASK_SET;        /* Set the default user MSR */

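    /* Each segment register value below packs the protection bits
     * (SEG_REG_PROT), the segment number shifted into place (i << 20),
     * and the task's address-space ID from pmap->space, so user
     * addresses translate within the task's own space. */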
    CIsTooLimited = (unsigned int *)(&sv->save_sr0);    /* Make a pointer 'cause C can't cast on the left */
    for(i = 0; i < 16; i++) {                           /* Initialize all SRs */
        CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;  /* Set the SR value */
    }
    sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM << 20) | thr_act->task->map->pmap->space;    /* Default the copyin */

    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
    spl_t s;

    if (thread->kernel_stack) {
        s = splsched();
        stack_free(thread);
        splx(s);
    }
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
    thread_t     thread,
    thread_act_t old,
    thread_act_t new,
    int          cpu)
{
    pmap_t new_pmap;

    /* Our context might wake up on another processor, so we must
     * not keep hot state in our FPU; it must go back to the pcb
     * so that the other processor can find it if needed.
     */
    if(real_ncpus > 1) {    /* This is potentially slow, so only do when actually SMP */
        fpu_save(old);      /* Save floating point if used */
        vec_save(old);      /* Save vector if used */
    }

    active_stacks[cpu] = thread->kernel_stack;

    ast_context(new, cpu);

    /* Activations might have different pmaps
     * (process->kernel->server, for example).
     * Change space if needed.
     */

    if(new->mact.specFlags & runningVM) {           /* Is the new guy running a VM? */
        pmap_switch(new->mact.vmmCEntry->vmmPmap);  /* Switch to the VM's pmap */
    }
    else {                                          /* otherwise, we use the task's pmap */
        new_pmap = new->task->map->pmap;
        if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

}

void
pcb_user_to_kernel(thread_act_t act)
{

    return;     /* Not needed, I hope... */
}


/*
 * act_machine_sv_free
 * release the saveareas associated with an act; the user-level
 * savearea, if any, is preserved.
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
    register pcb_t    pcb, userpcb, npcb;
    register savearea *svp;
    register int      i;

    /*
     * This next bit ensures that any live facility context for this thread is discarded on every processor
     * that may have it.  We go through all per-processor blocks and zero the facility owner if
     * it is the thread being destroyed.  This needs to be done via a compare-and-swap because
     * some other processor could change the owner while we are clearing it.  It turns out that
     * this is the only place where we need the interlock; normal use of the owner field is cpu-local
     * and doesn't need the interlock.  Because we are called during termination, and a thread
     * terminates itself, the context on other processors has been saved (because we save it as
     * part of the context switch), even if it is still considered live.  Since the dead thread is
     * not running elsewhere, and the context is saved, any other processor looking at the owner
     * field will not attempt to save context again, meaning that it doesn't matter if the owner
     * changes out from under it.
     */

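    /* hw_compare_and_store(act, 0, &owner) below clears an owner field
     * only when it still names this activation; this is the
     * compare-and-swap interlock described above. */
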
    /*
     * free VMX and FPU saveareas.  do not free user save areas.
     * user VMX and FPU saveareas, if any, i'm told are last in
     * the chain so we just stop if we find them.
     * we identify user VMX and FPU saveareas when we find a pcb
     * with a save level of 0.  we identify user regular save
     * areas when we find one with MSR_PR set.
     */

    pcb = act->mact.VMX_pcb;                    /* Get the top vector savearea */
    while(pcb) {                                /* Any VMX saved state? */
        svp = (savearea *)pcb;                  /* save lots of casting later */
        if (svp->save_level_vec == 0) break;    /* done when hit user if any */
        pcb = (pcb_t)svp->save_prev_vector;     /* Get the one underneath ours */
        svp->save_flags &= ~SAVvmxvalid;        /* Clear the VMX flag */
        if(!(svp->save_flags & SAVinuse)) {     /* Anyone left with this one? */

            save_ret(svp);                      /* release it */
        }
    }
    act->mact.VMX_pcb = pcb;
    if (act->mact.VMX_lvl != 0) {
        for(i = 0; i < real_ncpus; i++) {       /* Cycle through processors */
            (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);     /* Clear if ours */
        }
    }

    pcb = act->mact.FPU_pcb;                    /* Get the top floating point savearea */
    while(pcb) {                                /* Any floating point saved state? */
        svp = (savearea *)pcb;
        if (svp->save_level_fp == 0) break;     /* done when hit user if any */
        pcb = (pcb_t)svp->save_prev_float;      /* Get the one underneath ours */
        svp->save_flags &= ~SAVfpuvalid;        /* Clear the floating point flag */
        if(!(svp->save_flags & SAVinuse)) {     /* Anyone left with this one? */
            save_ret(svp);                      /* Nope, release it */
        }
    }
    act->mact.FPU_pcb = pcb;
    if (act->mact.FPU_lvl != 0) {
        for(i = 0; i < real_ncpus; i++) {       /* Cycle through processors */
            (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);     /* Clear if ours */
        }
    }

    /*
     * free all regular saveareas except a user savearea, if any
     */

    pcb = act->mact.pcb;
    userpcb = (pcb_t)0;
    while(pcb) {
        svp = (savearea *)pcb;
        if ((svp->save_srr1 & MASK(MSR_PR))) {
            assert(userpcb == (pcb_t)0);
            userpcb = pcb;
            svp = (savearea *)userpcb;
            npcb = (pcb_t)svp->save_prev;
            svp->save_prev = (struct savearea *)0;
        } else {
            svp->save_flags &= ~SAVattach;      /* Clear the attached flag */
            npcb = (pcb_t)svp->save_prev;
            if(!(svp->save_flags & SAVinuse))   /* Anyone left with this one? */
                save_ret(svp);
        }
        pcb = npcb;
    }
    act->mact.pcb = userpcb;

}


/*
 * act_virtual_machine_destroy:
 * Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
    if(act->mact.bbDescAddr) {          /* Check if the Blue box assist is active */
        disable_bluebox_internal(act);  /* Kill off bluebox */
    }

    if(act->mact.vmmControl) {          /* Check if VMM is active */
        vmm_tear_down_all(act);         /* Kill off all VMM contexts */
    }
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{
    register pcb_t pcb, opcb;
    int i;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_destroy(0x%x)\n", act);
#endif /* MACH_ASSERT */

    act_virtual_machine_destroy(act);

    /*
     * This next bit ensures that any live facility context for this thread is discarded on every processor
     * that may have it.  We go through all per-processor blocks and zero the facility owner if
     * it is the thread being destroyed.  This needs to be done via a compare-and-swap because
     * some other processor could change the owner while we are clearing it.  It turns out that
     * this is the only place where we need the interlock; normal use of the owner field is cpu-local
     * and doesn't need the interlock.  Because we are called during termination, and a thread
     * terminates itself, the context on other processors has been saved (because we save it as
     * part of the context switch), even if it is still considered live.  Since the dead thread is
     * not running elsewhere, and the context is saved, any other processor looking at the owner
     * field will not attempt to save context again, meaning that it doesn't matter if the owner
     * changes out from under it.
     */

    for(i = 0; i < real_ncpus; i++) {           /* Cycle through processors */
        (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);     /* Clear if ours */
        (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);     /* Clear if ours */
    }

    pcb = act->mact.VMX_pcb;                    /* Get the top vector savearea */
    while(pcb) {                                /* Any VMX saved state? */
        opcb = pcb;                             /* Save current savearea address */
        pcb = (pcb_t)(((savearea *)pcb)->save_prev_vector);     /* Get the one underneath ours */
        ((savearea *)opcb)->save_flags &= ~SAVvmxvalid;         /* Clear the VMX flag */

        if(!(((savearea *)opcb)->save_flags & SAVinuse)) {      /* Anyone left with this one? */
            save_release((savearea *)opcb);     /* Nope, release it */
        }
    }
    act->mact.VMX_pcb = (pcb_t)0;               /* Clear pointer */

    pcb = act->mact.FPU_pcb;                    /* Get the top floating point savearea */
    while(pcb) {                                /* Any floating point saved state? */
        opcb = pcb;                             /* Save current savearea address */
        pcb = (pcb_t)(((savearea *)pcb)->save_prev_float);      /* Get the one underneath ours */
        ((savearea *)opcb)->save_flags &= ~SAVfpuvalid;         /* Clear the floating point flag */

        if(!(((savearea *)opcb)->save_flags & SAVinuse)) {      /* Anyone left with this one? */
            save_release((savearea *)opcb);     /* Nope, release it */
        }
    }
    act->mact.FPU_pcb = (pcb_t)0;               /* Clear pointer */

    pcb = act->mact.pcb;                        /* Get the top normal savearea */
    act->mact.pcb = (pcb_t)0;                   /* Clear pointer */

    while(pcb) {                                /* Any normal saved state left? */
        opcb = pcb;                             /* Keep track of what we're working on */
        pcb = (pcb_t)(((savearea *)pcb)->save_prev);    /* Get the one underneath ours */

        ((savearea *)opcb)->save_flags = 0;     /* Clear all flags since we release this in any case */
        save_release((savearea *)opcb);         /* Release this one */
    }

    hw_atomic_sub(&saveanchor.saveneed, 4);     /* Unaccount for the number of saveareas we think we "need"
                                                   for this activation */
}

kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
    /*
     * Clear & Init the pcb (sets up user-mode s regs)
     * We don't use this anymore.
     */

    return KERN_SUCCESS;
}

void act_machine_init()
{
#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_init()\n");
#endif /* MACH_ASSERT */

    /* Good to verify these once */
    assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

    assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
    assert( THREAD_STATE_MAX >= sizeof(struct ppc_saved_state)/sizeof(int));

    /*
     * If we start using kernel activations,
     * we would normally create kernel_thread_pool here,
     * populating it from the act_zone
     */
}

void
act_machine_return(int code)
{
    thread_act_t thr_act = current_act();

#if MACH_ASSERT
    if (watchacts & WA_EXIT)
        printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
               code, thr_act, thr_act->ref_count,
               thr_act->thread, thr_act->thread->ref_count);
#endif /* MACH_ASSERT */


    /*
     * This code is called with nothing locked.
     * It also returns with nothing locked, if it returns.
     *
     * This routine terminates the current thread activation.
     * If this is the only activation associated with its
     * thread shuttle, then the entire thread (shuttle plus
     * activation) is terminated.
     */
    assert( code == KERN_TERMINATED );
    assert( thr_act );

    act_lock_thread(thr_act);

#ifdef CALLOUT_RPC_MODEL
    /*
     * JMM - This needs to get cleaned up to work under the much simpler
     * return (instead of callout) model.
     */
    if (thr_act->thread->top_act != thr_act) {
        /*
         * this is not the top activation;
         * if possible, we should clone the shuttle so that
         * both the root RPC-chain and the soon-to-be-orphaned
         * RPC-chain have shuttles
         *
         * JMM - Cloning is a horrible idea!  Instead we should alert
         * the pieces upstream to return the shuttle.  We will use
         * alerts for this.
         */
        act_unlock_thread(thr_act);
        panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
    }

    if (thr_act->lower != THR_ACT_NULL) {
        thread_t        cur_thread = current_thread();
        thread_act_t    cur_act;
        struct ipc_port *iplock;

        /* terminate only this activation; send an appropriate
         * return code back to the activation that invoked us. */
        iplock = thr_act->pool_port;    /* remember for unlock call */
        thr_act->lower->alerts |= SERVER_TERMINATED;
        install_special_handler(thr_act->lower);

        /* Return to previous act with error code */

        act_locked_act_reference(thr_act);      /* keep it around */
        act_switch_swapcheck(cur_thread, (ipc_port_t)0);

        (void) switch_act(THR_ACT_NULL);
        /* assert(thr_act->ref_count == 0); */  /* XXX */
        cur_act = cur_thread->top_act;
        MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;
        machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
        /*
         * The following unlocks must be done separately since fields
         * used by `act_unlock_thread()' have been cleared, meaning
         * that it would not release all of the appropriate locks.
         */
        rpc_unlock(cur_thread);
        if (iplock) ip_unlock(iplock);          /* must be done separately */
        act_unlock(thr_act);
        act_deallocate(thr_act);                /* free it */
        Load_context(cur_thread);
        /*NOTREACHED*/

        panic("act_machine_return: TALKING ZOMBIE! (2)");
    }

#endif /* CALLOUT_RPC_MODEL */

    /* This is the only activation attached to the shuttle... */

    assert(thr_act->thread->top_act == thr_act);
    act_unlock_thread(thr_act);
    thread_terminate_self();

    /*NOTREACHED*/
    panic("act_machine_return: TALKING ZOMBIE! (1)");
}

void
thread_machine_set_current(struct thread_shuttle *thread)
{
    register int my_cpu = cpu_number();

    cpu_data[my_cpu].active_thread = thread;

    active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
    panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if MACH_ASSERT
void
dump_pcb(pcb_t pcb)
{
    printf("pcb @ %8.8x:\n", pcb);
#if DEBUG
    regDump(&pcb->ss);
#endif /* DEBUG */
}

void
dump_thread(thread_t th)
{
    printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
           thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
           thr_act->alerts, thr_act->alert_mask,
           thr_act->suspend_count, thr_act->active,
           thr_act->higher, thr_act->lower);

    return((int)thr_act);
}

#endif

unsigned int
get_useraddr()
{

    thread_act_t thr_act = current_act();

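    /* srr0 in the user savearea holds the PC at the point the thread
     * last entered the kernel, i.e. its user-mode address. */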
    return(thr_act->mact.pcb->ss.srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
                 thread, thread->priority,
                 thread->sched_pri, 0,
                 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread.  if there is no save
 * area we allocate one.  the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR.  if the top
 * save area is the user save area, bad things will
 * happen.
 *
 */

void
stack_attach(struct thread_shuttle *thread,
             vm_offset_t stack,
             void (*start_pos)(thread_t))
{
    thread_act_t thr_act;
    unsigned int *kss;
    struct savearea *sv;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
                 thread, thread->priority,
                 thread->sched_pri, start_pos,
                 0);

    assert(stack);
    kss = (unsigned int *)STACK_IKS(stack);
    thread->kernel_stack = stack;

    /* during initialization we sometimes do not have an
       activation. in that case do not do anything */
    if ((thr_act = thread->top_act) != 0) {
        sv = save_get();    /* cannot block */
        // bzero((char *) sv, sizeof(struct pcb));
        sv->save_act = thr_act;
        sv->save_prev = (struct savearea *)thr_act->mact.pcb;
        thr_act->mact.pcb = (pcb_t)sv;

        sv->save_srr0 = (unsigned int) start_pos;
        /* sv->save_r3 = ARG ? */
        sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
        sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
        sv->save_xfpscrpad = 0;     /* Start with a clear fpscr */
        sv->save_xfpscr = 0;        /* Start with a clear fpscr */
        *((int *)sv->save_r1) = 0;
        thr_act->mact.ksp = 0;
    }

    return;
}

/*
 * move a stack from old to new thread
 */

void
stack_handoff(thread_t old,
              thread_t new)
{

    vm_offset_t stack;
    pmap_t new_pmap;

    assert(new->top_act);
    assert(old->top_act);

    stack = stack_detach(old);
    new->kernel_stack = stack;

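    /* Handing the stack straight from old to new lets this fast path
     * skip freeing and reallocating a kernel stack. */
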
    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;

#if NCPUS > 1
    if (real_ncpus > 1) {
        fpu_save(old->top_act);
        vec_save(old->top_act);
    }
#endif

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
                          (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


    if(new->top_act->mact.specFlags & runningVM) {              /* Is the new guy running a VM? */
        pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);     /* Switch to the VM's pmap */
    }
    else {                                                      /* otherwise, we use the task's pmap */
        new_pmap = new->top_act->task->map->pmap;
        if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

    thread_machine_set_current(new);
    active_stacks[cpu_number()] = new->kernel_stack;
    per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self;
#if 1
    per_proc_info[cpu_number()].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
    per_proc_info[cpu_number()].spcFlags = new->top_act->mact.specFlags;
#endif
    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;

    return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void))
{

    unsigned int *kss;
    vm_offset_t tsp;

    assert(current_thread()->kernel_stack);
    kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
    assert(continuation);

    tsp = (vm_offset_t)((int)kss - KF_SIZE);
    assert(tsp);
    *((int *)tsp) = 0;
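    /* tsp now points at a zeroed top frame KF_SIZE below the stack
     * top, so the continuation starts on a clean kernel stack. */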

    Call_continuation(continuation, tsp);

    return;
}

void
thread_swapin_mach_alloc(thread_t thread)
{
    struct savearea *sv;

    assert(thread->top_act->mact.pcb == 0);

    sv = save_alloc();
    assert(sv);
    // bzero((char *) sv, sizeof(struct pcb));
    sv->save_act = thread->top_act;
    thread->top_act->mact.pcb = (pcb_t)sv;

}