/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Copyright (c) 1990,1991,1992 The University of Utah and
 * the Center for Software Science (CSS). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pcb.c 1.23 92/06/27$
 */

#include <cpus.h>
#include <debug.h>

#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>

#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>

extern int real_ncpus;						/* Number of actual CPUs */
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */

/*
 * These constants are dumb. They should not be in asm.h!
 */

#define KF_SIZE		(FM_SIZE+ARG_SIZE+FM_REDZONE)

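/*
 * KF_SIZE appears to be the size reserved for the initial kernel stack
 * frame: the fixed frame header (FM_SIZE), the argument save area
 * (ARG_SIZE), and the stack red zone (FM_REDZONE). This reading is
 * inferred from the component names, not documented here.
 */
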
#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

extern struct thread_shuttle *Switch_context(
	struct thread_shuttle	*old,
	void			(*cont)(void),
	struct thread_shuttle	*new);


#if MACH_LDEBUG || MACH_KDB
void log_thread_action(char *, long, long, long);
#endif


/*
 * consider_machine_collect: try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
	/*
	 * none currently available
	 */
	return;
}

void
consider_machine_adjust()
{
	consider_mapping_adjust();
}


/*
 * machine_kernel_stack_init: initialize the savearea at the base of a
 * thread's kernel stack so the thread starts executing at start_pos.
 */
void
machine_kernel_stack_init(
	struct thread_shuttle *thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;
	unsigned int	*kss;
	struct savearea	*sv;

	assert(thread->top_act->mact.pcb);
	assert(thread->kernel_stack);
	stack = thread->kernel_stack;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread, stack, start_pos);
#endif	/* MACH_ASSERT */

	kss = (unsigned int *)STACK_IKS(stack);
	sv = (savearea *)(thread->top_act->mact.pcb);		/* This for the sake of C */

	sv->save_lr = (unsigned int) start_pos;			/* Set up the execution address */
	sv->save_srr0 = (unsigned int) start_pos;		/* Here too */
	sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;			/* Set the normal running MSR */
	sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);	/* Point to the top frame on the stack */
	sv->save_xfpscrpad = 0;					/* Start with a clear fpscr */
	sv->save_xfpscr = 0;					/* Start with a clear fpscr */

	*((int *)sv->save_r1) = 0;				/* Zero the frame backpointer */
	thread->top_act->mact.ksp = 0;				/* Show that the kernel stack is in use already */

}

/*
 * switch_context: Switch from one thread to another, needed for
 *		   switching of space
 *
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
#if MACH_LDEBUG || MACH_KDB
	log_thread_action("switch",
			  (long)old,
			  (long)new,
			  (long)__builtin_return_address(0));
#endif
	per_proc_info[cpu_number()].old_thread = old;
	per_proc_info[cpu_number()].cpu_flags &= ~traceBE;	/* disable branch tracing if on */
	assert(old_act->kernel_loaded ||
	       active_stacks[cpu_number()] == old_act->thread->kernel_stack);

	check_simple_locks();

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {			/* This is potentially slow, so only do when actually SMP */
		fpu_save(old_act);		/* Save floating point if used */
		vec_save(old_act);		/* Save vector if used */
	}

#if DEBUG
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old, continuation, new);
	}
#endif /* DEBUG */

	/*
	 * We do not have to worry about the PMAP module, so switch.
	 *
	 * We must not use top_act->map since this may not be the actual
	 * task map, but the map being used for a klcopyin/out.
	 */

	if(new_act->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {							/* otherwise, we use the task's pmap */
		new_pmap = new_act->task->map->pmap;
		if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);			/* Switch if there is a change */
		}
	}

	/* Sanity check - is the stack pointer inside the stack that
	 * we're about to switch to? Is the execution address within
	 * the kernel's VM space??
	 */
#if 0
	printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n",
	       new->kernel_stack, new_act->mact.pcb->ss.r1,
	       new_act->mact.pcb->ss.lr, old, continuation, new);	/* (TEST/DEBUG) */
	assert((new->kernel_stack < new_act->mact.pcb->ss.r1) &&
	       ((unsigned int)STACK_IKS(new->kernel_stack) >
		new_act->mact.pcb->ss.r1));
	assert(new_act->mact.pcb->ss.lr < VM_MAX_KERNEL_ADDRESS);
#endif


	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	retval = Switch_context(old, continuation, new);
	assert(retval != (struct thread_shuttle*)NULL);

	if (branch_tracing_enabled())
		per_proc_info[cpu_number()].cpu_flags |= traceBE;	/* restore branch tracing */

	/* We've returned from having switched context, so we should be
	 * back in the original context.
	 */

	return retval;
}

/*
 * Alter the thread's state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t retval)
{
	struct ppc_saved_state *ssp = &thread->top_act->mact.pcb->ss;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread, retval);
#endif	/* MACH_ASSERT */

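	/* r3 is the return-value register in the PowerPC calling
	   convention, so the syscall result is delivered to the
	   user thread in r3. */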
	ssp->r3 = retval;
}

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(
	struct thread_shuttle *thread,
	thread_act_t thr_act,
	void (*start_pos)(thread_t))
{

	savearea	*sv;				/* Pointer to newly allocated savearea */
	unsigned int	*CIsTooLimited, i;


#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */

	hw_atomic_add(&saveanchor.saveneed, 4);		/* Account for the number of saveareas we think we "need"
							   for this activation */
	assert(thr_act->mact.pcb == (pcb_t)0);		/* Make sure there was no previous savearea */

	sv = save_alloc();				/* Go get us a savearea */

	bzero((char *) sv, sizeof(struct pcb));		/* Clear out the whole shebang */

	sv->save_act = thr_act;				/* Set who owns it */
	sv->save_vrsave = 0;
	thr_act->mact.pcb = (pcb_t)sv;			/* Point to the save area */

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif	/* MACH_ASSERT */
	/*
	 * User threads will pull their context from the pcb when first
	 * returning to user mode, so fill in all the necessary values.
	 * Kernel threads are initialized from the save state structure
	 * at the base of the kernel stack (see stack_attach()).
	 */

	sv->save_srr1 = MSR_EXPORT_MASK_SET;		/* Set the default user MSR */

	CIsTooLimited = (unsigned int *)(&sv->save_sr0);	/* Make a pointer 'cause C can't cast on the left */
	for(i = 0; i < 16; i++) {				/* Initialize all SRs */
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;	/* Set the SR value */
	}
	sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM << 20) | thr_act->task->map->pmap->space;	/* Default the copyin */
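
	/* The segment register values built above appear to pack three
	   fields (an inference from the names, not documented here):
	   protection bits (SEG_REG_PROT), the segment number in the high
	   VSID bits (i << 20), and the pmap's address space ID, giving
	   each task its own set of virtual segments. */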

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy(thread_t thread)
{
	spl_t s;

	if (thread->kernel_stack) {
		s = splsched();
		stack_free(thread);
		splx(s);
	}
}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush(thread_act_t cur_act)
{
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	pmap_t		new_pmap;

	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	if(real_ncpus > 1) {			/* This is potentially slow, so only do when actually SMP */
		fpu_save(old);			/* Save floating point if used */
		vec_save(old);			/* Save vector if used */
	}

	active_stacks[cpu] = thread->kernel_stack;

	ast_context(new, cpu);

	/* Activations might have different pmaps
	 * (process->kernel->server, for example).
	 * Change space if needed
	 */

	if(new->mact.specFlags & runningVM) {		/* Is the new guy running a VM? */
		pmap_switch(new->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {						/* otherwise, we use the task's pmap */
		new_pmap = new->task->map->pmap;
		if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

}

void
pcb_user_to_kernel(thread_act_t act)
{

	return;						/* Not needed, I hope... */
}


/*
 * act_machine_sv_free
 * release saveareas associated with an act. user-level saveareas,
 * if any, are left in place.
 *
 * this code cannot block so we call the proper save area free routine
 */
void
act_machine_sv_free(thread_act_t act)
{
	register pcb_t pcb, userpcb, npcb;
	register savearea *svp;
	register int i;

	/*
	 * This next bit ensures that any live facility context for this thread is discarded on every processor
	 * that may have it. We go through all per-processor blocks and zero the facility owner if
	 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
	 * some other processor could change the owner while we are clearing it. It turns out that
	 * this is the only place where we need the interlock; normal use of the owner field is cpu-local
	 * and doesn't need the interlock. Because we are called during termination, and a thread
	 * terminates itself, the context on other processors has been saved (because we save it as
	 * part of the context switch), even if it is still considered live. Since the dead thread is
	 * not running elsewhere, and the context is saved, any other processor looking at the owner
	 * field will not attempt to save context again, meaning that it doesn't matter if the owner
	 * changes out from under it.
	 */

	/*
	 * free VMX and FPU saveareas. do not free user save areas.
	 * user VMX and FPU saveareas, if any, I'm told are last in
	 * the chain so we just stop if we find them.
	 * we identify user VMX and FPU saveareas when we find a pcb
	 * with a save level of 0. we identify user regular save
	 * areas when we find one with MSR_PR set
	 */

	pcb = act->mact.VMX_pcb;			/* Get the top vector savearea */
	while(pcb) {					/* Any VMX saved state? */
		svp = (savearea *)pcb;			/* save lots of casting later */
		if (svp->save_level_vec == 0) break;	/* done when hit user if any */
		pcb = (pcb_t)svp->save_prev_vector;	/* Get one underneath ours */
		svp->save_flags &= ~SAVvmxvalid;	/* Clear the VMX flag */
		if(!(svp->save_flags & SAVinuse)) {	/* Anyone left with this one? */

			save_ret(svp);			/* release it */
		}
	}
	act->mact.VMX_pcb = pcb;
	if (act->mact.VMX_lvl != 0) {
		for(i = 0; i < real_ncpus; i++) {	/* Cycle through processors */
			(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);	/* Clear if ours */
		}
	}

	pcb = act->mact.FPU_pcb;			/* Get the top floating point savearea */
	while(pcb) {					/* Any floating point saved state? */
		svp = (savearea *)pcb;
		if (svp->save_level_fp == 0) break;	/* done when hit user if any */
		pcb = (pcb_t)svp->save_prev_float;	/* Get one underneath ours */
		svp->save_flags &= ~SAVfpuvalid;	/* Clear the floating point flag */
		if(!(svp->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_ret(svp);			/* Nope, release it */
		}
	}
	act->mact.FPU_pcb = pcb;
	if (act->mact.FPU_lvl != 0) {
		for(i = 0; i < real_ncpus; i++) {	/* Cycle through processors */
			(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);	/* Clear if ours */
		}
	}

	/*
	 * free all regular saveareas except a user savearea, if any
	 */

	pcb = act->mact.pcb;
	userpcb = (pcb_t)0;
	while(pcb) {
		svp = (savearea *)pcb;
		if ((svp->save_srr1 & MASK(MSR_PR))) {
			assert(userpcb == (pcb_t)0);
			userpcb = pcb;
			svp = (savearea *)userpcb;
			npcb = (pcb_t)svp->save_prev;
			svp->save_prev = (struct savearea *)0;
		} else {
			svp->save_flags &= ~SAVattach;	/* Clear the attached flag */
			npcb = (pcb_t)svp->save_prev;
			if(!(svp->save_flags & SAVinuse))	/* Anyone left with this one? */
				save_ret(svp);
		}
		pcb = npcb;
	}
	act->mact.pcb = userpcb;

}


/*
 * act_virtual_machine_destroy:
 *	Shutdown any virtual machines associated with a thread
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if(act->mact.bbDescAddr) {			/* Check if the Blue box assist is active */
		disable_bluebox_internal(act);		/* Kill off bluebox */
	}

	if(act->mact.vmmControl) {			/* Check if VMM is active */
		vmm_tear_down_all(act);			/* Kill off all VMM contexts */
	}
}

/*
 * act_machine_destroy: Shutdown any state associated with a thread pcb.
 */
void
act_machine_destroy(thread_act_t act)
{
	register pcb_t pcb, opcb;
	int i;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif	/* MACH_ASSERT */

	act_virtual_machine_destroy(act);

	/*
	 * This next bit ensures that any live facility context for this thread is discarded on every processor
	 * that may have it. We go through all per-processor blocks and zero the facility owner if
	 * it is the thread being destroyed. This needs to be done via a compare-and-swap because
	 * some other processor could change the owner while we are clearing it. It turns out that
	 * this is the only place where we need the interlock; normal use of the owner field is cpu-local
	 * and doesn't need the interlock. Because we are called during termination, and a thread
	 * terminates itself, the context on other processors has been saved (because we save it as
	 * part of the context switch), even if it is still considered live. Since the dead thread is
	 * not running elsewhere, and the context is saved, any other processor looking at the owner
	 * field will not attempt to save context again, meaning that it doesn't matter if the owner
	 * changes out from under it.
	 */

	for(i = 0; i < real_ncpus; i++) {		/* Cycle through processors */
		(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);	/* Clear if ours */
		(void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);	/* Clear if ours */
	}

	pcb = act->mact.VMX_pcb;			/* Get the top vector savearea */
	while(pcb) {					/* Any VMX saved state? */
		opcb = pcb;				/* Save current savearea address */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev_vector);	/* Get one underneath ours */
		((savearea *)opcb)->save_flags &= ~SAVvmxvalid;		/* Clear the VMX flag */

		if(!(((savearea *)opcb)->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_release((savearea *)opcb);			/* Nope, release it */
		}
	}
	act->mact.VMX_pcb = (pcb_t)0;			/* Clear pointer */

	pcb = act->mact.FPU_pcb;			/* Get the top floating point savearea */
	while(pcb) {					/* Any floating point saved state? */
		opcb = pcb;				/* Save current savearea address */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev_float);	/* Get one underneath ours */
		((savearea *)opcb)->save_flags &= ~SAVfpuvalid;		/* Clear the floating point flag */

		if(!(((savearea *)opcb)->save_flags & SAVinuse)) {	/* Anyone left with this one? */
			save_release((savearea *)opcb);			/* Nope, release it */
		}
	}
	act->mact.FPU_pcb = (pcb_t)0;			/* Clear pointer */

	pcb = act->mact.pcb;				/* Get the top normal savearea */
	act->mact.pcb = (pcb_t)0;			/* Clear pointer */

	while(pcb) {					/* Any normal saved state left? */
		opcb = pcb;				/* Keep track of what we're working on */
		pcb = (pcb_t)(((savearea *)pcb)->save_prev);	/* Get one underneath ours */

		((savearea *)opcb)->save_flags = 0;	/* Clear all flags since we release this in any case */
		save_release((savearea *)opcb);		/* Release this one */
	}

	hw_atomic_sub(&saveanchor.saveneed, 4);		/* Unaccount for the number of saveareas we think we "need"
							   for this activation */
}

kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	/*
	 * Clear & Init the pcb (sets up user-mode s regs)
	 * We don't use this anymore.
	 */

	register pcb_t pcb;
	register int i;
	unsigned int *CIsTooLimited;
	pmap_t pmap;

	return KERN_SUCCESS;
}

void
act_machine_init()
{
#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify these once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
	assert( THREAD_STATE_MAX >= sizeof(struct ppc_saved_state)/sizeof(int));

	/*
	 * If we start using kernel activations,
	 * would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}

void
act_machine_return(int code)
{
	thread_act_t thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
		       code, thr_act, thr_act->ref_count,
		       thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */


	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );

	act_lock_thread(thr_act);

#ifdef CALLOUT_RPC_MODEL
	/*
	 * JMM - This needs to get cleaned up to work under the much simpler
	 * return (instead of callout) model.
	 */
	if (thr_act->thread->top_act != thr_act) {
		/*
		 * this is not the top activation;
		 * if possible, we should clone the shuttle so that
		 * both the root RPC-chain and the soon-to-be-orphaned
		 * RPC-chain have shuttles
		 *
		 * JMM - Cloning is a horrible idea! Instead we should alert
		 * the pieces upstream to return the shuttle. We will use
		 * alerts for this.
		 */
		act_unlock_thread(thr_act);
		panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
	}

	if (thr_act->lower != THR_ACT_NULL) {
		thread_t	cur_thread = current_thread();
		thread_act_t	cur_act;
		struct ipc_port	*iplock;

		/* terminate the entire thread (shuttle plus activation) */
		/* terminate only this activation, send an appropriate   */
		/* return code back to the activation that invoked us.   */
		iplock = thr_act->pool_port;		/* remember for unlock call */
		thr_act->lower->alerts |= SERVER_TERMINATED;
		install_special_handler(thr_act->lower);

		/* Return to previous act with error code */

		act_locked_act_reference(thr_act);	/* keep it around */
		act_switch_swapcheck(cur_thread, (ipc_port_t)0);

		(void) switch_act(THR_ACT_NULL);
		/* assert(thr_act->ref_count == 0); */	/* XXX */
		cur_act = cur_thread->top_act;
		MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;
		machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
		/*
		 * The following unlocks must be done separately since fields
		 * used by `act_unlock_thread()' have been cleared, meaning
		 * that it would not release all of the appropriate locks.
		 */
		rpc_unlock(cur_thread);
		if (iplock) ip_unlock(iplock);		/* must be done separately */
		act_unlock(thr_act);
		act_deallocate(thr_act);		/* free it */
		Load_context(cur_thread);
		/*NOTREACHED*/

		panic("act_machine_return: TALKING ZOMBIE! (2)");
	}

#endif /* CALLOUT_RPC_MODEL */

	/* This is the only activation attached to the shuttle... */

	assert(thr_act->thread->top_act == thr_act);
	act_unlock_thread(thr_act);
	thread_terminate_self();

	/*NOTREACHED*/
	panic("act_machine_return: TALKING ZOMBIE! (1)");
}

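/*
 * thread_machine_set_current: record the given thread as the active
 * thread on this processor, remembering its top activation when that
 * activation is kernel-loaded. (Descriptive comment added; the function
 * had none.)
 */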
void
thread_machine_set_current(struct thread_shuttle *thread)
{
	register int my_cpu = cpu_number();

	cpu_data[my_cpu].active_thread = thread;

	active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}

void
thread_machine_init(void)
{
#ifdef	MACHINE_STACK
#if	KERNEL_STACK_SIZE > PPC_PGBYTES
	panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}

#if	MACH_ASSERT
void
dump_pcb(pcb_t pcb)
{
	printf("pcb @ %8.8x:\n", pcb);
#if DEBUG
	regDump(&pcb->ss);
#endif /* DEBUG */
}

void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}

int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
	       thr_act->alerts, thr_act->alert_mask,
	       thr_act->suspend_count, thr_act->active,
	       thr_act->higher, thr_act->lower);

	return((int)thr_act);
}

#endif

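/*
 * get_useraddr: return the user-mode instruction address (SRR0) saved
 * in the current activation's pcb, i.e. where the thread was executing
 * in user space when it entered the kernel. (Descriptive comment added;
 * the function had none.)
 */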
unsigned int
get_useraddr()
{

	thread_act_t thr_act = current_act();

	return(thr_act->mact.pcb->ss.srr0);
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 *
 * attaches a stack to a thread. if there is no save
 * area we allocate one. the top save area is then
 * loaded with the pc (continuation address), the initial
 * stack pointer, and a std kernel MSR. if the top
 * save area is the user save area bad things will
 * happen
 *
 */

void
stack_attach(struct thread_shuttle *thread,
	     vm_offset_t stack,
	     void (*start_pos)(thread_t))
{
	thread_act_t thr_act;
	unsigned int *kss;
	struct savearea *sv;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, start_pos,
		     0);

	assert(stack);
	kss = (unsigned int *)STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* during initialization we sometimes do not have an
	   activation. in that case do not do anything */
	if ((thr_act = thread->top_act) != 0) {
		sv = save_get();		/* cannot block */
		// bzero((char *) sv, sizeof(struct pcb));
		sv->save_act = thr_act;
		sv->save_prev = (struct savearea *)thr_act->mact.pcb;
		thr_act->mact.pcb = (pcb_t)sv;

		sv->save_srr0 = (unsigned int) start_pos;
		/* sv->save_r3 = ARG ? */
		sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
		sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
		sv->save_xfpscrpad = 0;		/* Start with a clear fpscr */
		sv->save_xfpscr = 0;		/* Start with a clear fpscr */
		*((int *)sv->save_r1) = 0;
		thr_act->mact.ksp = 0;
	}

	return;
}

/*
 * move a stack from old to new thread
 */
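/*
 * (Beyond the stack move itself, the body below also flushes any live
 * floating-point and vector state back to the old thread's saveareas
 * on SMP, switches the address space, and refreshes the per-processor
 * state for the new thread.)
 */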

void
stack_handoff(thread_t old,
	      thread_t new)
{

	vm_offset_t stack;
	pmap_t new_pmap;

	assert(new->top_act);
	assert(old->top_act);

	stack = stack_detach(old);
	new->kernel_stack = stack;

	per_proc_info[cpu_number()].cpu_flags &= ~traceBE;

#if NCPUS > 1
	if (real_ncpus > 1) {
		fpu_save(old->top_act);
		vec_save(old->top_act);
	}
#endif

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
			      (int)old, (int)new, old->sched_pri, new->sched_pri, 0);


	if(new->top_act->mact.specFlags & runningVM) {			/* Is the new guy running a VM? */
		pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);	/* Switch to the VM's pmap */
	}
	else {								/* otherwise, we use the task's pmap */
		new_pmap = new->top_act->task->map->pmap;
		if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
			pmap_switch(new_pmap);
		}
	}

	thread_machine_set_current(new);
	active_stacks[cpu_number()] = new->kernel_stack;
	per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self;
#if 1
	per_proc_info[cpu_number()].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
	per_proc_info[cpu_number()].spcFlags = new->top_act->mact.specFlags;
#endif
	if (branch_tracing_enabled())
		per_proc_info[cpu_number()].cpu_flags |= traceBE;

	if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);	/* Cut trace entry if tracing */

	return;
}

/*
 * clean and initialize the current kernel stack and go to
 * the given continuation routine
 */

void
call_continuation(void (*continuation)(void))
{

	unsigned int *kss;
	vm_offset_t tsp;

	assert(current_thread()->kernel_stack);
	kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	assert(continuation);

	tsp = (vm_offset_t)((int)kss - KF_SIZE);
	assert(tsp);
	*((int *)tsp) = 0;

	Call_continuation(continuation, tsp);

	return;
}

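/*
 * thread_swapin_mach_alloc: allocate a fresh pcb savearea for a thread
 * being swapped back in; the thread must not already own one (asserted
 * below). (Descriptive comment added; the function had none.)
 */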
void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);

	sv = save_alloc();
	assert(sv);
	// bzero((char *) sv, sizeof(struct pcb));
	sv->save_act = thread->top_act;
	thread->top_act->mact.pcb = (pcb_t)sv;

}