/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <cpus.h>
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <mach/rpc.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int state_count[] = {
    /* FLAVOR_LIST */ 0,
    i386_NEW_THREAD_STATE_COUNT,
    i386_FLOAT_STATE_COUNT,
    i386_ISA_PORT_MAP_STATE_COUNT,
    i386_V86_ASSIST_STATE_COUNT,
    i386_REGS_SEGS_STATE_COUNT,
    i386_THREAD_SYSCALL_STATE_COUNT,
    /* THREAD_STATE_NONE */ 0,
    i386_SAVED_STATE_COUNT,
};
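
/*
 * Descriptive note: state_count[] appears to be indexed by thread state
 * flavor (matching the i386_*_STATE numbering), giving the number of
 * 32-bit words each flavor occupies; the two zero entries correspond to
 * THREAD_STATE_FLAVOR_LIST and THREAD_STATE_NONE, which have no fixed
 * state of their own.  Illustrative use only (a sketch, not code taken
 * from this file): a generic count check such as
 *
 *     if (count < state_count[flavor])
 *         return KERN_INVALID_ARGUMENT;
 *
 * mirrors the per-flavor count checks performed explicitly in
 * act_machine_set_state() and act_machine_get_state() below.
 */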

/* Forward */

void act_machine_throughcall(thread_act_t thr_act);
extern thread_t Switch_context(
    thread_t    old,
    void        (*cont)(void),
    thread_t    new);
extern void Thread_continue(void);
extern void Load_context(
    thread_t    thread);

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages.
 */
void
consider_machine_collect()
{
}

void
consider_machine_adjust()
{
}

/*
 * machine_kernel_stack_init:
 *
 * Initialize a kernel stack which has already been
 * attached to its thread_activation.
 */
void
machine_kernel_stack_init(
    thread_t    thread,
    void        (*start_pos)(thread_t))
{
    thread_act_t    thr_act = thread->top_act;
    vm_offset_t     stack;

    assert(thr_act);
    stack = thread->kernel_stack;
    assert(stack);

#if MACH_ASSERT
    if (watchacts & WA_PCB) {
        printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n",
               thread, stack, start_pos);
        printf("\tstack_iks=%x, stack_iel=%x\n",
               STACK_IKS(stack), STACK_IEL(stack));
    }
#endif /* MACH_ASSERT */

    /*
     * We want to run at start_pos, giving it as an argument
     * the return value from Load_context/Switch_context.
     * Thread_continue takes care of the mismatch between
     * the argument-passing/return-value conventions.
     * This function will not return normally,
     * so we don`t have to worry about a return address.
     */
    STACK_IKS(stack)->k_eip = (int) Thread_continue;
    STACK_IKS(stack)->k_ebx = (int) start_pos;
    STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);

    /*
     * Point top of kernel stack to user`s registers.
     */
    STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
}
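
/*
 * Resulting picture (a sketch, based only on the assignments above):
 * the i386_kernel_state at STACK_IKS(stack) is primed so that the first
 * switch onto this stack "returns" into Thread_continue with start_pos
 * in %ebx and %esp pointing at the exception link at STACK_IEL(stack),
 * whose saved_state field in turn points at the activation's PCB
 * (&pcb->iss), i.e. the user-mode register save area.
 */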


#if NCPUS > 1
#define curr_gdt(mycpu)     (mp_gdt[mycpu])
#define curr_ktss(mycpu)    (mp_ktss[mycpu])
#else
#define curr_gdt(mycpu)     (gdt)
#define curr_ktss(mycpu)    (&ktss)
#endif

#define gdt_desc_p(mycpu,sel) \
    ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
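
/*
 * Descriptive note: on a multiprocessor (NCPUS > 1) each CPU has its own
 * GDT and kernel TSS, so curr_gdt()/curr_ktss() select the per-CPU tables
 * indexed by CPU number; on a uniprocessor they collapse to the single
 * global gdt/ktss.  gdt_desc_p() then yields a pointer to the descriptor
 * for a given selector in whichever GDT is current on that CPU.
 */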

void
act_machine_switch_pcb( thread_act_t new_act )
{
    pcb_t   pcb = new_act->mact.pcb;
    int     mycpu;
    {
        register iopb_tss_t tss = pcb->ims.io_tss;
        vm_offset_t         pcb_stack_top;

        assert(new_act->thread != NULL);
        assert(new_act->thread->kernel_stack != 0);
        STACK_IEL(new_act->thread->kernel_stack)->saved_state =
            &new_act->mact.pcb->iss;

        /*
         * Save a pointer to the top of the "kernel" stack -
         * actually the place in the PCB where a trap into
         * kernel mode will push the registers.
         * The location depends on V8086 mode. If we are
         * not in V8086 mode, then a trap into the kernel
         * won`t save the v86 segments, so we leave room.
         */

        pcb_stack_top = (pcb->iss.efl & EFL_VM)
                        ? (int) (&pcb->iss + 1)
                        : (int) (&pcb->iss.v86_segs);

        mp_disable_preemption();
        mycpu = cpu_number();

        if (tss == 0) {
            /*
             * No per-thread IO permissions.
             * Use standard kernel TSS.
             */
            if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
                set_tr(KERNEL_TSS);
            curr_ktss(mycpu)->esp0 = pcb_stack_top;
        }
        else {
            /*
             * Set the IO permissions. Use this thread`s TSS.
             */
            *gdt_desc_p(mycpu,USER_TSS)
                = *(struct real_descriptor *)tss->iopb_desc;
            tss->tss.esp0 = pcb_stack_top;
            set_tr(USER_TSS);
            gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
        }
    }

    {
        register user_ldt_t ldt = pcb->ims.ldt;
        /*
         * Set the thread`s LDT.
         */
        if (ldt == 0) {
            /*
             * Use system LDT.
             */
            set_ldt(KERNEL_LDT);
        }
        else {
            /*
             * Thread has its own LDT.
             */
            *gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
            set_ldt(USER_LDT);
        }
    }
    mp_enable_preemption();
    /*
     * Load the floating-point context, if necessary.
     */
    fpu_load_context(pcb);

}
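
/*
 * Note on the above (descriptive comment only): esp0 in whichever TSS is
 * active is what the CPU loads as the kernel stack pointer on a
 * user-to-kernel transition, so pointing it into the PCB is what makes a
 * trap dump the user registers directly into pcb->iss.  The KERNEL_TSS
 * busy-bit handling avoids reloading TR with an already-busy descriptor,
 * which would fault.
 */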

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
    fpflush(cur_act);
}

/*
 * Switch to the first thread on a CPU.
 */
void
load_context(
    thread_t    new)
{
    act_machine_switch_pcb(new->top_act);
    Load_context(new);
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching. Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
    thread_t        thread,
    thread_act_t    old,
    thread_act_t    new,
    int             cpu)
{
    /*
     * Switch the vm, ast and pcb context.
     * Save FP registers if in use and set TS (task switch) bit.
     */
    fpu_save_context(thread);

    active_stacks[cpu] = thread->kernel_stack;
    ast_context(new, cpu);

    PMAP_SWITCH_CONTEXT(old, new, cpu);
    act_machine_switch_pcb(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
switch_context(
    thread_t    old,
    void        (*continuation)(void),
    thread_t    new)
{
    register thread_act_t   old_act = old->top_act,
                            new_act = new->top_act;

#if MACH_RT
    assert(old_act->kernel_loaded ||
           active_stacks[cpu_number()] == old_act->thread->kernel_stack);
    assert(get_preemption_level() == 1);
#endif
    check_simple_locks();

    /*
     * Save FP registers if in use.
     */
    fpu_save_context(old);

#if MACH_ASSERT
    if (watchacts & WA_SWITCH)
        printf("\tswitch_context(old=%x con=%x new=%x)\n",
               old, continuation, new);
#endif /* MACH_ASSERT */

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    {
        int mycpu = cpu_number();

        PMAP_SWITCH_CONTEXT(old_act, new_act, mycpu)
    }

    /*
     * Load the rest of the user state for the new thread.
     */
    act_machine_switch_pcb(new_act);
    return(Switch_context(old, continuation, new));
}
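
/*
 * Descriptive note: Switch_context() (assembly) is what actually stacks
 * the old thread's kernel state, or records the continuation, and
 * resumes the new thread; the thread_t it hands back is what the resumed
 * context sees as the previous thread, and switch_context() simply
 * passes that value through to its caller.
 */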

void
pcb_module_init(void)
{
    fpu_module_init();
    iopb_init();
}

void
pcb_init( register thread_act_t thr_act )
{
    register pcb_t pcb;

    assert(thr_act->mact.pcb == (pcb_t)0);
    pcb = thr_act->mact.pcb = &thr_act->mact.xxx_pcb;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("pcb_init(%x) pcb=%x\n", thr_act, pcb);
#endif /* MACH_ASSERT */

    /*
     * We can't let random values leak out to the user.
     * (however, act_create() zeroed the entire thr_act, mact, pcb)
     * bzero((char *) pcb, sizeof *pcb);
     */
    simple_lock_init(&pcb->lock, ETAP_MISC_PCB);

    /*
     * Guarantee that the bootstrapped thread will be in user
     * mode.
     */
    pcb->iss.cs = USER_CS;
    pcb->iss.ss = USER_DS;
    pcb->iss.ds = USER_DS;
    pcb->iss.es = USER_DS;
    pcb->iss.fs = USER_DS;
    pcb->iss.gs = USER_DS;
    pcb->iss.efl = EFL_USER_SET;
}
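
/*
 * Descriptive note: seeding iss with the USER_CS/USER_DS selectors and
 * EFL_USER_SET means that, even before any thread_set_state() call, a
 * return to user space from this PCB lands in flat 32-bit user segments
 * with sane EFLAGS rather than whatever stale values the save area might
 * otherwise contain.
 */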

/*
 * Adjust saved register state for thread belonging to task
 * created with kernel_task_create().
 */
void
pcb_user_to_kernel(
    thread_act_t    thr_act)
{
    register pcb_t pcb = thr_act->mact.pcb;

    pcb->iss.cs = KERNEL_CS;
    pcb->iss.ss = KERNEL_DS;
    pcb->iss.ds = KERNEL_DS;
    pcb->iss.es = KERNEL_DS;
    pcb->iss.fs = KERNEL_DS;
    pcb->iss.gs = CPU_DATA;
}

void
pcb_terminate(
    register thread_act_t   thr_act)
{
    register pcb_t pcb = thr_act->mact.pcb;

    assert(pcb);

    if (pcb->ims.io_tss != 0)
        iopb_destroy(pcb->ims.io_tss);
    if (pcb->ims.ifps != 0)
        fp_free(pcb->ims.ifps);
    if (pcb->ims.ldt != 0)
        user_ldt_free(pcb->ims.ldt);
    thr_act->mact.pcb = (pcb_t)0;
}
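
/*
 * Descriptive note: pcb_terminate() releases the three machine resources
 * a pcb can own - the per-thread I/O permission TSS, the lazily
 * allocated FP save area, and any private user LDT - and then clears
 * mact.pcb so the activation no longer references the embedded save
 * area.
 */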

/*
 * pcb_collect:
 *
 * Attempt to free excess pcb memory.
 */
void
pcb_collect(
    register thread_act_t   thr_act)
{
    /* accomplishes very little */
}

/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(thread_act_t act, int flag)
{

}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread. Called with "appropriate"
 * thread-related locks held (see act_lock_thread()), so
 * thr_act->thread is guaranteed not to change.
 */

kern_return_t
act_machine_set_state(
    thread_act_t            thr_act,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    int kernel_act = thr_act->kernel_loading ||
                     thr_act->kernel_loaded;

#if MACH_ASSERT
    if (watchacts & WA_STATE)
        printf("act_%x act_m_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n",
               current_act(), thr_act, flavor, tstate, count);
#endif /* MACH_ASSERT */

    switch (flavor) {
    case THREAD_SYSCALL_STATE:
    {
        register struct thread_syscall_state *state;
        register struct i386_saved_state *saved_state = USER_REGS(thr_act);

        state = (struct thread_syscall_state *) tstate;
        saved_state->eax = state->eax;
        saved_state->edx = state->edx;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
        saved_state->eip = state->eip;
        saved_state->uesp = state->esp;
        break;
    }

    case i386_SAVED_STATE:
    {
        register struct i386_saved_state *state;
        register struct i386_saved_state *saved_state;

        if (count < i386_SAVED_STATE_COUNT) {
            return(KERN_INVALID_ARGUMENT);
        }

        state = (struct i386_saved_state *) tstate;

        saved_state = USER_REGS(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
                               | EFL_USER_SET;

        /*
         * Segment registers. Set differently in V8086 mode.
         */
        if (state->efl & EFL_VM) {
            /*
             * Set V8086 mode segment registers.
             */
            saved_state->cs = state->cs & 0xffff;
            saved_state->ss = state->ss & 0xffff;
            saved_state->v86_segs.v86_ds = state->ds & 0xffff;
            saved_state->v86_segs.v86_es = state->es & 0xffff;
            saved_state->v86_segs.v86_fs = state->fs & 0xffff;
            saved_state->v86_segs.v86_gs = state->gs & 0xffff;

            /*
             * Zero protected mode segment registers.
             */
            saved_state->ds = 0;
            saved_state->es = 0;
            saved_state->fs = 0;
            saved_state->gs = 0;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                thr_act->mact.pcb->ims.v86s.flags =
                    state->efl & (EFL_TF | EFL_IF);
            }
        }
        else if (!kernel_act) {
            /*
             * 386 mode. Set segment registers for flat
             * 32-bit address space.
             */
            saved_state->cs = USER_CS;
            saved_state->ss = USER_DS;
            saved_state->ds = USER_DS;
            saved_state->es = USER_DS;
            saved_state->fs = USER_DS;
            saved_state->gs = USER_DS;
        }
        else {
            /*
             * User setting segment registers.
             * Code and stack selectors have already been
             * checked. Others will be reset by 'iret'
             * if they are not valid.
             */
            saved_state->cs = state->cs;
            saved_state->ss = state->ss;
            saved_state->ds = state->ds;
            saved_state->es = state->es;
            saved_state->fs = state->fs;
            saved_state->gs = state->gs;
        }
        break;
    }

    case i386_NEW_THREAD_STATE:
    case i386_REGS_SEGS_STATE:
    {
        register struct i386_new_thread_state *state;
        register struct i386_saved_state *saved_state;

        if (count < i386_NEW_THREAD_STATE_COUNT) {
            return(KERN_INVALID_ARGUMENT);
        }

        state = (struct i386_new_thread_state *) tstate;

        if (flavor == i386_REGS_SEGS_STATE) {
            /*
             * Code and stack selectors must not be null,
             * and must have user protection levels.
             * Only the low 16 bits are valid.
             */
            state->cs &= 0xffff;
            state->ss &= 0xffff;
            state->ds &= 0xffff;
            state->es &= 0xffff;
            state->fs &= 0xffff;
            state->gs &= 0xffff;

            if (!kernel_act &&
                (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
                 || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U))
                return KERN_INVALID_ARGUMENT;
        }

        saved_state = USER_REGS(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
                               | EFL_USER_SET;

        /*
         * Segment registers. Set differently in V8086 mode.
         */
        if (state->efl & EFL_VM) {
            /*
             * Set V8086 mode segment registers.
             */
            saved_state->cs = state->cs & 0xffff;
            saved_state->ss = state->ss & 0xffff;
            saved_state->v86_segs.v86_ds = state->ds & 0xffff;
            saved_state->v86_segs.v86_es = state->es & 0xffff;
            saved_state->v86_segs.v86_fs = state->fs & 0xffff;
            saved_state->v86_segs.v86_gs = state->gs & 0xffff;

            /*
             * Zero protected mode segment registers.
             */
            saved_state->ds = 0;
            saved_state->es = 0;
            saved_state->fs = 0;
            saved_state->gs = 0;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                thr_act->mact.pcb->ims.v86s.flags =
                    state->efl & (EFL_TF | EFL_IF);
            }
        }
        else if (flavor == i386_NEW_THREAD_STATE && !kernel_act) {
            /*
             * 386 mode. Set segment registers for flat
             * 32-bit address space.
             */
            saved_state->cs = USER_CS;
            saved_state->ss = USER_DS;
            saved_state->ds = USER_DS;
            saved_state->es = USER_DS;
            saved_state->fs = USER_DS;
            saved_state->gs = USER_DS;
        }
        else {
            /*
             * User setting segment registers.
             * Code and stack selectors have already been
             * checked. Others will be reset by 'iret'
             * if they are not valid.
             */
            saved_state->cs = state->cs;
            saved_state->ss = state->ss;
            saved_state->ds = state->ds;
            saved_state->es = state->es;
            saved_state->fs = state->fs;
            saved_state->gs = state->gs;
        }
        break;
    }

    case i386_FLOAT_STATE: {

        if (count < i386_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_state(thr_act, (struct i386_float_state*)tstate);
    }

    /*
     * Temporary - replace by i386_io_map
     */
    case i386_ISA_PORT_MAP_STATE: {
        register struct i386_isa_port_map_state *state;
        register iopb_tss_t tss;

        if (count < i386_ISA_PORT_MAP_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        break;
    }

    case i386_V86_ASSIST_STATE:
    {
        register struct i386_v86_assist_state *state;
        vm_offset_t int_table;
        int         int_count;

        if (count < i386_V86_ASSIST_STATE_COUNT)
            return KERN_INVALID_ARGUMENT;

        state = (struct i386_v86_assist_state *) tstate;
        int_table = state->int_table;
        int_count = state->int_count;

        if (int_table >= VM_MAX_ADDRESS ||
            int_table +
                int_count * sizeof(struct v86_interrupt_table)
                > VM_MAX_ADDRESS)
            return KERN_INVALID_ARGUMENT;

        thr_act->mact.pcb->ims.v86s.int_table = int_table;
        thr_act->mact.pcb->ims.v86s.int_count = int_count;

        thr_act->mact.pcb->ims.v86s.flags =
            USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
        break;
    }

    case i386_THREAD_STATE: {
        struct i386_saved_state *saved_state;
        i386_thread_state_t *state25;

        saved_state = USER_REGS(thr_act);
        state25 = (i386_thread_state_t *)tstate;

        saved_state->eax = state25->eax;
        saved_state->ebx = state25->ebx;
        saved_state->ecx = state25->ecx;
        saved_state->edx = state25->edx;
        saved_state->edi = state25->edi;
        saved_state->esi = state25->esi;
        saved_state->ebp = state25->ebp;
        saved_state->uesp = state25->esp;
        saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
                           | EFL_USER_SET;
        saved_state->eip = state25->eip;
        saved_state->cs = USER_CS;  /* FIXME? */
        saved_state->ss = USER_DS;
        saved_state->ds = USER_DS;
        saved_state->es = USER_DS;
        saved_state->fs = USER_DS;
        saved_state->gs = USER_DS;
    }
    break;

    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}
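
/*
 * Descriptive note on the EFLAGS handling above: for any activation that
 * is not kernel-loaded, the caller-supplied eflags value is masked with
 * ~EFL_USER_CLEAR and OR'd with EFL_USER_SET before being stored, so
 * whatever flag bits those masks reserve for the kernel cannot be set or
 * cleared from user level via thread_set_state(); only kernel
 * activations get their eflags stored verbatim.
 */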

/*
 * act_machine_get_state:
 *
 * Get the status of the specified thread.
 */
kern_return_t
act_machine_get_state(
    thread_act_t            thr_act,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
#if MACH_ASSERT
    if (watchacts & WA_STATE)
        printf("act_%x act_m_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n",
               current_act(), thr_act, flavor, tstate,
               count, (count ? *count : 0));
#endif /* MACH_ASSERT */

    switch (flavor) {

    case i386_SAVED_STATE:
    {
        register struct i386_saved_state *state;
        register struct i386_saved_state *saved_state;

        if (*count < i386_SAVED_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_saved_state *) tstate;
        saved_state = USER_REGS(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;

        if (saved_state->efl & EFL_VM) {
            /*
             * V8086 mode.
             */
            state->ds = saved_state->v86_segs.v86_ds & 0xffff;
            state->es = saved_state->v86_segs.v86_es & 0xffff;
            state->fs = saved_state->v86_segs.v86_fs & 0xffff;
            state->gs = saved_state->v86_segs.v86_gs & 0xffff;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on
                 */
                if ((thr_act->mact.pcb->ims.v86s.flags &
                     (EFL_IF|V86_IF_PENDING)) == 0)
                    state->efl &= ~EFL_IF;
            }
        }
        else {
            /*
             * 386 mode.
             */
            state->ds = saved_state->ds & 0xffff;
            state->es = saved_state->es & 0xffff;
            state->fs = saved_state->fs & 0xffff;
            state->gs = saved_state->gs & 0xffff;
        }
        *count = i386_SAVED_STATE_COUNT;
        break;
    }

    case i386_NEW_THREAD_STATE:
    case i386_REGS_SEGS_STATE:
    {
        register struct i386_new_thread_state *state;
        register struct i386_saved_state *saved_state;

        if (*count < i386_NEW_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_new_thread_state *) tstate;
        saved_state = USER_REGS(thr_act);

        /*
         * General registers.
         */
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->ebx = saved_state->ebx;
        state->edx = saved_state->edx;
        state->ecx = saved_state->ecx;
        state->eax = saved_state->eax;
        state->eip = saved_state->eip;
        state->efl = saved_state->efl;
        state->uesp = saved_state->uesp;

        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        if (saved_state->efl & EFL_VM) {
            /*
             * V8086 mode.
             */
            state->ds = saved_state->v86_segs.v86_ds & 0xffff;
            state->es = saved_state->v86_segs.v86_es & 0xffff;
            state->fs = saved_state->v86_segs.v86_fs & 0xffff;
            state->gs = saved_state->v86_segs.v86_gs & 0xffff;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on
                 */
                if ((thr_act->mact.pcb->ims.v86s.flags &
                     (EFL_IF|V86_IF_PENDING)) == 0)
                    state->efl &= ~EFL_IF;
            }
        }
        else {
            /*
             * 386 mode.
             */
            state->ds = saved_state->ds & 0xffff;
            state->es = saved_state->es & 0xffff;
            state->fs = saved_state->fs & 0xffff;
            state->gs = saved_state->gs & 0xffff;
        }
        *count = i386_NEW_THREAD_STATE_COUNT;
        break;
    }

    case THREAD_SYSCALL_STATE:
    {
        register struct thread_syscall_state *state;
        register struct i386_saved_state *saved_state = USER_REGS(thr_act);

        state = (struct thread_syscall_state *) tstate;
        state->eax = saved_state->eax;
        state->edx = saved_state->edx;
        state->efl = saved_state->efl;
        state->eip = saved_state->eip;
        state->esp = saved_state->uesp;
        *count = i386_THREAD_SYSCALL_STATE_COUNT;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST:
        if (*count < 5)
            return (KERN_INVALID_ARGUMENT);
        tstate[0] = i386_NEW_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_ISA_PORT_MAP_STATE;
        tstate[3] = i386_V86_ASSIST_STATE;
        tstate[4] = THREAD_SYSCALL_STATE;
        *count = 5;
        break;

    case i386_FLOAT_STATE: {

        if (*count < i386_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        *count = i386_FLOAT_STATE_COUNT;
        return fpu_get_state(thr_act, (struct i386_float_state *)tstate);
    }

    /*
     * Temporary - replace by i386_io_map
     */
    case i386_ISA_PORT_MAP_STATE: {
        register struct i386_isa_port_map_state *state;
        register iopb_tss_t tss;

        if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_isa_port_map_state *) tstate;
        tss = thr_act->mact.pcb->ims.io_tss;

        if (tss == 0) {
            int i;

            /*
             * The thread has no ktss, so no IO permissions.
             */
            for (i = 0; i < sizeof state->pm; i++)
                state->pm[i] = 0xff;
        } else {
            /*
             * The thread has its own ktss.
             */
            bcopy((char *) tss->bitmap,
                  (char *) state->pm,
                  sizeof state->pm);
        }

        *count = i386_ISA_PORT_MAP_STATE_COUNT;
        break;
    }

    case i386_V86_ASSIST_STATE:
    {
        register struct i386_v86_assist_state *state;

        if (*count < i386_V86_ASSIST_STATE_COUNT)
            return KERN_INVALID_ARGUMENT;

        state = (struct i386_v86_assist_state *) tstate;
        state->int_table = thr_act->mact.pcb->ims.v86s.int_table;
        state->int_count = thr_act->mact.pcb->ims.v86s.int_count;

        *count = i386_V86_ASSIST_STATE_COUNT;
        break;
    }

    case i386_THREAD_STATE: {
        struct i386_saved_state *saved_state;
        i386_thread_state_t *state;

        saved_state = USER_REGS(thr_act);
        state = (i386_thread_state_t *)tstate;

        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds;
        state->es = saved_state->es;
        state->fs = saved_state->fs;
        state->gs = saved_state->gs;
        break;
    }

    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}
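
/*
 * Descriptive note: THREAD_STATE_FLAVOR_LIST above reports the five
 * flavors this file services through get/set (new-thread, float, ISA
 * port map, V86 assist and syscall state), and the successful get cases
 * write the flavor's word count back through *count so the caller knows
 * how much state was returned (i386_THREAD_STATE being the one case
 * above that does not update *count).
 */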

/*
 * Alter the thread`s state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
    thread_t        thread,
    kern_return_t   retval)
{
    thread->top_act->mact.pcb->iss.eax = retval;
}
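
/*
 * Descriptive note: on i386 the syscall return value travels back to
 * user space in %eax, so storing retval into the saved eax slot of the
 * PCB is all that is needed before thread_exception_return() restores
 * the user registers.
 */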

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(thread_t))
{
    MachineThrAct_t mact = &thr_act->mact;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n",
               thread, thr_act, start_pos);
#endif /* MACH_ASSERT */

    assert(thread != NULL);
    assert(thr_act != NULL);

    /*
     * Allocate a kernel stack per shuttle.
     */
    thread->kernel_stack = (int)stack_alloc(thread, start_pos);
    assert(thread->kernel_stack != 0);

    /*
     * Point top of kernel stack to user`s registers.
     */
    STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss;

    /*
     * Utah code fiddles with pcb here - (we don't need to)
     */
    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread.
 */
void
thread_machine_destroy( thread_t thread )
{
    spl_t s;

    if (thread->kernel_stack != 0) {
        s = splsched();
        stack_free(thread);
        splx(s);
    }
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor.
 */
void
thread_machine_set_current( thread_t thread )
{
    register int my_cpu;

    mp_disable_preemption();
    my_cpu = cpu_number();

    cpu_data[my_cpu].active_thread = thread;
    active_kloaded[my_cpu] =
        thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;

    mp_enable_preemption();
}

/*
 * Pool of kernel activations.
 */
void act_machine_init()
{
    int             i;
    thread_act_t    thr_act;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_init()\n");
#endif /* MACH_ASSERT */

    /* Good to verify this once */
    assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

    /*
     * If we start using kernel activations,
     * would normally create kernel_thread_pool here,
     * populating it from the act_zone
     */
}

kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
    MachineThrAct_t mact = &thr_act->mact;
    pcb_t           pcb;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_create(task=%x,thr_act=%x) pcb=%x\n",
               task, thr_act, &mact->xxx_pcb);
#endif /* MACH_ASSERT */

    /*
     * Clear & Init the pcb (sets up user-mode s regs).
     */
    pcb_init(thr_act);

    return KERN_SUCCESS;
}

void
act_virtual_machine_destroy(thread_act_t thr_act)
{
    return;
}

void
act_machine_destroy(thread_act_t thr_act)
{

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_destroy(0x%x)\n", thr_act);
#endif /* MACH_ASSERT */

    pcb_terminate(thr_act);
}

void
act_machine_return(int code)
{
    thread_act_t thr_act = current_act();

#if MACH_ASSERT
    /*
     * We don't go through the locking dance here needed to
     * acquire thr_act->thread safely.
     */

    if (watchacts & WA_EXIT)
        printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
               code, thr_act, thr_act->ref_count,
               thr_act->thread, thr_act->thread->ref_count);
#endif /* MACH_ASSERT */

    /*
     * This code is called with nothing locked.
     * It also returns with nothing locked, if it returns.
     *
     * This routine terminates the current thread activation.
     * If this is the only activation associated with its
     * thread shuttle, then the entire thread (shuttle plus
     * activation) is terminated.
     */
    assert( code == KERN_TERMINATED );
    assert( thr_act );

#ifdef CALLOUT_RPC_MODEL
    /*
     * JMM - RPC is not going to be done with a callout/direct-
     * stack manipulation mechanism. Instead we will return/
     * unwind normally as if from a continuation.
     */
    act_lock_thread(thr_act);

    if (thr_act->thread->top_act != thr_act) {
        /*
         * This is not the top activation;
         * if possible, we should clone the shuttle so that
         * both the root RPC-chain and the soon-to-be-orphaned
         * RPC-chain have shuttles.
         *
         * JMM - Cloning shuttles isn't the right approach. We
         * need to alert the higher-up activations to return our
         * shuttle (because scheduling attributes may truly be
         * unique and not cloneable).
         */
        act_unlock_thread(thr_act);
        panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
    }

    if (thr_act->lower != THR_ACT_NULL) {
        thread_t        cur_thread = current_thread();
        thread_act_t    cur_act;
        struct ipc_port *iplock;

        /* send it an appropriate return code */
        thr_act->lower->alerts |= SERVER_TERMINATED;
        install_special_handler(thr_act->lower);

        /* Return to previous act with error code */
        act_locked_act_reference(thr_act);  /* keep it around */
        act_switch_swapcheck(cur_thread, (ipc_port_t)0);
        (void) switch_act(THR_ACT_NULL);
        /* assert(thr_act->ref_count == 0); */  /* XXX */
        cur_act = cur_thread->top_act;
        MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;

        machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
        /*
         * The following unlocks must be done separately since fields
         * used by `act_unlock_thread()' have been cleared, meaning
         * that it would not release all of the appropriate locks.
         */
        iplock = thr_act->pool_port;    /* remember for unlock call */
        rpc_unlock(cur_thread);
        if (iplock) ip_unlock(iplock);  /* must be done separately */
        act_unlock(thr_act);
        act_deallocate(thr_act);        /* free it */
        Load_context(cur_thread);
        /*NOTREACHED*/

        panic("act_machine_return: TALKING ZOMBIE! (2)");
    }
    act_unlock_thread(thr_act);

#endif /* CALLOUT_RPC_MODEL */

    /*
     * This is the only activation attached to the shuttle, so
     * terminate the entire thread (shuttle plus activation).
     */
    assert(thr_act->thread->top_act == thr_act);
    thread_terminate_self();

    /*NOTREACHED*/

    panic("act_machine_return: TALKING ZOMBIE! (1)");
}


/*
 * Perform machine-dependent per-thread initializations.
 */
void
thread_machine_init(void)
{
    pcb_module_init();
}

/*
 * Some routines for debugging activation code.
 */
static void dump_handlers(thread_act_t);
void dump_regs(thread_act_t);

static void
dump_handlers(thread_act_t thr_act)
{
    ReturnHandler *rhp = thr_act->handlers;
    int counter = 0;

    printf("\t");
    while (rhp) {
        if (rhp == &thr_act->special_handler) {
            if (rhp->next)
                printf("[NON-Zero next ptr(%x)]", rhp->next);
            printf("special_handler()->");
            break;
        }
        printf("hdlr_%d(%x)->", counter, rhp->handler);
        rhp = rhp->next;
        if (++counter > 32) {
            printf("Aborting: HUGE handler chain\n");
            break;
        }
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_act_t thr_act)
{
    if (thr_act->mact.pcb) {
        register struct i386_saved_state *ssp = USER_REGS(thr_act);

        /* Print out user register state */
        printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
               ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
        printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
               ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
        printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
           thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    if (thr_act->pool_port) {
        thread_pool_t actpp = &thr_act->pool_port->ip_thread_pool;
        printf("\tpool(acts_p=%x, waiting=%d) pool_next %x\n",
               actpp->thr_acts, actpp->waiting, thr_act->thread_pool_next);
    } else
        printf("\tno thread_pool\n");

    printf("\talerts=%x mask=%x susp=%d user_stop=%d active=%x ast=%x\n",
           thr_act->alerts, thr_act->alert_mask,
           thr_act->suspend_count, thr_act->user_stop_count,
           thr_act->active, thr_act->ast);
    printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower);
    printf("\tpcb=%x\n", thr_act->mact.pcb);

    if (thr_act->thread && thr_act->thread->kernel_stack) {
        vm_offset_t stack = thr_act->thread->kernel_stack;

        printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
               stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
               STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
    }

    dump_handlers(thr_act);
    dump_regs(thr_act);
    return((int)thr_act);
}

unsigned int
get_useraddr()
{
    thread_act_t thr_act = current_act();

    if (thr_act->mact.pcb)
        return(thr_act->mact.pcb->iss.eip);
    else
        return(0);
}

void
thread_swapin_mach_alloc(thread_t thread)
{
    /* 386 does not have saveareas */
}

/*
 * Detach and return a kernel stack from a thread.
 */
vm_offset_t
stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 thread, thread->priority,
                 thread->sched_pri, 0,
                 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    return(stack);
}
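
/*
 * Descriptive note: stack_detach()/stack_attach() are the two halves
 * used by stack_handoff() below - the kernel stack is pulled off the
 * outgoing thread and wired onto the incoming one rather than being
 * freed and reallocated, which keeps a handoff cheap.
 */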

/*
 * Attach a kernel stack to a thread and initialize it.
 */
void
stack_attach(struct thread_shuttle *thread,
             vm_offset_t stack,
             void (*start_pos)(thread_t))
{
    struct i386_kernel_state *statep;
    thread_act_t thr_act;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 thread, thread->priority,
                 thread->sched_pri, start_pos,
                 0);

    assert(stack);
    statep = STACK_IKS(stack);
    thread->kernel_stack = stack;

    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) start_pos;
    statep->k_esp = (unsigned long) STACK_IEL(stack);

    /* Point the top of the kernel stack at the activation's saved user state. */
    thr_act = thread->top_act;
    assert(thr_act);
    STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;

    return;
}

/*
 * Move a stack from old to new thread.
 */
void
stack_handoff(thread_t old,
              thread_t new)
{
    vm_offset_t stack;
    pmap_t      new_pmap;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF),
                 old, old->priority,
                 old->sched_pri, 0,
                 0);

    assert(new->top_act);
    assert(old->top_act);

    stack = stack_detach(old);
    stack_attach(new, stack, 0);

    new_pmap = new->top_act->task->map->pmap;
    if (old->top_act->task->map->pmap != new_pmap)
        PMAP_ACTIVATE_MAP(new->top_act->task->map, cpu_number());

    thread_machine_set_current(new);

    active_stacks[cpu_number()] = new->kernel_stack;

    return;
}
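
/*
 * Descriptive note: stack_handoff() is the path used when the outgoing
 * thread gives its kernel stack directly to its successor: the stack is
 * moved, the address map is switched only if the two tasks use different
 * pmaps, and the per-CPU current-thread and active-stack bookkeeping is
 * updated; no full register save/restore happens here.
 */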