]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/pcb.c
27a48c05824f21dfe2b664e78d88a24a7e56afbc
[apple/xnu.git] / osfmk / i386 / pcb.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51
52 #include <mach_rt.h>
53 #include <mach_debug.h>
54 #include <mach_ldebug.h>
55
56 #include <sys/kdebug.h>
57
58 #include <mach/kern_return.h>
59 #include <mach/thread_status.h>
60 #include <mach/vm_param.h>
61
62 #include <i386/cpu_data.h>
63 #include <i386/cpu_number.h>
64
65 #include <kern/counters.h>
66 #include <kern/kalloc.h>
67 #include <kern/mach_param.h>
68 #include <kern/processor.h>
69 #include <kern/cpu_data.h>
70 #include <kern/cpu_number.h>
71 #include <kern/task.h>
72 #include <kern/thread.h>
73 #include <kern/sched_prim.h>
74 #include <kern/misc_protos.h>
75 #include <kern/assert.h>
76 #include <kern/spl.h>
77 #include <kern/machine.h>
78 #include <ipc/ipc_port.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_protos.h>
83
84 #include <i386/thread.h>
85 #include <i386/eflags.h>
86 #include <i386/proc_reg.h>
87 #include <i386/seg.h>
88 #include <i386/tss.h>
89 #include <i386/user_ldt.h>
90 #include <i386/fpu.h>
91 #include <i386/iopb_entries.h>
92 #include <i386/mp_desc.h>
93 #include <i386/cpu_data.h>
94
95
/*
 * Maps state flavor to number of words in the state:
 * Indexed by thread flavor value; flavors with no fixed-size state
 * (FLAVOR_LIST, THREAD_STATE_NONE) map to 0.
 */
__private_extern__
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	i386_NEW_THREAD_STATE_COUNT,
	i386_FLOAT_STATE_COUNT,
	i386_ISA_PORT_MAP_STATE_COUNT,
	i386_V86_ASSIST_STATE_COUNT,
	i386_REGS_SEGS_STATE_COUNT,
	i386_THREAD_SYSCALL_STATE_COUNT,
	/* THREAD_STATE_NONE */ 0,
	i386_SAVED_STATE_COUNT,
};
111
112 /* Forward */
113
114 void act_machine_throughcall(thread_t thr_act);
115 user_addr_t get_useraddr(void);
116 void act_machine_return(int);
117 void act_machine_sv_free(thread_t, int);
118
119 extern thread_t Switch_context(
120 thread_t old,
121 thread_continue_t cont,
122 thread_t new);
123 extern void Thread_continue(void);
124 extern void Load_context(
125 thread_t thread);
126
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages.
 * No-op on i386: there are no machine-dependent pages to reclaim.
 */
void
consider_machine_collect(void)
{
}
136
/*
 * consider_machine_adjust:
 *
 * Machine-dependent memory adjustment hook; no-op on i386.
 */
void
consider_machine_adjust(void)
{
}
141
142
// DEBUG: throttle counters for the LDT debug printf below.
int DEBUG_kldt = 0;
int DEBUG_uldt = 0;

/*
 * act_machine_switch_pcb:
 *
 * Point the current CPU's hardware state at 'new's PCB:
 * per-trap kernel stack top, TSS (IO permission bitmap), LDT
 * entries, and (lazily) the FP context.  Descriptor updates are
 * done with preemption disabled so they apply to this CPU only.
 */
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t pcb = new->machine.pcb;
	int mycpu;
	register iopb_tss_t tss = pcb->ims.io_tss;
	vm_offset_t pcb_stack_top;
	register user_ldt_t uldt = pcb->ims.ldt;

	assert(new->kernel_stack != 0);
	/* A trap from user mode saves registers into the PCB's iss. */
	STACK_IEL(new->kernel_stack)->saved_state =
		&new->machine.pcb->iss;

	/*
	 * Save a pointer to the top of the "kernel" stack -
	 * actually the place in the PCB where a trap into
	 * kernel mode will push the registers.
	 * The location depends on V8086 mode.  If we are
	 * not in V8086 mode, then a trap into the kernel
	 * won`t save the v86 segments, so we leave room.
	 */

	pcb_stack_top = (pcb->iss.efl & EFL_VM)
			? (int) (&pcb->iss + 1)
			: (int) (&pcb->iss.v86_segs);

	mp_disable_preemption();
	mycpu = cpu_number();

	if (tss == 0) {
		/*
		 * No per-thread IO permissions.
		 * Use standard kernel TSS.
		 */
		if (!(gdt_desc_p(KERNEL_TSS)->access & ACC_TSS_BUSY))
			set_tr(KERNEL_TSS);
		current_ktss()->esp0 = pcb_stack_top;
	}
	else {
		/*
		 * Set the IO permissions.  Use this thread`s TSS.
		 */
		*gdt_desc_p(USER_TSS)
			= *(struct real_descriptor *)tss->iopb_desc;
		tss->tss.esp0 = pcb_stack_top;
		set_tr(USER_TSS);
		/* Clear busy bit so KERNEL_TSS can be reloaded later. */
		gdt_desc_p(KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
	}

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (uldt == 0) {
		struct real_descriptor *ldtp;
		/*
		 * Use system LDT.
		 */
		// Set up the tasks specific ldt entries if extant
		ldtp = (struct real_descriptor *)current_ldt();
		ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
		if (pcb->uldt_selector != 0)
			ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
		set_ldt(KERNEL_LDT);
	}
	else {
		/*
		 * Thread has its own LDT.  // THIS SHOULD BE REMOVED!!!!
		 */
		*gdt_desc_p(USER_LDT) = uldt->desc;
		set_ldt(USER_LDT);
		/*debug: rate-limited trace of the user-LDT path */
		if ((DEBUG_uldt++ % 0x7fff) == 0)
			printf("KERNEL----> setting user ldt");

	}

	mp_enable_preemption();
	/*
	 * Load the floating-point context, if necessary.
	 */
	fpu_load_context(pcb);

}
230
/*
 * Switch to the first thread on a CPU.
 * Installs the thread's hardware PCB state, then loads its
 * saved kernel context via Load_context.
 */
void
machine_load_context(
	thread_t new)
{
	act_machine_switch_pcb(new);
	Load_context(new);
}
241
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	{
		int mycpu = cpu_number();

		PMAP_SWITCH_CONTEXT(old, new, mycpu)
	}

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);
	/* kdebug trace of the scheduler switch, before giving up the CPU. */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
	/* Clear any stale continuation before handing off. */
	old->continuation = NULL;
	return(Switch_context(old, continuation, new));
}
281
/*
 * act_machine_sv_free
 * release saveareas associated with an act.  if flag is true, release
 * user level savearea(s) too, else don't
 *
 * No-op on i386: save state lives in the PCB, not separate saveareas.
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
291
292
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 * Currently a no-op on i386.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
#pragma unused (thread)

	return KERN_SUCCESS;
}
305
306
307 /*
308 * act_machine_set_state:
309 *
310 * Set the status of the specified thread.
311 */
312
313 kern_return_t
314 machine_thread_set_state(
315 thread_t thr_act,
316 thread_flavor_t flavor,
317 thread_state_t tstate,
318 mach_msg_type_number_t count)
319 {
320 int kernel_act = 0;
321
322 switch (flavor) {
323 case THREAD_SYSCALL_STATE:
324 {
325 register struct thread_syscall_state *state;
326 register struct i386_saved_state *saved_state = USER_REGS(thr_act);
327
328 state = (struct thread_syscall_state *) tstate;
329 saved_state->eax = state->eax;
330 saved_state->edx = state->edx;
331 if (kernel_act)
332 saved_state->efl = state->efl;
333 else
334 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
335 saved_state->eip = state->eip;
336 saved_state->uesp = state->esp;
337 break;
338 }
339
340 case i386_SAVED_STATE:
341 {
342 register struct i386_saved_state *state;
343 register struct i386_saved_state *saved_state;
344
345 if (count < i386_SAVED_STATE_COUNT) {
346 return(KERN_INVALID_ARGUMENT);
347 }
348
349 state = (struct i386_saved_state *) tstate;
350
351 /* Check segment selectors are safe */
352 if (!kernel_act &&
353 !valid_user_segment_selectors(state->cs,
354 state->ss,
355 state->ds,
356 state->es,
357 state->fs,
358 state->gs))
359 return KERN_INVALID_ARGUMENT;
360
361 saved_state = USER_REGS(thr_act);
362
363 /*
364 * General registers
365 */
366 saved_state->edi = state->edi;
367 saved_state->esi = state->esi;
368 saved_state->ebp = state->ebp;
369 saved_state->uesp = state->uesp;
370 saved_state->ebx = state->ebx;
371 saved_state->edx = state->edx;
372 saved_state->ecx = state->ecx;
373 saved_state->eax = state->eax;
374 saved_state->eip = state->eip;
375 if (kernel_act)
376 saved_state->efl = state->efl;
377 else
378 saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
379 | EFL_USER_SET;
380
381 /*
382 * Segment registers. Set differently in V8086 mode.
383 */
384 if (state->efl & EFL_VM) {
385 /*
386 * Set V8086 mode segment registers.
387 */
388 saved_state->cs = state->cs & 0xffff;
389 saved_state->ss = state->ss & 0xffff;
390 saved_state->v86_segs.v86_ds = state->ds & 0xffff;
391 saved_state->v86_segs.v86_es = state->es & 0xffff;
392 saved_state->v86_segs.v86_fs = state->fs & 0xffff;
393 saved_state->v86_segs.v86_gs = state->gs & 0xffff;
394
395 /*
396 * Zero protected mode segment registers.
397 */
398 saved_state->ds = 0;
399 saved_state->es = 0;
400 saved_state->fs = 0;
401 saved_state->gs = 0;
402
403 if (thr_act->machine.pcb->ims.v86s.int_table) {
404 /*
405 * Hardware assist on.
406 */
407 thr_act->machine.pcb->ims.v86s.flags =
408 state->efl & (EFL_TF | EFL_IF);
409 }
410 }
411 else if (kernel_act) {
412 /*
413 * 386 mode. Set segment registers for flat
414 * 32-bit address space.
415 */
416 saved_state->cs = KERNEL_CS;
417 saved_state->ss = KERNEL_DS;
418 saved_state->ds = KERNEL_DS;
419 saved_state->es = KERNEL_DS;
420 saved_state->fs = KERNEL_DS;
421 saved_state->gs = CPU_DATA_GS;
422 }
423 else {
424 /*
425 * User setting segment registers.
426 * Code and stack selectors have already been
427 * checked. Others will be reset by 'iret'
428 * if they are not valid.
429 */
430 saved_state->cs = state->cs;
431 saved_state->ss = state->ss;
432 saved_state->ds = state->ds;
433 saved_state->es = state->es;
434 saved_state->fs = state->fs;
435 saved_state->gs = state->gs;
436 }
437 break;
438 }
439
440 case i386_NEW_THREAD_STATE:
441 case i386_REGS_SEGS_STATE:
442 {
443 register struct i386_new_thread_state *state;
444 register struct i386_saved_state *saved_state;
445
446 if (count < i386_NEW_THREAD_STATE_COUNT) {
447 return(KERN_INVALID_ARGUMENT);
448 }
449
450 state = (struct i386_new_thread_state *) tstate;
451
452 if (flavor == i386_REGS_SEGS_STATE) {
453 /*
454 * Code and stack selectors must not be null,
455 * and must have user protection levels.
456 * Only the low 16 bits are valid.
457 */
458 state->cs &= 0xffff;
459 state->ss &= 0xffff;
460 state->ds &= 0xffff;
461 state->es &= 0xffff;
462 state->fs &= 0xffff;
463 state->gs &= 0xffff;
464
465 if (!kernel_act &&
466 !valid_user_segment_selectors(state->cs,
467 state->ss,
468 state->ds,
469 state->es,
470 state->fs,
471 state->gs))
472 return KERN_INVALID_ARGUMENT;
473 }
474
475 saved_state = USER_REGS(thr_act);
476
477 /*
478 * General registers
479 */
480 saved_state->edi = state->edi;
481 saved_state->esi = state->esi;
482 saved_state->ebp = state->ebp;
483 saved_state->uesp = state->uesp;
484 saved_state->ebx = state->ebx;
485 saved_state->edx = state->edx;
486 saved_state->ecx = state->ecx;
487 saved_state->eax = state->eax;
488 saved_state->eip = state->eip;
489 if (kernel_act)
490 saved_state->efl = state->efl;
491 else
492 saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
493 | EFL_USER_SET;
494
495 /*
496 * Segment registers. Set differently in V8086 mode.
497 */
498 if (state->efl & EFL_VM) {
499 /*
500 * Set V8086 mode segment registers.
501 */
502 saved_state->cs = state->cs & 0xffff;
503 saved_state->ss = state->ss & 0xffff;
504 saved_state->v86_segs.v86_ds = state->ds & 0xffff;
505 saved_state->v86_segs.v86_es = state->es & 0xffff;
506 saved_state->v86_segs.v86_fs = state->fs & 0xffff;
507 saved_state->v86_segs.v86_gs = state->gs & 0xffff;
508
509 /*
510 * Zero protected mode segment registers.
511 */
512 saved_state->ds = 0;
513 saved_state->es = 0;
514 saved_state->fs = 0;
515 saved_state->gs = 0;
516
517 if (thr_act->machine.pcb->ims.v86s.int_table) {
518 /*
519 * Hardware assist on.
520 */
521 thr_act->machine.pcb->ims.v86s.flags =
522 state->efl & (EFL_TF | EFL_IF);
523 }
524 }
525 else if (flavor == i386_NEW_THREAD_STATE && kernel_act) {
526 /*
527 * 386 mode. Set segment registers for flat
528 * 32-bit address space.
529 */
530 saved_state->cs = KERNEL_CS;
531 saved_state->ss = KERNEL_DS;
532 saved_state->ds = KERNEL_DS;
533 saved_state->es = KERNEL_DS;
534 saved_state->fs = KERNEL_DS;
535 saved_state->gs = CPU_DATA_GS;
536 }
537 else {
538 /*
539 * User setting segment registers.
540 * Code and stack selectors have already been
541 * checked. Others will be reset by 'iret'
542 * if they are not valid.
543 */
544 saved_state->cs = state->cs;
545 saved_state->ss = state->ss;
546 saved_state->ds = state->ds;
547 saved_state->es = state->es;
548 saved_state->fs = state->fs;
549 saved_state->gs = state->gs;
550 }
551 break;
552 }
553
554 case i386_FLOAT_STATE: {
555 if (count < i386_old_FLOAT_STATE_COUNT)
556 return(KERN_INVALID_ARGUMENT);
557 if (count < i386_FLOAT_STATE_COUNT)
558 return fpu_set_state(thr_act,(struct i386_float_state*)tstate);
559 else return fpu_set_fxstate(thr_act,(struct i386_float_state*)tstate);
560 }
561
562 /*
563 * Temporary - replace by i386_io_map
564 */
565 case i386_ISA_PORT_MAP_STATE: {
566 if (count < i386_ISA_PORT_MAP_STATE_COUNT)
567 return(KERN_INVALID_ARGUMENT);
568
569 break;
570 }
571
572 case i386_V86_ASSIST_STATE:
573 {
574 register struct i386_v86_assist_state *state;
575 vm_offset_t int_table;
576 int int_count;
577
578 if (count < i386_V86_ASSIST_STATE_COUNT)
579 return KERN_INVALID_ARGUMENT;
580
581 state = (struct i386_v86_assist_state *) tstate;
582 int_table = state->int_table;
583 int_count = state->int_count;
584
585 if (int_table >= VM_MAX_ADDRESS ||
586 int_table +
587 int_count * sizeof(struct v86_interrupt_table)
588 > VM_MAX_ADDRESS)
589 return KERN_INVALID_ARGUMENT;
590
591 thr_act->machine.pcb->ims.v86s.int_table = int_table;
592 thr_act->machine.pcb->ims.v86s.int_count = int_count;
593
594 thr_act->machine.pcb->ims.v86s.flags =
595 USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
596 break;
597 }
598
599 case i386_THREAD_STATE: {
600 struct i386_saved_state *saved_state;
601 i386_thread_state_t *state25;
602
603 saved_state = USER_REGS(thr_act);
604 state25 = (i386_thread_state_t *)tstate;
605
606 saved_state->eax = state25->eax;
607 saved_state->ebx = state25->ebx;
608 saved_state->ecx = state25->ecx;
609 saved_state->edx = state25->edx;
610 saved_state->edi = state25->edi;
611 saved_state->esi = state25->esi;
612 saved_state->ebp = state25->ebp;
613 saved_state->uesp = state25->esp;
614 saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
615 | EFL_USER_SET;
616 saved_state->eip = state25->eip;
617 saved_state->cs = USER_CS; /* FIXME? */
618 saved_state->ss = USER_DS;
619 saved_state->ds = USER_DS;
620 saved_state->es = USER_DS;
621 saved_state->fs = state25->fs;
622 saved_state->gs = state25->gs;
623 }
624 break;
625
626 default:
627 return(KERN_INVALID_ARGUMENT);
628 }
629
630 return(KERN_SUCCESS);
631 }
632
633 /*
634 * thread_getstatus:
635 *
636 * Get the status of the specified thread.
637 */
638
639
640 kern_return_t
641 machine_thread_get_state(
642 thread_t thr_act,
643 thread_flavor_t flavor,
644 thread_state_t tstate,
645 mach_msg_type_number_t *count)
646 {
647 switch (flavor) {
648
649 case i386_SAVED_STATE:
650 {
651 register struct i386_saved_state *state;
652 register struct i386_saved_state *saved_state;
653
654 if (*count < i386_SAVED_STATE_COUNT)
655 return(KERN_INVALID_ARGUMENT);
656
657 state = (struct i386_saved_state *) tstate;
658 saved_state = USER_REGS(thr_act);
659
660 /*
661 * First, copy everything:
662 */
663 *state = *saved_state;
664
665 if (saved_state->efl & EFL_VM) {
666 /*
667 * V8086 mode.
668 */
669 state->ds = saved_state->v86_segs.v86_ds & 0xffff;
670 state->es = saved_state->v86_segs.v86_es & 0xffff;
671 state->fs = saved_state->v86_segs.v86_fs & 0xffff;
672 state->gs = saved_state->v86_segs.v86_gs & 0xffff;
673
674 if (thr_act->machine.pcb->ims.v86s.int_table) {
675 /*
676 * Hardware assist on
677 */
678 if ((thr_act->machine.pcb->ims.v86s.flags &
679 (EFL_IF|V86_IF_PENDING)) == 0)
680 state->efl &= ~EFL_IF;
681 }
682 }
683 else {
684 /*
685 * 386 mode.
686 */
687 state->ds = saved_state->ds & 0xffff;
688 state->es = saved_state->es & 0xffff;
689 state->fs = saved_state->fs & 0xffff;
690 state->gs = saved_state->gs & 0xffff;
691 }
692 *count = i386_SAVED_STATE_COUNT;
693 break;
694 }
695
696 case i386_NEW_THREAD_STATE:
697 case i386_REGS_SEGS_STATE:
698 {
699 register struct i386_new_thread_state *state;
700 register struct i386_saved_state *saved_state;
701
702 if (*count < i386_NEW_THREAD_STATE_COUNT)
703 return(KERN_INVALID_ARGUMENT);
704
705 state = (struct i386_new_thread_state *) tstate;
706 saved_state = USER_REGS(thr_act);
707
708 /*
709 * General registers.
710 */
711 state->edi = saved_state->edi;
712 state->esi = saved_state->esi;
713 state->ebp = saved_state->ebp;
714 state->ebx = saved_state->ebx;
715 state->edx = saved_state->edx;
716 state->ecx = saved_state->ecx;
717 state->eax = saved_state->eax;
718 state->eip = saved_state->eip;
719 state->efl = saved_state->efl;
720 state->uesp = saved_state->uesp;
721
722 state->cs = saved_state->cs;
723 state->ss = saved_state->ss;
724 if (saved_state->efl & EFL_VM) {
725 /*
726 * V8086 mode.
727 */
728 state->ds = saved_state->v86_segs.v86_ds & 0xffff;
729 state->es = saved_state->v86_segs.v86_es & 0xffff;
730 state->fs = saved_state->v86_segs.v86_fs & 0xffff;
731 state->gs = saved_state->v86_segs.v86_gs & 0xffff;
732
733 if (thr_act->machine.pcb->ims.v86s.int_table) {
734 /*
735 * Hardware assist on
736 */
737 if ((thr_act->machine.pcb->ims.v86s.flags &
738 (EFL_IF|V86_IF_PENDING)) == 0)
739 state->efl &= ~EFL_IF;
740 }
741 }
742 else {
743 /*
744 * 386 mode.
745 */
746 state->ds = saved_state->ds & 0xffff;
747 state->es = saved_state->es & 0xffff;
748 state->fs = saved_state->fs & 0xffff;
749 state->gs = saved_state->gs & 0xffff;
750 }
751 *count = i386_NEW_THREAD_STATE_COUNT;
752 break;
753 }
754
755 case THREAD_SYSCALL_STATE:
756 {
757 register struct thread_syscall_state *state;
758 register struct i386_saved_state *saved_state = USER_REGS(thr_act);
759
760 state = (struct thread_syscall_state *) tstate;
761 state->eax = saved_state->eax;
762 state->edx = saved_state->edx;
763 state->efl = saved_state->efl;
764 state->eip = saved_state->eip;
765 state->esp = saved_state->uesp;
766 *count = i386_THREAD_SYSCALL_STATE_COUNT;
767 break;
768 }
769
770 case THREAD_STATE_FLAVOR_LIST:
771 if (*count < 5)
772 return (KERN_INVALID_ARGUMENT);
773 tstate[0] = i386_NEW_THREAD_STATE;
774 tstate[1] = i386_FLOAT_STATE;
775 tstate[2] = i386_ISA_PORT_MAP_STATE;
776 tstate[3] = i386_V86_ASSIST_STATE;
777 tstate[4] = THREAD_SYSCALL_STATE;
778 *count = 5;
779 break;
780
781 case i386_FLOAT_STATE: {
782 if (*count < i386_old_FLOAT_STATE_COUNT)
783 return(KERN_INVALID_ARGUMENT);
784 if (*count< i386_FLOAT_STATE_COUNT) {
785 *count = i386_old_FLOAT_STATE_COUNT;
786 return fpu_get_state(thr_act,(struct i386_float_state *)tstate);
787 } else {
788 *count = i386_FLOAT_STATE_COUNT;
789 return fpu_get_fxstate(thr_act,(struct i386_float_state *)tstate);
790 }
791 }
792
793 /*
794 * Temporary - replace by i386_io_map
795 */
796 case i386_ISA_PORT_MAP_STATE: {
797 register struct i386_isa_port_map_state *state;
798 register iopb_tss_t tss;
799
800 if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
801 return(KERN_INVALID_ARGUMENT);
802
803 state = (struct i386_isa_port_map_state *) tstate;
804 tss = thr_act->machine.pcb->ims.io_tss;
805
806 if (tss == 0) {
807 unsigned int i;
808
809 /*
810 * The thread has no ktss, so no IO permissions.
811 */
812
813 for (i = 0; i < sizeof state->pm; i++)
814 state->pm[i] = 0xff;
815 } else {
816 /*
817 * The thread has its own ktss.
818 */
819
820 bcopy((char *) tss->bitmap,
821 (char *) state->pm,
822 sizeof state->pm);
823 }
824
825 *count = i386_ISA_PORT_MAP_STATE_COUNT;
826 break;
827 }
828
829 case i386_V86_ASSIST_STATE:
830 {
831 register struct i386_v86_assist_state *state;
832
833 if (*count < i386_V86_ASSIST_STATE_COUNT)
834 return KERN_INVALID_ARGUMENT;
835
836 state = (struct i386_v86_assist_state *) tstate;
837 state->int_table = thr_act->machine.pcb->ims.v86s.int_table;
838 state->int_count = thr_act->machine.pcb->ims.v86s.int_count;
839
840 *count = i386_V86_ASSIST_STATE_COUNT;
841 break;
842 }
843
844 case i386_THREAD_STATE: {
845 struct i386_saved_state *saved_state;
846 i386_thread_state_t *state;
847
848 saved_state = USER_REGS(thr_act);
849 state = (i386_thread_state_t *)tstate;
850
851 state->eax = saved_state->eax;
852 state->ebx = saved_state->ebx;
853 state->ecx = saved_state->ecx;
854 state->edx = saved_state->edx;
855 state->edi = saved_state->edi;
856 state->esi = saved_state->esi;
857 state->ebp = saved_state->ebp;
858 state->esp = saved_state->uesp;
859 state->eflags = saved_state->efl;
860 state->eip = saved_state->eip;
861 state->cs = saved_state->cs;
862 state->ss = saved_state->ss;
863 state->ds = saved_state->ds;
864 state->es = saved_state->es;
865 state->fs = saved_state->fs;
866 state->gs = saved_state->gs;
867 break;
868 }
869
870 default:
871 return(KERN_INVALID_ARGUMENT);
872 }
873
874 return(KERN_SUCCESS);
875 }
876
/*
 * Initialize the machine-dependent state for a new thread.
 * The PCB is embedded in the thread structure (xxx_pcb) — no separate
 * allocation — and a kernel stack is allocated for the thread.
 */
kern_return_t
machine_thread_create(
	thread_t thread,
	__unused task_t task)
{
	pcb_t pcb = &thread->machine.xxx_pcb;

	thread->machine.pcb = pcb;

	simple_lock_init(&pcb->lock, 0);

	/*
	 * Guarantee that the bootstrapped thread will be in user
	 * mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
	pcb->iss.efl = EFL_USER_SET;
	{
		struct real_descriptor *ldtp;
		ldtp = (struct real_descriptor *)ldt;
		/* Default cthread/uldt descriptors: the USER_DS data segment. */
		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
		pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
		pcb->uldt_selector = 0;
	}

	/*
	 * Allocate a kernel stack per thread.
	 */
	stack_alloc(thread);

	return(KERN_SUCCESS);
}
917
/*
 * Machine-dependent cleanup prior to destroying a thread:
 * release the IO TSS, FP save area and user LDT if present,
 * then sever the PCB link.
 */
void
machine_thread_destroy(
	thread_t thread)
{
	register pcb_t pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ims.io_tss != 0)
		iopb_destroy(pcb->ims.io_tss);
	if (pcb->ims.ifps != 0)
		fpu_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	thread->machine.pcb = (pcb_t)0;
}
937
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor.
 * Updates the per-CPU active-thread pointers with preemption
 * disabled so they change atomically with respect to this CPU.
 */
void
machine_set_current_thread( thread_t thread )
{
	mp_disable_preemption();

	current_cpu_datap()->cpu_active_thread = thread;
	current_cpu_datap()->cpu_active_kloaded = THREAD_NULL;

	mp_enable_preemption();
}
952
/* Machine-dependent self-termination hook; no-op on i386. */
void
machine_thread_terminate_self(void)
{
}
957
/*
 * act_machine_return:
 * Terminate the current thread activation; does not return.
 * code must be KERN_TERMINATED.
 */
void
act_machine_return(int code)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
978
979
/*
 * Perform machine-dependent per-thread initializations:
 * set up the FPU module and the IO permission bitmap subsystem.
 */
void
machine_thread_init(void)
{
	fpu_module_init();
	iopb_init();
}
989
990 /*
991 * Some routines for debugging activation code
992 */
993 static void dump_handlers(thread_t);
994 void dump_regs(thread_t);
995 int dump_act(thread_t thr_act);
996
/*
 * dump_handlers:
 * Debug-only: print the thread's ReturnHandler chain, stopping at
 * the special handler or after 32 entries (runaway-chain guard).
 * NOTE(review): %x with pointer arguments is debug-only formatting.
 */
static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler *rhp = thr_act->handlers;
	int counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			/* The special handler should always terminate the chain. */
			if (rhp->next)
				printf("[NON-Zero next ptr(%x)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%x)->",counter,rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
1020
/*
 * dump_regs:
 * Debug-only: print the thread's saved user register state,
 * if it has a PCB.
 */
void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb) {
		register struct i386_saved_state *ssp = USER_REGS(thr_act);
		/* Print out user register state */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
1034
/*
 * dump_act:
 * Debug-only: print a summary of the thread (task, counts, PCB,
 * kernel stack and saved context), then its handlers and registers.
 * Returns the thread pointer as an int (0 for NULL input).
 */
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(0x%x)(%d): task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d  active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%x\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
		    stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		    STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);
	dump_regs(thr_act);
	return((int)thr_act);
}
1062
1063 user_addr_t
1064 get_useraddr(void)
1065 {
1066
1067 thread_t thr_act = current_thread();
1068
1069 if (thr_act->machine.pcb)
1070 return(thr_act->machine.pcb->iss.eip);
1071 else
1072 return(0);
1073
1074 }
1075
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	/* kdebug trace of the detach, tagged with the thread's priorities. */
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}
1094
/*
 * attach a kernel stack to a thread and initialize it
 * so that a context load resumes at Thread_continue with
 * 'thread_continue' available in ebx.
 */

void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct i386_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	statep = STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* Seed the saved kernel context at the base of the new stack. */
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);

	/* User traps on this stack save into the thread's PCB. */
	STACK_IEL(stack)->saved_state = &thread->machine.pcb->iss;

	return;
}
1122
1123 /*
1124 * move a stack from old to new thread
1125 */
1126
1127 void
1128 machine_stack_handoff(thread_t old,
1129 thread_t new)
1130 {
1131 vm_offset_t stack;
1132
1133 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF),
1134 thread, thread->priority,
1135 thread->sched_pri, 0, 0);
1136
1137 assert(new);
1138 assert(old);
1139
1140 stack = machine_stack_detach(old);
1141 machine_stack_attach(new, stack);
1142
1143 PMAP_SWITCH_CONTEXT(old->task, new->task, cpu_number());
1144
1145 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
1146 (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
1147
1148 machine_set_current_thread(new);
1149
1150 current_cpu_datap()->cpu_active_stack = new->kernel_stack;
1151
1152 return;
1153 }
1154
/* Integer + FP user state snapshot used by act_thread_csave()/catt(). */
struct i386_act_context {
	struct i386_saved_state ss;
	struct i386_float_state fs;
};
1159
1160 void *
1161 act_thread_csave(void)
1162 {
1163 struct i386_act_context *ic;
1164 kern_return_t kret;
1165 int val;
1166
1167 ic = (struct i386_act_context *)kalloc(sizeof(struct i386_act_context));
1168
1169 if (ic == (struct i386_act_context *)NULL)
1170 return((void *)0);
1171
1172 val = i386_SAVED_STATE_COUNT;
1173 kret = machine_thread_get_state(current_thread(),
1174 i386_SAVED_STATE,
1175 (thread_state_t) &ic->ss,
1176 &val);
1177 if (kret != KERN_SUCCESS) {
1178 kfree(ic,sizeof(struct i386_act_context));
1179 return((void *)0);
1180 }
1181 val = i386_FLOAT_STATE_COUNT;
1182 kret = machine_thread_get_state(current_thread(),
1183 i386_FLOAT_STATE,
1184 (thread_state_t) &ic->fs,
1185 &val);
1186 if (kret != KERN_SUCCESS) {
1187 kfree(ic,sizeof(struct i386_act_context));
1188 return((void *)0);
1189 }
1190 return(ic);
1191 }
1192 void
1193 act_thread_catt(void *ctx)
1194 {
1195 struct i386_act_context *ic;
1196 kern_return_t kret;
1197
1198 ic = (struct i386_act_context *)ctx;
1199
1200 if (ic == (struct i386_act_context *)NULL)
1201 return;
1202
1203 kret = machine_thread_set_state(current_thread(),
1204 i386_SAVED_STATE,
1205 (thread_state_t) &ic->ss,
1206 i386_SAVED_STATE_COUNT);
1207 if (kret != KERN_SUCCESS)
1208 goto out;
1209
1210 kret = machine_thread_set_state(current_thread(),
1211 i386_FLOAT_STATE,
1212 (thread_state_t) &ic->fs,
1213 i386_FLOAT_STATE_COUNT);
1214 if (kret != KERN_SUCCESS)
1215 goto out;
1216 out:
1217 kfree(ic,sizeof(struct i386_act_context));
1218 }
1219
/* Release a context captured by act_thread_csave() without restoring it. */
void act_thread_cfree(void *ctx)
{
	kfree(ctx,sizeof(struct i386_act_context));
}
1224