2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 #include <mach_debug.h>
54 #include <mach_ldebug.h>
56 #include <sys/kdebug.h>
58 #include <mach/kern_return.h>
59 #include <mach/thread_status.h>
60 #include <mach/vm_param.h>
62 #include <kern/counters.h>
63 #include <kern/mach_param.h>
64 #include <kern/task.h>
65 #include <kern/thread.h>
66 #include <kern/thread_act.h>
67 #include <kern/thread_swap.h>
68 #include <kern/sched_prim.h>
69 #include <kern/misc_protos.h>
70 #include <kern/assert.h>
72 #include <ipc/ipc_port.h>
73 #include <vm/vm_kern.h>
76 #include <i386/thread.h>
77 #include <i386/eflags.h>
78 #include <i386/proc_reg.h>
81 #include <i386/user_ldt.h>
83 #include <i386/iopb_entries.h>
/*
 * Per-CPU scheduling state.  NOTE(review): the stray leading numbers on
 * these lines are extraction artifacts (original line numbers), not code;
 * they are preserved unchanged here.
 */
/* Kernel stack of the thread currently running on each CPU (updated by
 * the context-switch and stack-handoff paths below). */
85 vm_offset_t active_stacks
[NCPUS
];
/* Per-CPU kernel stack pointer; its use is not visible in this extract. */
86 vm_offset_t kernel_stack
[NCPUS
];
/* Per-CPU kernel-loaded activation; reset to THR_ACT_NULL by
 * machine_thread_set_current(). */
87 thread_act_t active_kloaded
[NCPUS
];
/*
 * state_count[]: number of 32-bit words in each thread-state flavor,
 * indexed by flavor value.  Used by the thread get/set-state paths.
 * NOTE(review): the initializer's closing "};" is missing from this
 * extraction — confirm against the original source.
 */
90 * Maps state flavor to number of words in the state:
92 unsigned int state_count
[] = {
94 i386_NEW_THREAD_STATE_COUNT
,
95 i386_FLOAT_STATE_COUNT
,
96 i386_ISA_PORT_MAP_STATE_COUNT
,
97 i386_V86_ASSIST_STATE_COUNT
,
98 i386_REGS_SEGS_STATE_COUNT
,
99 i386_THREAD_SYSCALL_STATE_COUNT
,
100 /* THREAD_STATE_NONE */ 0,
101 i386_SAVED_STATE_COUNT
,
106 void act_machine_throughcall(thread_act_t thr_act
);
107 extern thread_t
Switch_context(
111 extern void Thread_continue(void);
112 extern void Load_context(
116 * consider_machine_collect:
118 * Try to collect machine-dependent pages
121 consider_machine_collect()
126 consider_machine_adjust()
132 * machine_kernel_stack_init:
134 * Initialize a kernel stack which has already been
135 * attached to its thread_activation.
139 machine_kernel_stack_init(
141 void (*start_pos
)(thread_t
))
143 thread_act_t thr_act
= thread
->top_act
;
147 stack
= thread
->kernel_stack
;
151 * We want to run at start_pos, giving it as an argument
152 * the return value from Load_context/Switch_context.
153 * Thread_continue takes care of the mismatch between
154 * the argument-passing/return-value conventions.
155 * This function will not return normally,
156 * so we don`t have to worry about a return address.
158 STACK_IKS(stack
)->k_eip
= (int) Thread_continue
;
159 STACK_IKS(stack
)->k_ebx
= (int) start_pos
;
160 STACK_IKS(stack
)->k_esp
= (int) STACK_IEL(stack
);
163 * Point top of kernel stack to user`s registers.
165 STACK_IEL(stack
)->saved_state
= &thr_act
->mact
.pcb
->iss
;
/*
 * Descriptor-table accessors for the current CPU.
 * The first three map to per-CPU tables (mp_gdt/mp_ldt/mp_ktss); the
 * second three map to the single global tables.
 * NOTE(review): both variants appear here without the multiprocessor
 * #if / #else / #endif guards, which were evidently lost in this
 * extraction — confirm against the original source.
 */
170 #define curr_gdt(mycpu) (mp_gdt[mycpu])
171 #define curr_ldt(mycpu) (mp_ldt[mycpu])
172 #define curr_ktss(mycpu) (mp_ktss[mycpu])
174 #define curr_gdt(mycpu) (gdt)
175 #define curr_ldt(mycpu) (ldt)
176 #define curr_ktss(mycpu) (&ktss)
/* Pointer to the GDT descriptor for selector `sel` on CPU `mycpu`. */
179 #define gdt_desc_p(mycpu,sel) \
180 ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
183 act_machine_switch_pcb( thread_act_t new_act
)
185 pcb_t pcb
= new_act
->mact
.pcb
;
187 register iopb_tss_t tss
= pcb
->ims
.io_tss
;
188 vm_offset_t pcb_stack_top
;
189 register user_ldt_t ldt
= pcb
->ims
.ldt
;
191 assert(new_act
->thread
!= NULL
);
192 assert(new_act
->thread
->kernel_stack
!= 0);
193 STACK_IEL(new_act
->thread
->kernel_stack
)->saved_state
=
194 &new_act
->mact
.pcb
->iss
;
197 * Save a pointer to the top of the "kernel" stack -
198 * actually the place in the PCB where a trap into
199 * kernel mode will push the registers.
200 * The location depends on V8086 mode. If we are
201 * not in V8086 mode, then a trap into the kernel
202 * won`t save the v86 segments, so we leave room.
205 pcb_stack_top
= (pcb
->iss
.efl
& EFL_VM
)
206 ? (int) (&pcb
->iss
+ 1)
207 : (int) (&pcb
->iss
.v86_segs
);
209 mp_disable_preemption();
210 mycpu
= cpu_number();
214 * No per-thread IO permissions.
215 * Use standard kernel TSS.
217 if (!(gdt_desc_p(mycpu
,KERNEL_TSS
)->access
& ACC_TSS_BUSY
))
219 curr_ktss(mycpu
)->esp0
= pcb_stack_top
;
223 * Set the IO permissions. Use this thread`s TSS.
225 *gdt_desc_p(mycpu
,USER_TSS
)
226 = *(struct real_descriptor
*)tss
->iopb_desc
;
227 tss
->tss
.esp0
= pcb_stack_top
;
229 gdt_desc_p(mycpu
,KERNEL_TSS
)->access
&= ~ ACC_TSS_BUSY
;
233 * Set the thread`s LDT.
236 struct real_descriptor
*ldtp
;
240 ldtp
= (struct real_descriptor
*)curr_ldt(mycpu
);
241 ldtp
[sel_idx(USER_CTHREAD
)] = pcb
->cthread_desc
;
246 * Thread has its own LDT.
248 *gdt_desc_p(mycpu
,USER_LDT
) = ldt
->desc
;
252 mp_enable_preemption();
254 * Load the floating-point context, if necessary.
256 fpu_load_context(pcb
);
261 * Switch to the first thread on a CPU.
264 machine_load_context(
267 act_machine_switch_pcb(new->top_act
);
/*
 * Diagnostic counter: number of times an activation had to be swapped
 * back in before a switch.  (The original comment is truncated in this
 * extraction.)
 */
272 * Number of times we needed to swap an activation back in before
275 int switch_act_swapins
= 0;
280 * Machine-dependent details of activation switching. Called with
281 * RPC locks held and preemption disabled.
289 int cpu
= cpu_number();
292 * Switch the vm, ast and pcb context.
293 * Save FP registers if in use and set TS (task switch) bit.
295 fpu_save_context(thread
);
297 active_stacks
[cpu
] = thread
->kernel_stack
;
298 ast_context(new, cpu
);
300 PMAP_SWITCH_CONTEXT(old
, new, cpu
);
301 act_machine_switch_pcb(new);
305 * Switch to a new thread.
306 * Save the old thread`s kernel state or continuation,
310 machine_switch_context(
312 void (*continuation
)(void),
315 register thread_act_t old_act
= old
->top_act
,
316 new_act
= new->top_act
;
319 assert(active_stacks
[cpu_number()] == old_act
->thread
->kernel_stack
);
321 check_simple_locks();
324 * Save FP registers if in use.
326 fpu_save_context(old
);
329 * Switch address maps if need be, even if not switching tasks.
330 * (A server activation may be "borrowing" a client map.)
333 int mycpu
= cpu_number();
335 PMAP_SWITCH_CONTEXT(old_act
, new_act
, mycpu
)
339 * Load the rest of the user state for the new thread
341 act_machine_switch_pcb(new_act
);
342 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_SCHED
) | DBG_FUNC_NONE
,
343 (int)old
, (int)new, old
->sched_pri
, new->sched_pri
, 0);
344 old
->continuation
= NULL
;
345 return(Switch_context(old
, continuation
, new));
/*
 * act_machine_sv_free: release saveareas associated with an act; if
 * flag is true, release user-level savearea(s) too.
 * NOTE(review): the function body is missing from this extraction; on
 * i386 there are no saveareas (cf. thread_swapin_mach_alloc below), so
 * this is presumably a no-op — confirm against the original source.
 */
349 * act_machine_sv_free
350 * release saveareas associated with an act. if flag is true, release
351 * user level savearea(s) too, else don't
354 act_machine_sv_free(thread_act_t act
, int flag
)
359 * act_machine_set_state:
361 * Set the status of the specified thread. Called with "appropriate"
362 * thread-related locks held (see act_lock_thread()), so
363 * thr_act->thread is guaranteed not to change.
367 machine_thread_set_state(
368 thread_act_t thr_act
,
369 thread_flavor_t flavor
,
370 thread_state_t tstate
,
371 mach_msg_type_number_t count
)
376 case THREAD_SYSCALL_STATE
:
378 register struct thread_syscall_state
*state
;
379 register struct i386_saved_state
*saved_state
= USER_REGS(thr_act
);
381 state
= (struct thread_syscall_state
*) tstate
;
382 saved_state
->eax
= state
->eax
;
383 saved_state
->edx
= state
->edx
;
385 saved_state
->efl
= state
->efl
;
387 saved_state
->efl
= (state
->efl
& ~EFL_USER_CLEAR
) | EFL_USER_SET
;
388 saved_state
->eip
= state
->eip
;
389 saved_state
->uesp
= state
->esp
;
393 case i386_SAVED_STATE
:
395 register struct i386_saved_state
*state
;
396 register struct i386_saved_state
*saved_state
;
398 if (count
< i386_SAVED_STATE_COUNT
) {
399 return(KERN_INVALID_ARGUMENT
);
402 state
= (struct i386_saved_state
*) tstate
;
404 saved_state
= USER_REGS(thr_act
);
409 saved_state
->edi
= state
->edi
;
410 saved_state
->esi
= state
->esi
;
411 saved_state
->ebp
= state
->ebp
;
412 saved_state
->uesp
= state
->uesp
;
413 saved_state
->ebx
= state
->ebx
;
414 saved_state
->edx
= state
->edx
;
415 saved_state
->ecx
= state
->ecx
;
416 saved_state
->eax
= state
->eax
;
417 saved_state
->eip
= state
->eip
;
419 saved_state
->efl
= state
->efl
;
421 saved_state
->efl
= (state
->efl
& ~EFL_USER_CLEAR
)
425 * Segment registers. Set differently in V8086 mode.
427 if (state
->efl
& EFL_VM
) {
429 * Set V8086 mode segment registers.
431 saved_state
->cs
= state
->cs
& 0xffff;
432 saved_state
->ss
= state
->ss
& 0xffff;
433 saved_state
->v86_segs
.v86_ds
= state
->ds
& 0xffff;
434 saved_state
->v86_segs
.v86_es
= state
->es
& 0xffff;
435 saved_state
->v86_segs
.v86_fs
= state
->fs
& 0xffff;
436 saved_state
->v86_segs
.v86_gs
= state
->gs
& 0xffff;
439 * Zero protected mode segment registers.
446 if (thr_act
->mact
.pcb
->ims
.v86s
.int_table
) {
448 * Hardware assist on.
450 thr_act
->mact
.pcb
->ims
.v86s
.flags
=
451 state
->efl
& (EFL_TF
| EFL_IF
);
454 else if (kernel_act
) {
456 * 386 mode. Set segment registers for flat
457 * 32-bit address space.
459 saved_state
->cs
= KERNEL_CS
;
460 saved_state
->ss
= KERNEL_DS
;
461 saved_state
->ds
= KERNEL_DS
;
462 saved_state
->es
= KERNEL_DS
;
463 saved_state
->fs
= KERNEL_DS
;
464 saved_state
->gs
= CPU_DATA
;
468 * User setting segment registers.
469 * Code and stack selectors have already been
470 * checked. Others will be reset by 'iret'
471 * if they are not valid.
473 saved_state
->cs
= state
->cs
;
474 saved_state
->ss
= state
->ss
;
475 saved_state
->ds
= state
->ds
;
476 saved_state
->es
= state
->es
;
477 saved_state
->fs
= state
->fs
;
478 saved_state
->gs
= state
->gs
;
483 case i386_NEW_THREAD_STATE
:
484 case i386_REGS_SEGS_STATE
:
486 register struct i386_new_thread_state
*state
;
487 register struct i386_saved_state
*saved_state
;
489 if (count
< i386_NEW_THREAD_STATE_COUNT
) {
490 return(KERN_INVALID_ARGUMENT
);
493 if (flavor
== i386_REGS_SEGS_STATE
) {
495 * Code and stack selectors must not be null,
496 * and must have user protection levels.
497 * Only the low 16 bits are valid.
507 (state
->cs
== 0 || (state
->cs
& SEL_PL
) != SEL_PL_U
508 || state
->ss
== 0 || (state
->ss
& SEL_PL
) != SEL_PL_U
))
509 return KERN_INVALID_ARGUMENT
;
512 state
= (struct i386_new_thread_state
*) tstate
;
514 saved_state
= USER_REGS(thr_act
);
519 saved_state
->edi
= state
->edi
;
520 saved_state
->esi
= state
->esi
;
521 saved_state
->ebp
= state
->ebp
;
522 saved_state
->uesp
= state
->uesp
;
523 saved_state
->ebx
= state
->ebx
;
524 saved_state
->edx
= state
->edx
;
525 saved_state
->ecx
= state
->ecx
;
526 saved_state
->eax
= state
->eax
;
527 saved_state
->eip
= state
->eip
;
529 saved_state
->efl
= state
->efl
;
531 saved_state
->efl
= (state
->efl
& ~EFL_USER_CLEAR
)
535 * Segment registers. Set differently in V8086 mode.
537 if (state
->efl
& EFL_VM
) {
539 * Set V8086 mode segment registers.
541 saved_state
->cs
= state
->cs
& 0xffff;
542 saved_state
->ss
= state
->ss
& 0xffff;
543 saved_state
->v86_segs
.v86_ds
= state
->ds
& 0xffff;
544 saved_state
->v86_segs
.v86_es
= state
->es
& 0xffff;
545 saved_state
->v86_segs
.v86_fs
= state
->fs
& 0xffff;
546 saved_state
->v86_segs
.v86_gs
= state
->gs
& 0xffff;
549 * Zero protected mode segment registers.
556 if (thr_act
->mact
.pcb
->ims
.v86s
.int_table
) {
558 * Hardware assist on.
560 thr_act
->mact
.pcb
->ims
.v86s
.flags
=
561 state
->efl
& (EFL_TF
| EFL_IF
);
564 else if (flavor
== i386_NEW_THREAD_STATE
&& kernel_act
) {
566 * 386 mode. Set segment registers for flat
567 * 32-bit address space.
569 saved_state
->cs
= KERNEL_CS
;
570 saved_state
->ss
= KERNEL_DS
;
571 saved_state
->ds
= KERNEL_DS
;
572 saved_state
->es
= KERNEL_DS
;
573 saved_state
->fs
= KERNEL_DS
;
574 saved_state
->gs
= CPU_DATA
;
578 * User setting segment registers.
579 * Code and stack selectors have already been
580 * checked. Others will be reset by 'iret'
581 * if they are not valid.
583 saved_state
->cs
= state
->cs
;
584 saved_state
->ss
= state
->ss
;
585 saved_state
->ds
= state
->ds
;
586 saved_state
->es
= state
->es
;
587 saved_state
->fs
= state
->fs
;
588 saved_state
->gs
= state
->gs
;
593 case i386_FLOAT_STATE
: {
594 struct i386_float_state
*state
= (struct i386_float_state
*)tstate
;
595 if (count
< i386_old_FLOAT_STATE_COUNT
)
596 return(KERN_INVALID_ARGUMENT
);
597 if (count
< i386_FLOAT_STATE_COUNT
)
598 return fpu_set_state(thr_act
,(struct i386_float_state
*)tstate
);
599 else return fpu_set_fxstate(thr_act
,(struct i386_float_state
*)tstate
);
603 * Temporary - replace by i386_io_map
605 case i386_ISA_PORT_MAP_STATE
: {
606 register struct i386_isa_port_map_state
*state
;
607 register iopb_tss_t tss
;
609 if (count
< i386_ISA_PORT_MAP_STATE_COUNT
)
610 return(KERN_INVALID_ARGUMENT
);
615 case i386_V86_ASSIST_STATE
:
617 register struct i386_v86_assist_state
*state
;
618 vm_offset_t int_table
;
621 if (count
< i386_V86_ASSIST_STATE_COUNT
)
622 return KERN_INVALID_ARGUMENT
;
624 state
= (struct i386_v86_assist_state
*) tstate
;
625 int_table
= state
->int_table
;
626 int_count
= state
->int_count
;
628 if (int_table
>= VM_MAX_ADDRESS
||
630 int_count
* sizeof(struct v86_interrupt_table
)
632 return KERN_INVALID_ARGUMENT
;
634 thr_act
->mact
.pcb
->ims
.v86s
.int_table
= int_table
;
635 thr_act
->mact
.pcb
->ims
.v86s
.int_count
= int_count
;
637 thr_act
->mact
.pcb
->ims
.v86s
.flags
=
638 USER_REGS(thr_act
)->efl
& (EFL_TF
| EFL_IF
);
642 case i386_THREAD_STATE
: {
643 struct i386_saved_state
*saved_state
;
644 i386_thread_state_t
*state25
;
646 saved_state
= USER_REGS(thr_act
);
647 state25
= (i386_thread_state_t
*)tstate
;
649 saved_state
->eax
= state25
->eax
;
650 saved_state
->ebx
= state25
->ebx
;
651 saved_state
->ecx
= state25
->ecx
;
652 saved_state
->edx
= state25
->edx
;
653 saved_state
->edi
= state25
->edi
;
654 saved_state
->esi
= state25
->esi
;
655 saved_state
->ebp
= state25
->ebp
;
656 saved_state
->uesp
= state25
->esp
;
657 saved_state
->efl
= (state25
->eflags
& ~EFL_USER_CLEAR
)
659 saved_state
->eip
= state25
->eip
;
660 saved_state
->cs
= USER_CS
; /* FIXME? */
661 saved_state
->ss
= USER_DS
;
662 saved_state
->ds
= USER_DS
;
663 saved_state
->es
= USER_DS
;
664 saved_state
->fs
= state25
->fs
;
665 saved_state
->gs
= state25
->gs
;
670 return(KERN_INVALID_ARGUMENT
);
673 return(KERN_SUCCESS
);
679 * Get the status of the specified thread.
684 machine_thread_get_state(
685 thread_act_t thr_act
,
686 thread_flavor_t flavor
,
687 thread_state_t tstate
,
688 mach_msg_type_number_t
*count
)
692 case i386_SAVED_STATE
:
694 register struct i386_saved_state
*state
;
695 register struct i386_saved_state
*saved_state
;
697 if (*count
< i386_SAVED_STATE_COUNT
)
698 return(KERN_INVALID_ARGUMENT
);
700 state
= (struct i386_saved_state
*) tstate
;
701 saved_state
= USER_REGS(thr_act
);
704 * First, copy everything:
706 *state
= *saved_state
;
708 if (saved_state
->efl
& EFL_VM
) {
712 state
->ds
= saved_state
->v86_segs
.v86_ds
& 0xffff;
713 state
->es
= saved_state
->v86_segs
.v86_es
& 0xffff;
714 state
->fs
= saved_state
->v86_segs
.v86_fs
& 0xffff;
715 state
->gs
= saved_state
->v86_segs
.v86_gs
& 0xffff;
717 if (thr_act
->mact
.pcb
->ims
.v86s
.int_table
) {
721 if ((thr_act
->mact
.pcb
->ims
.v86s
.flags
&
722 (EFL_IF
|V86_IF_PENDING
)) == 0)
723 state
->efl
&= ~EFL_IF
;
730 state
->ds
= saved_state
->ds
& 0xffff;
731 state
->es
= saved_state
->es
& 0xffff;
732 state
->fs
= saved_state
->fs
& 0xffff;
733 state
->gs
= saved_state
->gs
& 0xffff;
735 *count
= i386_SAVED_STATE_COUNT
;
739 case i386_NEW_THREAD_STATE
:
740 case i386_REGS_SEGS_STATE
:
742 register struct i386_new_thread_state
*state
;
743 register struct i386_saved_state
*saved_state
;
745 if (*count
< i386_NEW_THREAD_STATE_COUNT
)
746 return(KERN_INVALID_ARGUMENT
);
748 state
= (struct i386_new_thread_state
*) tstate
;
749 saved_state
= USER_REGS(thr_act
);
754 state
->edi
= saved_state
->edi
;
755 state
->esi
= saved_state
->esi
;
756 state
->ebp
= saved_state
->ebp
;
757 state
->ebx
= saved_state
->ebx
;
758 state
->edx
= saved_state
->edx
;
759 state
->ecx
= saved_state
->ecx
;
760 state
->eax
= saved_state
->eax
;
761 state
->eip
= saved_state
->eip
;
762 state
->efl
= saved_state
->efl
;
763 state
->uesp
= saved_state
->uesp
;
765 state
->cs
= saved_state
->cs
;
766 state
->ss
= saved_state
->ss
;
767 if (saved_state
->efl
& EFL_VM
) {
771 state
->ds
= saved_state
->v86_segs
.v86_ds
& 0xffff;
772 state
->es
= saved_state
->v86_segs
.v86_es
& 0xffff;
773 state
->fs
= saved_state
->v86_segs
.v86_fs
& 0xffff;
774 state
->gs
= saved_state
->v86_segs
.v86_gs
& 0xffff;
776 if (thr_act
->mact
.pcb
->ims
.v86s
.int_table
) {
780 if ((thr_act
->mact
.pcb
->ims
.v86s
.flags
&
781 (EFL_IF
|V86_IF_PENDING
)) == 0)
782 state
->efl
&= ~EFL_IF
;
789 state
->ds
= saved_state
->ds
& 0xffff;
790 state
->es
= saved_state
->es
& 0xffff;
791 state
->fs
= saved_state
->fs
& 0xffff;
792 state
->gs
= saved_state
->gs
& 0xffff;
794 *count
= i386_NEW_THREAD_STATE_COUNT
;
798 case THREAD_SYSCALL_STATE
:
800 register struct thread_syscall_state
*state
;
801 register struct i386_saved_state
*saved_state
= USER_REGS(thr_act
);
803 state
= (struct thread_syscall_state
*) tstate
;
804 state
->eax
= saved_state
->eax
;
805 state
->edx
= saved_state
->edx
;
806 state
->efl
= saved_state
->efl
;
807 state
->eip
= saved_state
->eip
;
808 state
->esp
= saved_state
->uesp
;
809 *count
= i386_THREAD_SYSCALL_STATE_COUNT
;
813 case THREAD_STATE_FLAVOR_LIST
:
815 return (KERN_INVALID_ARGUMENT
);
816 tstate
[0] = i386_NEW_THREAD_STATE
;
817 tstate
[1] = i386_FLOAT_STATE
;
818 tstate
[2] = i386_ISA_PORT_MAP_STATE
;
819 tstate
[3] = i386_V86_ASSIST_STATE
;
820 tstate
[4] = THREAD_SYSCALL_STATE
;
824 case i386_FLOAT_STATE
: {
825 struct i386_float_state
*state
= (struct i386_float_state
*)tstate
;
827 if (*count
< i386_old_FLOAT_STATE_COUNT
)
828 return(KERN_INVALID_ARGUMENT
);
829 if (*count
< i386_FLOAT_STATE_COUNT
) {
830 *count
= i386_old_FLOAT_STATE_COUNT
;
831 return fpu_get_state(thr_act
,(struct i386_float_state
*)tstate
);
833 *count
= i386_FLOAT_STATE_COUNT
;
834 return fpu_get_fxstate(thr_act
,(struct i386_float_state
*)tstate
);
839 * Temporary - replace by i386_io_map
841 case i386_ISA_PORT_MAP_STATE
: {
842 register struct i386_isa_port_map_state
*state
;
843 register iopb_tss_t tss
;
845 if (*count
< i386_ISA_PORT_MAP_STATE_COUNT
)
846 return(KERN_INVALID_ARGUMENT
);
848 state
= (struct i386_isa_port_map_state
*) tstate
;
849 tss
= thr_act
->mact
.pcb
->ims
.io_tss
;
855 * The thread has no ktss, so no IO permissions.
858 for (i
= 0; i
< sizeof state
->pm
; i
++)
862 * The thread has its own ktss.
865 bcopy((char *) tss
->bitmap
,
870 *count
= i386_ISA_PORT_MAP_STATE_COUNT
;
874 case i386_V86_ASSIST_STATE
:
876 register struct i386_v86_assist_state
*state
;
878 if (*count
< i386_V86_ASSIST_STATE_COUNT
)
879 return KERN_INVALID_ARGUMENT
;
881 state
= (struct i386_v86_assist_state
*) tstate
;
882 state
->int_table
= thr_act
->mact
.pcb
->ims
.v86s
.int_table
;
883 state
->int_count
= thr_act
->mact
.pcb
->ims
.v86s
.int_count
;
885 *count
= i386_V86_ASSIST_STATE_COUNT
;
889 case i386_THREAD_STATE
: {
890 struct i386_saved_state
*saved_state
;
891 i386_thread_state_t
*state
;
893 saved_state
= USER_REGS(thr_act
);
894 state
= (i386_thread_state_t
*)tstate
;
896 state
->eax
= saved_state
->eax
;
897 state
->ebx
= saved_state
->ebx
;
898 state
->ecx
= saved_state
->ecx
;
899 state
->edx
= saved_state
->edx
;
900 state
->edi
= saved_state
->edi
;
901 state
->esi
= saved_state
->esi
;
902 state
->ebp
= saved_state
->ebp
;
903 state
->esp
= saved_state
->uesp
;
904 state
->eflags
= saved_state
->efl
;
905 state
->eip
= saved_state
->eip
;
906 state
->cs
= saved_state
->cs
;
907 state
->ss
= saved_state
->ss
;
908 state
->ds
= saved_state
->ds
;
909 state
->es
= saved_state
->es
;
910 state
->fs
= saved_state
->fs
;
911 state
->gs
= saved_state
->gs
;
916 return(KERN_INVALID_ARGUMENT
);
919 return(KERN_SUCCESS
);
923 * Initialize the machine-dependent state for a new thread.
926 machine_thread_create(
930 pcb_t pcb
= &thread
->mact
.xxx_pcb
;
932 thread
->mact
.pcb
= pcb
;
934 simple_lock_init(&pcb
->lock
, ETAP_MISC_PCB
);
937 * Guarantee that the bootstrapped thread will be in user
940 pcb
->iss
.cs
= USER_CS
;
941 pcb
->iss
.ss
= USER_DS
;
942 pcb
->iss
.ds
= USER_DS
;
943 pcb
->iss
.es
= USER_DS
;
944 pcb
->iss
.fs
= USER_DS
;
945 pcb
->iss
.gs
= USER_DS
;
946 pcb
->iss
.efl
= EFL_USER_SET
;
948 extern struct fake_descriptor ldt
[];
949 struct real_descriptor
*ldtp
;
950 ldtp
= (struct real_descriptor
*)ldt
;
951 pcb
->cthread_desc
= ldtp
[sel_idx(USER_DS
)];
955 * Allocate a kernel stack per shuttle
957 thread
->kernel_stack
= (int)stack_alloc(thread
, thread_continue
);
958 thread
->state
&= ~TH_STACK_HANDOFF
;
959 assert(thread
->kernel_stack
!= 0);
962 * Point top of kernel stack to user`s registers.
964 STACK_IEL(thread
->kernel_stack
)->saved_state
= &pcb
->iss
;
966 return(KERN_SUCCESS
);
970 * Machine-dependent cleanup prior to destroying a thread
973 machine_thread_destroy(
976 register pcb_t pcb
= thread
->mact
.pcb
;
980 if (pcb
->ims
.io_tss
!= 0)
981 iopb_destroy(pcb
->ims
.io_tss
);
982 if (pcb
->ims
.ifps
!= 0)
983 fp_free(pcb
->ims
.ifps
);
984 if (pcb
->ims
.ldt
!= 0)
985 user_ldt_free(pcb
->ims
.ldt
);
986 thread
->mact
.pcb
= (pcb_t
)0;
990 * This is used to set the current thr_act/thread
991 * when starting up a new processor
994 machine_thread_set_current( thread_t thread
)
998 mp_disable_preemption();
999 my_cpu
= cpu_number();
1001 cpu_data
[my_cpu
].active_thread
= thread
->top_act
;
1002 active_kloaded
[my_cpu
] = THR_ACT_NULL
;
1004 mp_enable_preemption();
1008 machine_thread_terminate_self(void)
1013 act_machine_return(int code
)
1015 thread_act_t thr_act
= current_act();
1018 * This code is called with nothing locked.
1019 * It also returns with nothing locked, if it returns.
1021 * This routine terminates the current thread activation.
1022 * If this is the only activation associated with its
1023 * thread shuttle, then the entire thread (shuttle plus
1024 * activation) is terminated.
1026 assert( code
== KERN_TERMINATED
);
1029 /* This is the only activation attached to the shuttle... */
1030 /* terminate the entire thread (shuttle plus activation) */
1032 assert(thr_act
->thread
->top_act
== thr_act
);
1033 thread_terminate_self();
1037 panic("act_machine_return: TALKING ZOMBIE! (1)");
1042 * Perform machine-dependent per-thread initializations
1045 machine_thread_init(void)
1052 * Some routines for debugging activation code
1054 static void dump_handlers(thread_act_t
);
1055 void dump_regs(thread_act_t
);
1058 dump_handlers(thread_act_t thr_act
)
1060 ReturnHandler
*rhp
= thr_act
->handlers
;
1065 if (rhp
== &thr_act
->special_handler
){
1067 printf("[NON-Zero next ptr(%x)]", rhp
->next
);
1068 printf("special_handler()->");
1071 printf("hdlr_%d(%x)->",counter
,rhp
->handler
);
1073 if (++counter
> 32) {
1074 printf("Aborting: HUGE handler chain\n");
1078 printf("HLDR_NULL\n");
1082 dump_regs(thread_act_t thr_act
)
1084 if (thr_act
->mact
.pcb
) {
1085 register struct i386_saved_state
*ssp
= USER_REGS(thr_act
);
1086 /* Print out user register state */
1087 printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
1088 ssp
->edi
, ssp
->esi
, ssp
->ebp
, ssp
->ebx
, ssp
->edx
);
1089 printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
1090 ssp
->ecx
, ssp
->eax
, ssp
->eip
, ssp
->efl
, ssp
->uesp
);
1091 printf("\t\tcs=%x ss=%x\n", ssp
->cs
, ssp
->ss
);
1096 dump_act(thread_act_t thr_act
)
1101 printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
1102 thr_act
, thr_act
->ref_count
,
1103 thr_act
->thread
, thr_act
->thread
? thr_act
->thread
->ref_count
:0,
1104 thr_act
->task
, thr_act
->task
? thr_act
->task
->ref_count
: 0);
1106 printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
1107 thr_act
->suspend_count
, thr_act
->user_stop_count
,
1108 thr_act
->active
, thr_act
->ast
);
1109 printf("\thi=%x lo=%x\n", thr_act
->higher
, thr_act
->lower
);
1110 printf("\tpcb=%x\n", thr_act
->mact
.pcb
);
1112 if (thr_act
->thread
&& thr_act
->thread
->kernel_stack
) {
1113 vm_offset_t stack
= thr_act
->thread
->kernel_stack
;
1115 printf("\tk_stk %x eip %x ebx %x esp %x iss %x\n",
1116 stack
, STACK_IKS(stack
)->k_eip
, STACK_IKS(stack
)->k_ebx
,
1117 STACK_IKS(stack
)->k_esp
, STACK_IEL(stack
)->saved_state
);
1120 dump_handlers(thr_act
);
1122 return((int)thr_act
);
1128 thread_act_t thr_act
= current_act();
1130 if (thr_act
->mact
.pcb
)
1131 return(thr_act
->mact
.pcb
->iss
.eip
);
1138 thread_swapin_mach_alloc(thread_t thread
)
1141 /* 386 does not have saveareas */
1145 * detach and return a kernel stack from a thread
1149 machine_stack_detach(thread_t thread
)
1153 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_DETACH
),
1154 thread
, thread
->priority
,
1155 thread
->sched_pri
, 0,
1158 stack
= thread
->kernel_stack
;
1159 thread
->kernel_stack
= 0;
/*
 * machine_stack_attach: attach a kernel stack to `thread` and
 * initialize it so the thread resumes in Thread_continue, which
 * dispatches to start_pos.
 * NOTE(review): the `stack` parameter line and enclosing braces are
 * missing from this extraction, and the KERNEL_DEBUG call references
 * `continuation`, which is not among the visible parameters — confirm
 * against the original source.
 */
1164 * attach a kernel stack to a thread and initialize it
1168 machine_stack_attach(thread_t thread
,
1170 void (*start_pos
)(thread_t
))
1172 struct i386_kernel_state
*statep
;
1174 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_ATTACH
),
1175 thread
, thread
->priority
,
1176 thread
->sched_pri
, continuation
,
/* Record the stack and aim its saved kernel state at Thread_continue. */
1180 statep
= STACK_IKS(stack
);
1181 thread
->kernel_stack
= stack
;
1183 statep
->k_eip
= (unsigned long) Thread_continue
;
1184 statep
->k_ebx
= (unsigned long) start_pos
;
1185 statep
->k_esp
= (unsigned long) STACK_IEL(stack
);
/* Point top of kernel stack to the user register save area. */
1187 STACK_IEL(stack
)->saved_state
= &thread
->mact
.pcb
->iss
;
/*
 * machine_stack_handoff: move the kernel stack from `old` to the new
 * thread, switch pmap context between the two tasks, and make the new
 * thread current on this CPU.
 * NOTE(review): the `new` parameter line and braces are missing from
 * this extraction, and the first KERNEL_DEBUG call references `thread`
 * and `continuation`, which are not among the visible parameters —
 * confirm against the original source.
 */
1193 * move a stack from old to new thread
1197 machine_stack_handoff(thread_t old
,
1203 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_HANDOFF
),
1204 thread
, thread
->priority
,
1205 thread
->sched_pri
, continuation
,
1208 assert(new->top_act
);
1209 assert(old
->top_act
);
/* Detach from old, attach to new (no start routine: pure handoff). */
1211 stack
= machine_stack_detach(old
);
1212 machine_stack_attach(new, stack
, 0);
1214 PMAP_SWITCH_CONTEXT(old
->top_act
->task
, new->top_act
->task
, cpu_number());
1216 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_HANDOFF
) | DBG_FUNC_NONE
,
1217 (int)old
, (int)new, old
->sched_pri
, new->sched_pri
, 0);
1219 machine_thread_set_current(new);
/* The new thread's stack is now the active stack on this CPU. */
1221 active_stacks
[cpu_number()] = new->kernel_stack
;
/*
 * Machine context captured/restored by act_thread_csave() and
 * act_thread_catt(): the full user register state plus the
 * floating-point state.
 * NOTE(review): the closing "};" is missing from this extraction.
 */
1226 struct i386_act_context
{
1227 struct i386_saved_state ss
;
1228 struct i386_float_state fs
;
/*
 * act_thread_csave: allocate an i386_act_context and snapshot the
 * current activation's saved register state and floating-point state
 * into it via machine_thread_get_state(); frees the context on failure.
 * NOTE(review): the return type, local declarations (val, kret), the
 * flavor arguments to machine_thread_get_state and several
 * braces/return statements are missing from this extraction — confirm
 * against the original source.
 */
1232 act_thread_csave(void)
1234 struct i386_act_context
*ic
;
/* Allocate the context; bail out if kalloc fails. */
1238 ic
= (struct i386_act_context
*)kalloc(sizeof(struct i386_act_context
));
1240 if (ic
== (struct i386_act_context
*)NULL
)
/* Capture the general register state. */
1243 val
= i386_SAVED_STATE_COUNT
;
1244 kret
= machine_thread_get_state(current_act(),
1246 (thread_state_t
) &ic
->ss
,
1248 if (kret
!= KERN_SUCCESS
) {
1249 kfree((vm_offset_t
)ic
,sizeof(struct i386_act_context
));
/* Capture the floating-point state. */
1252 val
= i386_FLOAT_STATE_COUNT
;
1253 kret
= machine_thread_get_state(current_act(),
1255 (thread_state_t
) &ic
->fs
,
1257 if (kret
!= KERN_SUCCESS
) {
1258 kfree((vm_offset_t
)ic
,sizeof(struct i386_act_context
));
/*
 * act_thread_catt: restore the register and floating-point state
 * captured by act_thread_csave() via machine_thread_set_state(), then
 * free the saved context.
 * NOTE(review): the return type, local declaration of kret, the early
 * returns and braces are missing from this extraction — confirm
 * against the original source.
 */
1264 act_thread_catt(void *ctx
)
1266 struct i386_act_context
*ic
;
1270 ic
= (struct i386_act_context
*)ctx
;
/* Nothing to restore if no context was saved. */
1272 if (ic
== (struct i386_act_context
*)NULL
)
/* Restore general registers. */
1275 kret
= machine_thread_set_state(current_act(),
1277 (thread_state_t
) &ic
->ss
,
1278 i386_SAVED_STATE_COUNT
);
1279 if (kret
!= KERN_SUCCESS
)
/* Restore floating-point state. */
1282 kret
= machine_thread_set_state(current_act(),
1284 (thread_state_t
) &ic
->fs
,
1285 i386_FLOAT_STATE_COUNT
);
1286 if (kret
!= KERN_SUCCESS
)
/* Done with the saved context. */
1289 kfree((vm_offset_t
)ic
,sizeof(struct i386_act_context
));
1292 void act_thread_cfree(void *ctx
)
1294 kfree((vm_offset_t
)ctx
,sizeof(struct i386_act_context
));