/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/iopb_entries.h>

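/*
 * Per-processor context-switch bookkeeping, indexed by cpu_number().
 */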
vm_offset_t    active_stacks[NCPUS];
vm_offset_t    kernel_stack[NCPUS];
thread_act_t   active_kloaded[NCPUS];

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int state_count[] = {
    /* FLAVOR_LIST */ 0,
    i386_NEW_THREAD_STATE_COUNT,
    i386_FLOAT_STATE_COUNT,
    i386_ISA_PORT_MAP_STATE_COUNT,
    i386_V86_ASSIST_STATE_COUNT,
    i386_REGS_SEGS_STATE_COUNT,
    i386_THREAD_SYSCALL_STATE_COUNT,
    /* THREAD_STATE_NONE */ 0,
    i386_SAVED_STATE_COUNT,
};

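/* Forward and external declarations. */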
void           act_machine_throughcall(thread_act_t thr_act);
extern thread_t Switch_context(
                    thread_t    old,
                    void        (*cont)(void),
                    thread_t    new);
extern void    Thread_continue(void);
extern void    Load_context(
                    thread_t    thread);

/*
 *  consider_machine_collect:
 *
 *  Try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
}

void
consider_machine_adjust()
{
}

/*
 *  machine_kernel_stack_init:
 *
 *  Initialize a kernel stack which has already been
 *  attached to its thread_activation.
 */
void
machine_kernel_stack_init(
    thread_t    thread,
    void        (*start_pos)(thread_t))
{
    thread_act_t    thr_act = thread->top_act;
    vm_offset_t     stack;

    assert(thr_act);
    stack = thread->kernel_stack;
    assert(stack);

    /*
     *  We want to run at start_pos, giving it as an argument
     *  the return value from Load_context/Switch_context.
     *  Thread_continue takes care of the mismatch between
     *  the argument-passing/return-value conventions.
     *  This function will not return normally,
     *  so we don't have to worry about a return address.
     */
    STACK_IKS(stack)->k_eip = (int) Thread_continue;
    STACK_IKS(stack)->k_ebx = (int) start_pos;
    STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);

    /*
     *  Point top of kernel stack to user's registers.
     */
    STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
}

#if NCPUS > 1
#define curr_gdt(mycpu)     (mp_gdt[mycpu])
#define curr_ldt(mycpu)     (mp_ldt[mycpu])
#define curr_ktss(mycpu)    (mp_ktss[mycpu])
#else
#define curr_gdt(mycpu)     (gdt)
#define curr_ldt(mycpu)     (ldt)
#define curr_ktss(mycpu)    (&ktss)
#endif

#define gdt_desc_p(mycpu,sel) \
    ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])

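/*
 * act_machine_switch_pcb:
 *
 * Load the hardware context for new_act: point the kernel stack at the
 * activation's saved state, set the TSS esp0 (and per-thread I/O bitmap
 * if one exists), install the thread's LDT or LDT entry, and load the
 * floating-point context if necessary.
 */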
void
act_machine_switch_pcb( thread_act_t new_act )
{
    pcb_t               pcb = new_act->mact.pcb;
    int                 mycpu;
    register iopb_tss_t tss = pcb->ims.io_tss;
    vm_offset_t         pcb_stack_top;
    register user_ldt_t uldt = pcb->ims.ldt;  /* uldt rather than ldt:
                                                 curr_ldt() expands to the
                                                 global ldt when NCPUS == 1 */

    assert(new_act->thread != NULL);
    assert(new_act->thread->kernel_stack != 0);
    STACK_IEL(new_act->thread->kernel_stack)->saved_state =
        &new_act->mact.pcb->iss;

    /*
     *  Save a pointer to the top of the "kernel" stack -
     *  actually the place in the PCB where a trap into
     *  kernel mode will push the registers.
     *  The location depends on V8086 mode.  If we are
     *  not in V8086 mode, then a trap into the kernel
     *  won't save the v86 segments, so we leave room.
     */
    pcb_stack_top = (pcb->iss.efl & EFL_VM)
            ? (int) (&pcb->iss + 1)
            : (int) (&pcb->iss.v86_segs);

    mp_disable_preemption();
    mycpu = cpu_number();

    if (tss == 0) {
        /*
         *  No per-thread IO permissions.
         *  Use standard kernel TSS.
         */
        if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
            set_tr(KERNEL_TSS);
        curr_ktss(mycpu)->esp0 = pcb_stack_top;
    }
    else {
        /*
         *  Set the IO permissions.  Use this thread's TSS.
         */
        *gdt_desc_p(mycpu,USER_TSS)
            = *(struct real_descriptor *)tss->iopb_desc;
        tss->tss.esp0 = pcb_stack_top;
        set_tr(USER_TSS);
        gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
    }

    /*
     *  Set the thread's LDT.
     */
    if (uldt == 0) {
        struct real_descriptor *ldtp;

        /*
         *  Use system LDT.
         */
        ldtp = (struct real_descriptor *)curr_ldt(mycpu);
        ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
        set_ldt(KERNEL_LDT);
    }
    else {
        /*
         *  Thread has its own LDT.
         */
        *gdt_desc_p(mycpu,USER_LDT) = uldt->desc;
        set_ldt(USER_LDT);
    }
    mp_enable_preemption();

    /*
     *  Load the floating-point context, if necessary.
     */
    fpu_load_context(pcb);
}

/*
 *  Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t    new)
{
    act_machine_switch_pcb(new->top_act);
    Load_context(new);
}

/*
 *  Number of times we needed to swap an activation back in
 *  before switching to it.
 */
int switch_act_swapins = 0;

/*
 *  machine_switch_act
 *
 *  Machine-dependent details of activation switching.  Called with
 *  RPC locks held and preemption disabled.
 */
void
machine_switch_act(
    thread_t        thread,
    thread_act_t    old,
    thread_act_t    new)
{
    int     cpu = cpu_number();

    /*
     *  Switch the vm, ast and pcb context.
     *  Save FP registers if in use and set TS (task switch) bit.
     */
    fpu_save_context(thread);

    active_stacks[cpu] = thread->kernel_stack;
    ast_context(new, cpu);

    PMAP_SWITCH_CONTEXT(old, new, cpu);
    act_machine_switch_pcb(new);
}

/*
 *  Switch to a new thread.
 *  Save the old thread's kernel state or continuation,
 *  and return it.
 */
thread_t
machine_switch_context(
    thread_t    old,
    void        (*continuation)(void),
    thread_t    new)
{
    register thread_act_t   old_act = old->top_act,
                            new_act = new->top_act;

    assert(active_stacks[cpu_number()] == old_act->thread->kernel_stack);

    check_simple_locks();

    /*
     *  Save FP registers if in use.
     */
    fpu_save_context(old);

    /*
     *  Switch address maps if need be, even if not switching tasks.
     *  (A server activation may be "borrowing" a client map.)
     */
    {
        int     mycpu = cpu_number();

        PMAP_SWITCH_CONTEXT(old_act, new_act, mycpu)
    }

    /*
     *  Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(new_act);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
             (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    old->continuation = NULL;
    return(Switch_context(old, continuation, new));
}

/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't.
 *
 * The i386 keeps no saveareas, so there is nothing to release here.
 */
void
act_machine_sv_free(thread_act_t act, int flag)
{
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.  Called with "appropriate"
 * thread-related locks held (see act_lock_thread()), so
 * thr_act->thread is guaranteed not to change.
 */
kern_return_t
machine_thread_set_state(
    thread_act_t            thr_act,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  count)
{
    int     kernel_act = thr_act->kernel_loading ||
                thr_act->kernel_loaded;

    switch (flavor) {
    case THREAD_SYSCALL_STATE:
    {
        register struct thread_syscall_state    *state;
        register struct i386_saved_state        *saved_state = USER_REGS(thr_act);

        state = (struct thread_syscall_state *) tstate;
        saved_state->eax = state->eax;
        saved_state->edx = state->edx;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
        saved_state->eip = state->eip;
        saved_state->uesp = state->esp;
        break;
    }

    case i386_SAVED_STATE:
    {
        register struct i386_saved_state    *state;
        register struct i386_saved_state    *saved_state;

        if (count < i386_SAVED_STATE_COUNT) {
            return(KERN_INVALID_ARGUMENT);
        }

        state = (struct i386_saved_state *) tstate;
        saved_state = USER_REGS(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
                        | EFL_USER_SET;

        /*
         * Segment registers.  Set differently in V8086 mode.
         */
        if (state->efl & EFL_VM) {
            /*
             * Set V8086 mode segment registers.
             */
            saved_state->cs = state->cs & 0xffff;
            saved_state->ss = state->ss & 0xffff;
            saved_state->v86_segs.v86_ds = state->ds & 0xffff;
            saved_state->v86_segs.v86_es = state->es & 0xffff;
            saved_state->v86_segs.v86_fs = state->fs & 0xffff;
            saved_state->v86_segs.v86_gs = state->gs & 0xffff;

            /*
             * Zero protected mode segment registers.
             */
            saved_state->ds = 0;
            saved_state->es = 0;
            saved_state->fs = 0;
            saved_state->gs = 0;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                thr_act->mact.pcb->ims.v86s.flags =
                    state->efl & (EFL_TF | EFL_IF);
            }
        }
        else if (kernel_act) {
            /*
             * 386 mode.  Set segment registers for flat
             * 32-bit address space.
             */
            saved_state->cs = KERNEL_CS;
            saved_state->ss = KERNEL_DS;
            saved_state->ds = KERNEL_DS;
            saved_state->es = KERNEL_DS;
            saved_state->fs = KERNEL_DS;
            saved_state->gs = CPU_DATA;
        }
        else {
            /*
             * User setting segment registers.
             * Code and stack selectors have already been
             * checked.  Others will be reset by 'iret'
             * if they are not valid.
             */
            saved_state->cs = state->cs;
            saved_state->ss = state->ss;
            saved_state->ds = state->ds;
            saved_state->es = state->es;
            saved_state->fs = state->fs;
            saved_state->gs = state->gs;
        }
        break;
    }

    case i386_NEW_THREAD_STATE:
    case i386_REGS_SEGS_STATE:
    {
        register struct i386_new_thread_state   *state;
        register struct i386_saved_state        *saved_state;

        if (count < i386_NEW_THREAD_STATE_COUNT) {
            return(KERN_INVALID_ARGUMENT);
        }

        state = (struct i386_new_thread_state *) tstate;

        if (flavor == i386_REGS_SEGS_STATE) {
            /*
             * Code and stack selectors must not be null,
             * and must have user protection levels.
             * Only the low 16 bits are valid.
             */
            state->cs &= 0xffff;
            state->ss &= 0xffff;
            state->ds &= 0xffff;
            state->es &= 0xffff;
            state->fs &= 0xffff;
            state->gs &= 0xffff;

            if (!kernel_act &&
                (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
                 || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U))
                return KERN_INVALID_ARGUMENT;
        }

        saved_state = USER_REGS(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;
        if (kernel_act)
            saved_state->efl = state->efl;
        else
            saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
                        | EFL_USER_SET;

        /*
         * Segment registers.  Set differently in V8086 mode.
         */
        if (state->efl & EFL_VM) {
            /*
             * Set V8086 mode segment registers.
             */
            saved_state->cs = state->cs & 0xffff;
            saved_state->ss = state->ss & 0xffff;
            saved_state->v86_segs.v86_ds = state->ds & 0xffff;
            saved_state->v86_segs.v86_es = state->es & 0xffff;
            saved_state->v86_segs.v86_fs = state->fs & 0xffff;
            saved_state->v86_segs.v86_gs = state->gs & 0xffff;

            /*
             * Zero protected mode segment registers.
             */
            saved_state->ds = 0;
            saved_state->es = 0;
            saved_state->fs = 0;
            saved_state->gs = 0;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                thr_act->mact.pcb->ims.v86s.flags =
                    state->efl & (EFL_TF | EFL_IF);
            }
        }
        else if (flavor == i386_NEW_THREAD_STATE && kernel_act) {
            /*
             * 386 mode.  Set segment registers for flat
             * 32-bit address space.
             */
            saved_state->cs = KERNEL_CS;
            saved_state->ss = KERNEL_DS;
            saved_state->ds = KERNEL_DS;
            saved_state->es = KERNEL_DS;
            saved_state->fs = KERNEL_DS;
            saved_state->gs = CPU_DATA;
        }
        else {
            /*
             * User setting segment registers.
             * Code and stack selectors have already been
             * checked.  Others will be reset by 'iret'
             * if they are not valid.
             */
            saved_state->cs = state->cs;
            saved_state->ss = state->ss;
            saved_state->ds = state->ds;
            saved_state->es = state->es;
            saved_state->fs = state->fs;
            saved_state->gs = state->gs;
        }
        break;
    }

    case i386_FLOAT_STATE: {
        struct i386_float_state *state = (struct i386_float_state *)tstate;

        if (count < i386_old_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);
        if (count < i386_FLOAT_STATE_COUNT)
            return fpu_set_state(thr_act, (struct i386_float_state *)tstate);
        else
            return fpu_set_fxstate(thr_act, (struct i386_float_state *)tstate);
    }

    /*
     * Temporary - replace by i386_io_map
     */
    case i386_ISA_PORT_MAP_STATE: {
        register struct i386_isa_port_map_state *state;
        register iopb_tss_t                     tss;

        if (count < i386_ISA_PORT_MAP_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        break;
    }

    case i386_V86_ASSIST_STATE:
    {
        register struct i386_v86_assist_state   *state;
        vm_offset_t int_table;
        int         int_count;

        if (count < i386_V86_ASSIST_STATE_COUNT)
            return KERN_INVALID_ARGUMENT;

        state = (struct i386_v86_assist_state *) tstate;
        int_table = state->int_table;
        int_count = state->int_count;

        if (int_table >= VM_MAX_ADDRESS ||
            int_table +
            int_count * sizeof(struct v86_interrupt_table)
                > VM_MAX_ADDRESS)
            return KERN_INVALID_ARGUMENT;

        thr_act->mact.pcb->ims.v86s.int_table = int_table;
        thr_act->mact.pcb->ims.v86s.int_count = int_count;

        thr_act->mact.pcb->ims.v86s.flags =
            USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
        break;
    }

    case i386_THREAD_STATE: {
        struct i386_saved_state *saved_state;
        i386_thread_state_t     *state25;

        saved_state = USER_REGS(thr_act);
        state25 = (i386_thread_state_t *)tstate;

        saved_state->eax = state25->eax;
        saved_state->ebx = state25->ebx;
        saved_state->ecx = state25->ecx;
        saved_state->edx = state25->edx;
        saved_state->edi = state25->edi;
        saved_state->esi = state25->esi;
        saved_state->ebp = state25->ebp;
        saved_state->uesp = state25->esp;
        saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
                        | EFL_USER_SET;
        saved_state->eip = state25->eip;
        saved_state->cs = USER_CS;  /* FIXME? */
        saved_state->ss = USER_DS;
        saved_state->ds = USER_DS;
        saved_state->es = USER_DS;
        saved_state->fs = state25->fs;
        saved_state->gs = state25->gs;
        break;
    }

    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

/*
 *  machine_thread_get_state:
 *
 *  Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
    thread_act_t            thr_act,
    thread_flavor_t         flavor,
    thread_state_t          tstate,
    mach_msg_type_number_t  *count)
{
    switch (flavor) {
    case i386_SAVED_STATE:
    {
        register struct i386_saved_state    *state;
        register struct i386_saved_state    *saved_state;

        if (*count < i386_SAVED_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_saved_state *) tstate;
        saved_state = USER_REGS(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;

        if (saved_state->efl & EFL_VM) {
            /*
             * V8086 mode.
             */
            state->ds = saved_state->v86_segs.v86_ds & 0xffff;
            state->es = saved_state->v86_segs.v86_es & 0xffff;
            state->fs = saved_state->v86_segs.v86_fs & 0xffff;
            state->gs = saved_state->v86_segs.v86_gs & 0xffff;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                if ((thr_act->mact.pcb->ims.v86s.flags &
                     (EFL_IF|V86_IF_PENDING)) == 0)
                    state->efl &= ~EFL_IF;
            }
        }
        else {
            /*
             * 386 mode.
             */
            state->ds = saved_state->ds & 0xffff;
            state->es = saved_state->es & 0xffff;
            state->fs = saved_state->fs & 0xffff;
            state->gs = saved_state->gs & 0xffff;
        }
        *count = i386_SAVED_STATE_COUNT;
        break;
    }

    case i386_NEW_THREAD_STATE:
    case i386_REGS_SEGS_STATE:
    {
        register struct i386_new_thread_state   *state;
        register struct i386_saved_state        *saved_state;

        if (*count < i386_NEW_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_new_thread_state *) tstate;
        saved_state = USER_REGS(thr_act);

        /*
         * General registers.
         */
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->ebx = saved_state->ebx;
        state->edx = saved_state->edx;
        state->ecx = saved_state->ecx;
        state->eax = saved_state->eax;
        state->eip = saved_state->eip;
        state->efl = saved_state->efl;
        state->uesp = saved_state->uesp;

        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        if (saved_state->efl & EFL_VM) {
            /*
             * V8086 mode.
             */
            state->ds = saved_state->v86_segs.v86_ds & 0xffff;
            state->es = saved_state->v86_segs.v86_es & 0xffff;
            state->fs = saved_state->v86_segs.v86_fs & 0xffff;
            state->gs = saved_state->v86_segs.v86_gs & 0xffff;

            if (thr_act->mact.pcb->ims.v86s.int_table) {
                /*
                 * Hardware assist on.
                 */
                if ((thr_act->mact.pcb->ims.v86s.flags &
                     (EFL_IF|V86_IF_PENDING)) == 0)
                    state->efl &= ~EFL_IF;
            }
        }
        else {
            /*
             * 386 mode.
             */
            state->ds = saved_state->ds & 0xffff;
            state->es = saved_state->es & 0xffff;
            state->fs = saved_state->fs & 0xffff;
            state->gs = saved_state->gs & 0xffff;
        }
        *count = i386_NEW_THREAD_STATE_COUNT;
        break;
    }

    case THREAD_SYSCALL_STATE:
    {
        register struct thread_syscall_state    *state;
        register struct i386_saved_state        *saved_state = USER_REGS(thr_act);

        state = (struct thread_syscall_state *) tstate;
        state->eax = saved_state->eax;
        state->edx = saved_state->edx;
        state->efl = saved_state->efl;
        state->eip = saved_state->eip;
        state->esp = saved_state->uesp;
        *count = i386_THREAD_SYSCALL_STATE_COUNT;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST:
        if (*count < 5)
            return (KERN_INVALID_ARGUMENT);
        tstate[0] = i386_NEW_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_ISA_PORT_MAP_STATE;
        tstate[3] = i386_V86_ASSIST_STATE;
        tstate[4] = THREAD_SYSCALL_STATE;
        *count = 5;
        break;

    case i386_FLOAT_STATE: {
        struct i386_float_state *state = (struct i386_float_state *)tstate;

        if (*count < i386_old_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);
        if (*count < i386_FLOAT_STATE_COUNT) {
            *count = i386_old_FLOAT_STATE_COUNT;
            return fpu_get_state(thr_act, (struct i386_float_state *)tstate);
        } else {
            *count = i386_FLOAT_STATE_COUNT;
            return fpu_get_fxstate(thr_act, (struct i386_float_state *)tstate);
        }
    }

    /*
     * Temporary - replace by i386_io_map
     */
    case i386_ISA_PORT_MAP_STATE: {
        register struct i386_isa_port_map_state *state;
        register iopb_tss_t                     tss;

        if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (struct i386_isa_port_map_state *) tstate;
        tss = thr_act->mact.pcb->ims.io_tss;

        if (tss == 0) {
            int i;

            /*
             *  The thread has no ktss, so no IO permissions.
             */
            for (i = 0; i < sizeof state->pm; i++)
                state->pm[i] = 0xff;
        } else {
            /*
             *  The thread has its own ktss.
             */
            bcopy((char *) tss->bitmap,
                  (char *) state->pm,
                  sizeof state->pm);
        }

        *count = i386_ISA_PORT_MAP_STATE_COUNT;
        break;
    }

    case i386_V86_ASSIST_STATE:
    {
        register struct i386_v86_assist_state   *state;

        if (*count < i386_V86_ASSIST_STATE_COUNT)
            return KERN_INVALID_ARGUMENT;

        state = (struct i386_v86_assist_state *) tstate;
        state->int_table = thr_act->mact.pcb->ims.v86s.int_table;
        state->int_count = thr_act->mact.pcb->ims.v86s.int_count;

        *count = i386_V86_ASSIST_STATE_COUNT;
        break;
    }

    case i386_THREAD_STATE: {
        struct i386_saved_state *saved_state;
        i386_thread_state_t     *state;

        saved_state = USER_REGS(thr_act);
        state = (i386_thread_state_t *)tstate;

        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds;
        state->es = saved_state->es;
        state->fs = saved_state->fs;
        state->gs = saved_state->gs;

        *count = i386_THREAD_STATE_COUNT;
        break;
    }

    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

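/*
 * Illustrative (non-compiled) sketch of the get/modify/set protocol above,
 * assuming a valid thread_act_t `act' and a hypothetical new_eip value:
 *
 *  struct i386_saved_state     regs;
 *  mach_msg_type_number_t      count = i386_SAVED_STATE_COUNT;
 *
 *  if (machine_thread_get_state(act, i386_SAVED_STATE,
 *          (thread_state_t) &regs, &count) == KERN_SUCCESS) {
 *      regs.eip = new_eip;
 *      (void) machine_thread_set_state(act, i386_SAVED_STATE,
 *          (thread_state_t) &regs, count);
 *  }
 *
 * For non-kernel activations, set_state sanitizes eflags via
 * EFL_USER_CLEAR/EFL_USER_SET as shown in the cases above.
 */
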
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
    thread_t    thread,
    task_t      task)
{
    pcb_t   pcb = &thread->mact.xxx_pcb;

    thread->mact.pcb = pcb;

    simple_lock_init(&pcb->lock, ETAP_MISC_PCB);

    /*
     *  Guarantee that the bootstrapped thread will be in user
     *  mode.
     */
    pcb->iss.cs = USER_CS;
    pcb->iss.ss = USER_DS;
    pcb->iss.ds = USER_DS;
    pcb->iss.es = USER_DS;
    pcb->iss.fs = USER_DS;
    pcb->iss.gs = USER_DS;
    pcb->iss.efl = EFL_USER_SET;
    {
        extern struct fake_descriptor   ldt[];
        struct real_descriptor          *ldtp;

        ldtp = (struct real_descriptor *)ldt;
        pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
    }

    /*
     *  Allocate a kernel stack per shuttle
     */
    thread->kernel_stack = (int)stack_alloc(thread, thread_continue);
    thread->state &= ~TH_STACK_HANDOFF;
    assert(thread->kernel_stack != 0);

    /*
     *  Point top of kernel stack to user's registers.
     */
    STACK_IEL(thread->kernel_stack)->saved_state = &pcb->iss;

    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
    thread_t    thread)
{
    register pcb_t  pcb = thread->mact.pcb;

    assert(pcb);

    if (pcb->ims.io_tss != 0)
        iopb_destroy(pcb->ims.io_tss);
    if (pcb->ims.ifps != 0)
        fp_free(pcb->ims.ifps);
    if (pcb->ims.ldt != 0)
        user_ldt_free(pcb->ims.ldt);
    thread->mact.pcb = (pcb_t)0;
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_thread_set_current( thread_t thread )
{
    register int    my_cpu;

    mp_disable_preemption();
    my_cpu = cpu_number();

    cpu_data[my_cpu].active_thread = thread->top_act;
    active_kloaded[my_cpu] = THR_ACT_NULL;

    mp_enable_preemption();
}

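/*
 * Machine-dependent part of a thread terminating itself; the i386 keeps
 * no extra per-thread machine state here, so there is nothing to undo.
 */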
void
machine_thread_terminate_self(void)
{
}

void
act_machine_return(int code)
{
    thread_act_t    thr_act = current_act();

    /*
     * This code is called with nothing locked.
     * It also returns with nothing locked, if it returns.
     *
     * This routine terminates the current thread activation.
     * If this is the only activation associated with its
     * thread shuttle, then the entire thread (shuttle plus
     * activation) is terminated.
     */
    assert( code == KERN_TERMINATED );
    assert( thr_act );

    /* This is the only activation attached to the shuttle... */
    /* terminate the entire thread (shuttle plus activation) */

    assert(thr_act->thread->top_act == thr_act);
    thread_terminate_self();

    /*NOTREACHED*/

    panic("act_machine_return: TALKING ZOMBIE! (1)");
}

/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
}

/*
 * Some routines for debugging activation code
 */
static void dump_handlers(thread_act_t);
void        dump_regs(thread_act_t);

static void
dump_handlers(thread_act_t thr_act)
{
    ReturnHandler   *rhp = thr_act->handlers;
    int             counter = 0;

    printf("\t");
    while (rhp) {
        if (rhp == &thr_act->special_handler) {
            if (rhp->next)
                printf("[NON-Zero next ptr(%x)]", rhp->next);
            printf("special_handler()->");
            break;
        }
        printf("hdlr_%d(%x)->", counter, rhp->handler);
        rhp = rhp->next;
        if (++counter > 32) {
            printf("Aborting: HUGE handler chain\n");
            break;
        }
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_act_t thr_act)
{
    if (thr_act->mact.pcb) {
        register struct i386_saved_state *ssp = USER_REGS(thr_act);

        /* Print out user register state */
        printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
            ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
        printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
            ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
        printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
        thr_act, thr_act->ref_count,
        thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
        thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
        thr_act->suspend_count, thr_act->user_stop_count,
        thr_act->active, thr_act->ast);
    printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower);
    printf("\tpcb=%x\n", thr_act->mact.pcb);

    if (thr_act->thread && thr_act->thread->kernel_stack) {
        vm_offset_t stack = thr_act->thread->kernel_stack;

        printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
            stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
            STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
    }

    dump_handlers(thr_act);
    printf("\n");
    return((int)thr_act);
}

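/*
 * Return the user-mode instruction pointer (eip) saved in the current
 * activation's PCB, or 0 if the activation has no PCB.
 */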
unsigned int
get_useraddr(void)
{
    thread_act_t    thr_act = current_act();

    if (thr_act->mact.pcb)
        return(thr_act->mact.pcb->iss.eip);
    else
        return(0);
}

void
thread_swapin_mach_alloc(thread_t thread)
{
    /* 386 does not have saveareas */
}

/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
            thread, thread->priority,
            thread->sched_pri, 0,
            0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(thread_t thread,
             vm_offset_t stack,
             void (*start_pos)(thread_t))
{
    struct i386_kernel_state    *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
            thread, thread->priority,
            thread->sched_pri, start_pos,
            0);

    assert(stack);
    statep = STACK_IKS(stack);
    thread->kernel_stack = stack;

    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) start_pos;
    statep->k_esp = (unsigned long) STACK_IEL(stack);

    STACK_IEL(stack)->saved_state = &thread->mact.pcb->iss;
}

/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
              thread_t new)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF),
            old, old->priority,
            old->sched_pri, 0,
            0);

    assert(new->top_act);
    assert(old->top_act);

    stack = machine_stack_detach(old);
    machine_stack_attach(new, stack, 0);

    PMAP_SWITCH_CONTEXT(old->top_act->task, new->top_act->task, cpu_number());

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
            (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    machine_thread_set_current(new);

    active_stacks[cpu_number()] = new->kernel_stack;

    return;
}

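/*
 * Saved per-activation user context: the integer register state and the
 * floating-point state, captured and restored as a pair by the routines
 * below.
 */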
struct i386_act_context {
    struct i386_saved_state ss;
    struct i386_float_state fs;
};

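/*
 * Snapshot the current activation's saved integer and floating-point user
 * state into a freshly allocated i386_act_context; returns a null pointer
 * if allocation or either state fetch fails.
 */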
void *
act_thread_csave(void)
{
    struct i386_act_context     *ic;
    kern_return_t               kret;
    mach_msg_type_number_t      val;

    ic = (struct i386_act_context *)kalloc(sizeof(struct i386_act_context));

    if (ic == (struct i386_act_context *)NULL)
        return((void *)0);

    val = i386_SAVED_STATE_COUNT;
    kret = machine_thread_get_state(current_act(),
                    i386_SAVED_STATE,
                    (thread_state_t) &ic->ss,
                    &val);
    if (kret != KERN_SUCCESS) {
        kfree((vm_offset_t)ic, sizeof(struct i386_act_context));
        return((void *)0);
    }

    val = i386_FLOAT_STATE_COUNT;
    kret = machine_thread_get_state(current_act(),
                    i386_FLOAT_STATE,
                    (thread_state_t) &ic->fs,
                    &val);
    if (kret != KERN_SUCCESS) {
        kfree((vm_offset_t)ic, sizeof(struct i386_act_context));
        return((void *)0);
    }
    return(ic);
}

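/*
 * Restore a context previously captured by act_thread_csave() into the
 * current activation, then release the saved copy.
 */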
void
act_thread_catt(void *ctx)
{
    struct i386_act_context *ic;
    kern_return_t           kret;

    ic = (struct i386_act_context *)ctx;

    if (ic == (struct i386_act_context *)NULL)
        return;

    kret = machine_thread_set_state(current_act(),
                    i386_SAVED_STATE,
                    (thread_state_t) &ic->ss,
                    i386_SAVED_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        goto out;

    kret = machine_thread_set_state(current_act(),
                    i386_FLOAT_STATE,
                    (thread_state_t) &ic->fs,
                    i386_FLOAT_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        goto out;
out:
    kfree((vm_offset_t)ic, sizeof(struct i386_act_context));
}

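/*
 * Discard a context captured by act_thread_csave() without restoring it.
 */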
void act_thread_cfree(void *ctx)
{
    kfree((vm_offset_t)ctx, sizeof(struct i386_act_context));
}