/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/iopb_entries.h>
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int state_count[] = {
	/* FLAVOR_LIST */ 0,
	i386_NEW_THREAD_STATE_COUNT,
	i386_FLOAT_STATE_COUNT,
	i386_ISA_PORT_MAP_STATE_COUNT,
	i386_V86_ASSIST_STATE_COUNT,
	i386_REGS_SEGS_STATE_COUNT,
	i386_THREAD_SYSCALL_STATE_COUNT,
	/* THREAD_STATE_NONE */ 0,
	i386_SAVED_STATE_COUNT,
};
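
/*
 * A minimal sketch of how a flavor-indexed table like the one above is
 * typically used (illustration only; the real per-flavor checks live in
 * act_machine_set_state/act_machine_get_state below and use the
 * individual *_COUNT constants directly):
 *
 *	if (flavor < sizeof (state_count) / sizeof (state_count[0]) &&
 *	    count < state_count[flavor])
 *		return KERN_INVALID_ARGUMENT;
 */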
void		act_machine_throughcall(thread_act_t thr_act);
extern thread_t	Switch_context(
			thread_t	old,
			void		(*continuation)(void),
			thread_t	new);
extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);
/*
 *	consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
}

void
consider_machine_adjust()
{
}
/*
 *	machine_kernel_stack_init:
 *
 *	Initialize a kernel stack which has already been
 *	attached to its thread_activation.
 */
void
machine_kernel_stack_init(
	thread_t	thread,
	void		(*start_pos)(thread_t))
{
	thread_act_t	thr_act = thread->top_act;
	vm_offset_t	stack;

	stack = thread->kernel_stack;

#if	MACH_ASSERT
	if (watchacts & WA_PCB) {
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n",
			thread, stack, start_pos);
		printf("\tstack_iks=%x, stack_iel=%x\n",
			STACK_IKS(stack), STACK_IEL(stack));
	}
#endif	/* MACH_ASSERT */

	/*
	 *	We want to run at start_pos, giving it as an argument
	 *	the return value from Load_context/Switch_context.
	 *	Thread_continue takes care of the mismatch between
	 *	the argument-passing/return-value conventions.
	 *	This function will not return normally,
	 *	so we don`t have to worry about a return address.
	 */
	STACK_IKS(stack)->k_eip = (int) Thread_continue;
	STACK_IKS(stack)->k_ebx = (int) start_pos;
	STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);

	/*
	 *	Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
}
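
/*
 * A minimal sketch of what the frame built above arranges (the real
 * Thread_continue is an assembly stub; this is an illustration only):
 * when the thread is first dispatched, k_esp and k_eip are reloaded, so
 * control enters Thread_continue, which recovers start_pos from k_ebx
 * and calls it with the value returned by Load_context/Switch_context,
 * roughly
 *
 *	(*start_pos)(result_of_Switch_context);		and never returns
 */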
#if	NCPUS > 1
#define	curr_gdt(mycpu)		(mp_gdt[mycpu])
#define	curr_ktss(mycpu)	(mp_ktss[mycpu])
#else	/* NCPUS > 1 */
#define	curr_gdt(mycpu)		(gdt)
#define	curr_ktss(mycpu)	(&ktss)
#endif	/* NCPUS > 1 */

#define	gdt_desc_p(mycpu,sel) \
	((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
void
act_machine_switch_pcb( thread_act_t new_act )
{
	pcb_t			pcb = new_act->mact.pcb;
	int			mycpu;
	register iopb_tss_t	tss = pcb->ims.io_tss;
	vm_offset_t		pcb_stack_top;

	assert(new_act->thread != NULL);
	assert(new_act->thread->kernel_stack != 0);
	STACK_IEL(new_act->thread->kernel_stack)->saved_state =
		&new_act->mact.pcb->iss;

	/*
	 *	Save a pointer to the top of the "kernel" stack -
	 *	actually the place in the PCB where a trap into
	 *	kernel mode will push the registers.
	 *	The location depends on V8086 mode.  If we are
	 *	not in V8086 mode, then a trap into the kernel
	 *	won`t save the v86 segments, so we leave room.
	 */
	pcb_stack_top = (pcb->iss.efl & EFL_VM)
			 ? (int) (&pcb->iss + 1)
			 : (int) (&pcb->iss.v86_segs);

	mp_disable_preemption();
	mycpu = cpu_number();

	if (tss == 0) {
	    /*
	     *	No per-thread IO permissions.
	     *	Use standard kernel TSS.
	     */
	    if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
		set_tr(KERNEL_TSS);
	    curr_ktss(mycpu)->esp0 = pcb_stack_top;
	}
	else {
	    /*
	     * Set the IO permissions.  Use this thread`s TSS.
	     */
	    *gdt_desc_p(mycpu,USER_TSS)
		= *(struct real_descriptor *)tss->iopb_desc;
	    tss->tss.esp0 = pcb_stack_top;
	    set_tr(USER_TSS);
	    gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
	}

	{
	    register user_ldt_t	ldt = pcb->ims.ldt;

	    /*
	     *	Set the thread`s LDT.
	     */
	    if (ldt == 0) {
		/*
		 *	Use system LDT.
		 */
		set_ldt(KERNEL_LDT);
	    }
	    else {
		/*
		 *	Thread has its own LDT.
		 */
		*gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
		set_ldt(USER_LDT);
	    }
	}
	mp_enable_preemption();

	/*
	 *	Load the floating-point context, if necessary.
	 */
	fpu_load_context(pcb);
}
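
/*
 * A minimal sketch of the effect of the esp0 settings above, assuming
 * the usual i386 behaviour that the CPU fetches esp0 from the active
 * TSS on a user-to-kernel ring transition (pseudo-form, illustration
 * only, not kernel source):
 *
 *	on a trap from user mode:
 *		esp = <active TSS>.esp0;		== pcb_stack_top
 *		push ss, esp, efl, cs, eip, ...;	lands in pcb->iss
 *
 * so the "kernel stack" a trap starts on is really the thread's pcb
 * save area, sized with or without the v86 segment slots as computed
 * above.
 */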
/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
load_context(
	thread_t	new)
{
	act_machine_switch_pcb(new->top_act);
	Load_context(new);
}
/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int	switch_act_swapins = 0;
/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching.  Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	/*
	 *	Switch the vm, ast and pcb context.
	 *	Save FP registers if in use and set TS (task switch) bit.
	 */
	fpu_save_context(thread);

	active_stacks[cpu] = thread->kernel_stack;
	ast_context(new, cpu);

	PMAP_SWITCH_CONTEXT(old, new, cpu);
	act_machine_switch_pcb(new);
}
/*
 *	Switch to a new thread.
 *	Save the old thread`s kernel state or continuation,
 *	and return it.
 */
thread_t
switch_context(
	thread_t	old,
	void		(*continuation)(void),
	thread_t	new)
{
	register thread_act_t	old_act = old->top_act,
				new_act = new->top_act;

	assert(old_act->kernel_loaded ||
	       active_stacks[cpu_number()] == old_act->thread->kernel_stack);
	assert(get_preemption_level() == 1);

	check_simple_locks();

	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

#if	MACH_ASSERT
	if (watchacts & WA_SWITCH)
		printf("\tswitch_context(old=%x con=%x new=%x)\n",
		       old, continuation, new);
#endif	/* MACH_ASSERT */

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	{
	    int	mycpu = cpu_number();

	    PMAP_SWITCH_CONTEXT(old_act, new_act, mycpu)
	}

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new_act);
	return(Switch_context(old, continuation, new));
}
void
pcb_module_init(void)
{
	fpu_module_init();
	iopb_init();
}
void
pcb_init( register thread_act_t thr_act )
{
	register pcb_t	pcb;

	assert(thr_act->mact.pcb == (pcb_t)0);
	pcb = thr_act->mact.pcb = &thr_act->mact.xxx_pcb;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, pcb);
#endif	/* MACH_ASSERT */

	/*
	 *	We can't let random values leak out to the user.
	 *	(however, act_create() zeroed the entire thr_act, mact, pcb)
	 *	bzero((char *) pcb, sizeof *pcb);
	 */
	simple_lock_init(&pcb->lock, ETAP_MISC_PCB);

	/*
	 *	Guarantee that the bootstrapped thread will be in user
	 *	mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
	pcb->iss.efl = EFL_USER_SET;
}
/*
 *	Adjust saved register state for thread belonging to task
 *	created with kernel_task_create().
 */
void
pcb_user_to_kernel(
	thread_act_t	thr_act)
{
	register pcb_t	pcb = thr_act->mact.pcb;

	pcb->iss.cs = KERNEL_CS;
	pcb->iss.ss = KERNEL_DS;
	pcb->iss.ds = KERNEL_DS;
	pcb->iss.es = KERNEL_DS;
	pcb->iss.fs = KERNEL_DS;
	pcb->iss.gs = CPU_DATA;
}
void
pcb_terminate(
	register thread_act_t	thr_act)
{
	register pcb_t	pcb = thr_act->mact.pcb;

	assert(pcb);

	if (pcb->ims.io_tss != 0)
		iopb_destroy(pcb->ims.io_tss);
	if (pcb->ims.ifps != 0)
		fp_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	thr_act->mact.pcb = (pcb_t)0;
}
/*
 *	pcb_collect:
 *
 *	Attempt to free excess pcb memory.
 */
void
pcb_collect(
	register thread_act_t	thr_act)
{
	/* accomplishes very little */
}
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(thread_act_t act, int flag)
{
}
/*
 *	act_machine_set_state:
 *
 *	Set the status of the specified thread.  Called with "appropriate"
 *	thread-related locks held (see act_lock_thread()), so
 *	thr_act->thread is guaranteed not to change.
 */
kern_return_t
act_machine_set_state(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	int	kernel_act = thr_act->kernel_loading ||
			     thr_act->kernel_loaded;

#if	MACH_ASSERT
	if (watchacts & WA_STATE)
	    printf("act_%x act_m_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n",
		   current_act(), thr_act, flavor, tstate, count);
#endif	/* MACH_ASSERT */

	switch (flavor) {
	case THREAD_SYSCALL_STATE:
	{
		register struct thread_syscall_state	*state;
		register struct i386_saved_state	*saved_state = USER_REGS(thr_act);

		state = (struct thread_syscall_state *) tstate;
		saved_state->eax = state->eax;
		saved_state->edx = state->edx;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
		saved_state->eip = state->eip;
		saved_state->uesp = state->esp;
		break;
	}
	case i386_SAVED_STATE:
	{
		register struct i386_saved_state	*state;
		register struct i386_saved_state	*saved_state;

		if (count < i386_SAVED_STATE_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		state = (struct i386_saved_state *) tstate;

		saved_state = USER_REGS(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
						| EFL_USER_SET;

		/*
		 * Segment registers.  Set differently in V8086 mode.
		 */
		if (state->efl & EFL_VM) {
		    /*
		     * Set V8086 mode segment registers.
		     */
		    saved_state->cs = state->cs & 0xffff;
		    saved_state->ss = state->ss & 0xffff;
		    saved_state->v86_segs.v86_ds = state->ds & 0xffff;
		    saved_state->v86_segs.v86_es = state->es & 0xffff;
		    saved_state->v86_segs.v86_fs = state->fs & 0xffff;
		    saved_state->v86_segs.v86_gs = state->gs & 0xffff;

		    /*
		     * Zero protected mode segment registers.
		     */
		    saved_state->ds = 0;
		    saved_state->es = 0;
		    saved_state->fs = 0;
		    saved_state->gs = 0;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			thr_act->mact.pcb->ims.v86s.flags =
				state->efl & (EFL_TF | EFL_IF);
		    }
		}
		else if (!kernel_act) {
		    /*
		     * 386 mode.  Set segment registers for flat
		     * 32-bit address space.
		     */
		    saved_state->cs = USER_CS;
		    saved_state->ss = USER_DS;
		    saved_state->ds = USER_DS;
		    saved_state->es = USER_DS;
		    saved_state->fs = USER_DS;
		    saved_state->gs = USER_DS;
		}
		else {
		    /*
		     * User setting segment registers.
		     * Code and stack selectors have already been
		     * checked.  Others will be reset by 'iret'
		     * if they are not valid.
		     */
		    saved_state->cs = state->cs;
		    saved_state->ss = state->ss;
		    saved_state->ds = state->ds;
		    saved_state->es = state->es;
		    saved_state->fs = state->fs;
		    saved_state->gs = state->gs;
		}
		break;
	}
	case i386_NEW_THREAD_STATE:
	case i386_REGS_SEGS_STATE:
	{
		register struct i386_new_thread_state	*state;
		register struct i386_saved_state	*saved_state;

		if (count < i386_NEW_THREAD_STATE_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		if (flavor == i386_REGS_SEGS_STATE) {
		    /*
		     * Code and stack selectors must not be null,
		     * and must have user protection levels.
		     * Only the low 16 bits are valid.
		     */
		    state = (struct i386_new_thread_state *) tstate;
		    if (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
		     || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct i386_new_thread_state *) tstate;

		saved_state = USER_REGS(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
						| EFL_USER_SET;

		/*
		 * Segment registers.  Set differently in V8086 mode.
		 */
		if (state->efl & EFL_VM) {
		    /*
		     * Set V8086 mode segment registers.
		     */
		    saved_state->cs = state->cs & 0xffff;
		    saved_state->ss = state->ss & 0xffff;
		    saved_state->v86_segs.v86_ds = state->ds & 0xffff;
		    saved_state->v86_segs.v86_es = state->es & 0xffff;
		    saved_state->v86_segs.v86_fs = state->fs & 0xffff;
		    saved_state->v86_segs.v86_gs = state->gs & 0xffff;

		    /*
		     * Zero protected mode segment registers.
		     */
		    saved_state->ds = 0;
		    saved_state->es = 0;
		    saved_state->fs = 0;
		    saved_state->gs = 0;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			thr_act->mact.pcb->ims.v86s.flags =
				state->efl & (EFL_TF | EFL_IF);
		    }
		}
		else if (flavor == i386_NEW_THREAD_STATE && !kernel_act) {
		    /*
		     * 386 mode.  Set segment registers for flat
		     * 32-bit address space.
		     */
		    saved_state->cs = USER_CS;
		    saved_state->ss = USER_DS;
		    saved_state->ds = USER_DS;
		    saved_state->es = USER_DS;
		    saved_state->fs = USER_DS;
		    saved_state->gs = USER_DS;
		}
		else {
		    /*
		     * User setting segment registers.
		     * Code and stack selectors have already been
		     * checked.  Others will be reset by 'iret'
		     * if they are not valid.
		     */
		    saved_state->cs = state->cs;
		    saved_state->ss = state->ss;
		    saved_state->ds = state->ds;
		    saved_state->es = state->es;
		    saved_state->fs = state->fs;
		    saved_state->gs = state->gs;
		}
		break;
	}
	case i386_FLOAT_STATE: {

		if (count < i386_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_state(thr_act, (struct i386_float_state *) tstate);
	}
	/*
	 * Temporary - replace by i386_io_map
	 */
	case i386_ISA_PORT_MAP_STATE: {
		register struct i386_isa_port_map_state	*state;
		register iopb_tss_t	tss;

		if (count < i386_ISA_PORT_MAP_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		break;
	}
	case i386_V86_ASSIST_STATE:
	{
		register struct i386_v86_assist_state	*state;
		vm_offset_t	int_table;
		int		int_count;

		if (count < i386_V86_ASSIST_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct i386_v86_assist_state *) tstate;
		int_table = state->int_table;
		int_count = state->int_count;

		if (int_table >= VM_MAX_ADDRESS ||
		    int_table +
			int_count * sizeof(struct v86_interrupt_table)
			    > VM_MAX_ADDRESS)
			return KERN_INVALID_ARGUMENT;

		thr_act->mact.pcb->ims.v86s.int_table = int_table;
		thr_act->mact.pcb->ims.v86s.int_count = int_count;

		thr_act->mact.pcb->ims.v86s.flags =
			USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
		break;
	}
	case i386_THREAD_STATE: {
		struct i386_saved_state	*saved_state;
		i386_thread_state_t	*state25;

		saved_state = USER_REGS(thr_act);
		state25 = (i386_thread_state_t *)tstate;

		saved_state->eax = state25->eax;
		saved_state->ebx = state25->ebx;
		saved_state->ecx = state25->ecx;
		saved_state->edx = state25->edx;
		saved_state->edi = state25->edi;
		saved_state->esi = state25->esi;
		saved_state->ebp = state25->ebp;
		saved_state->uesp = state25->esp;
		saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
						| EFL_USER_SET;
		saved_state->eip = state25->eip;
		saved_state->cs = USER_CS;	/* FIXME? */
		saved_state->ss = USER_DS;
		saved_state->ds = USER_DS;
		saved_state->es = USER_DS;
		saved_state->fs = USER_DS;
		saved_state->gs = USER_DS;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
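
/*
 * A minimal sketch of how this routine is typically reached, assuming
 * the standard Mach thread_set_state() interface (the register values
 * and the i386_THREAD_STATE_COUNT name are illustrative assumptions,
 * not taken from this file):
 *
 *	i386_thread_state_t regs;
 *	bzero((char *) &regs, sizeof regs);
 *	regs.eip = (unsigned int) entry_point;
 *	regs.esp = (unsigned int) stack_top;
 *	kr = thread_set_state(thread, i386_THREAD_STATE,
 *			      (thread_state_t) &regs,
 *			      i386_THREAD_STATE_COUNT);
 */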
/*
 *	Get the status of the specified thread.
 */
kern_return_t
act_machine_get_state(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
#if	MACH_ASSERT
	if (watchacts & WA_STATE)
	    printf("act_%x act_m_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n",
		   current_act(), thr_act, flavor, tstate,
		   count, (count ? *count : 0));
#endif	/* MACH_ASSERT */

	switch (flavor) {
	case i386_SAVED_STATE:
	{
		register struct i386_saved_state	*state;
		register struct i386_saved_state	*saved_state;

		if (*count < i386_SAVED_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_saved_state *) tstate;
		saved_state = USER_REGS(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;

		if (saved_state->efl & EFL_VM) {
		    /*
		     * V8086 mode.
		     */
		    state->ds = saved_state->v86_segs.v86_ds & 0xffff;
		    state->es = saved_state->v86_segs.v86_es & 0xffff;
		    state->fs = saved_state->v86_segs.v86_fs & 0xffff;
		    state->gs = saved_state->v86_segs.v86_gs & 0xffff;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			if ((thr_act->mact.pcb->ims.v86s.flags &
					(EFL_IF|V86_IF_PENDING)) == 0)
				state->efl &= ~EFL_IF;
		    }
		}
		else {
		    /*
		     * 386 mode.
		     */
		    state->ds = saved_state->ds & 0xffff;
		    state->es = saved_state->es & 0xffff;
		    state->fs = saved_state->fs & 0xffff;
		    state->gs = saved_state->gs & 0xffff;
		}
		*count = i386_SAVED_STATE_COUNT;
		break;
	}
	case i386_NEW_THREAD_STATE:
	case i386_REGS_SEGS_STATE:
	{
		register struct i386_new_thread_state	*state;
		register struct i386_saved_state	*saved_state;

		if (*count < i386_NEW_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_new_thread_state *) tstate;
		saved_state = USER_REGS(thr_act);

		/*
		 * General registers
		 */
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->ebx = saved_state->ebx;
		state->edx = saved_state->edx;
		state->ecx = saved_state->ecx;
		state->eax = saved_state->eax;
		state->eip = saved_state->eip;
		state->efl = saved_state->efl;
		state->uesp = saved_state->uesp;

		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		if (saved_state->efl & EFL_VM) {
		    /*
		     * V8086 mode.
		     */
		    state->ds = saved_state->v86_segs.v86_ds & 0xffff;
		    state->es = saved_state->v86_segs.v86_es & 0xffff;
		    state->fs = saved_state->v86_segs.v86_fs & 0xffff;
		    state->gs = saved_state->v86_segs.v86_gs & 0xffff;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			if ((thr_act->mact.pcb->ims.v86s.flags &
					(EFL_IF|V86_IF_PENDING)) == 0)
				state->efl &= ~EFL_IF;
		    }
		}
		else {
		    /*
		     * 386 mode.
		     */
		    state->ds = saved_state->ds & 0xffff;
		    state->es = saved_state->es & 0xffff;
		    state->fs = saved_state->fs & 0xffff;
		    state->gs = saved_state->gs & 0xffff;
		}
		*count = i386_NEW_THREAD_STATE_COUNT;
		break;
	}
	case THREAD_SYSCALL_STATE:
	{
		register struct thread_syscall_state	*state;
		register struct i386_saved_state	*saved_state = USER_REGS(thr_act);

		state = (struct thread_syscall_state *) tstate;
		state->eax = saved_state->eax;
		state->edx = saved_state->edx;
		state->efl = saved_state->efl;
		state->eip = saved_state->eip;
		state->esp = saved_state->uesp;
		*count = i386_THREAD_SYSCALL_STATE_COUNT;
		break;
	}
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);
		tstate[0] = i386_NEW_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_ISA_PORT_MAP_STATE;
		tstate[3] = i386_V86_ASSIST_STATE;
		tstate[4] = THREAD_SYSCALL_STATE;
		*count = 5;
		break;
	case i386_FLOAT_STATE: {

		if (*count < i386_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		*count = i386_FLOAT_STATE_COUNT;
		return fpu_get_state(thr_act, (struct i386_float_state *) tstate);
	}
	/*
	 * Temporary - replace by i386_io_map
	 */
	case i386_ISA_PORT_MAP_STATE: {
		register struct i386_isa_port_map_state	*state;
		register iopb_tss_t	tss;

		if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_isa_port_map_state *) tstate;
		tss = thr_act->mact.pcb->ims.io_tss;

		if (tss == 0) {
		    int i;

		    /*
		     *	The thread has no ktss, so no IO permissions.
		     */
		    for (i = 0; i < sizeof state->pm; i++)
			state->pm[i] = 0xff;
		}
		else {
		    /*
		     *	The thread has its own ktss.
		     */
		    bcopy((char *) tss->bitmap,
			  (char *) state->pm,
			  sizeof state->pm);
		}

		*count = i386_ISA_PORT_MAP_STATE_COUNT;
		break;
	}
	case i386_V86_ASSIST_STATE:
	{
		register struct i386_v86_assist_state	*state;

		if (*count < i386_V86_ASSIST_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct i386_v86_assist_state *) tstate;
		state->int_table = thr_act->mact.pcb->ims.v86s.int_table;
		state->int_count = thr_act->mact.pcb->ims.v86s.int_count;

		*count = i386_V86_ASSIST_STATE_COUNT;
		break;
	}
	case i386_THREAD_STATE: {
		struct i386_saved_state	*saved_state;
		i386_thread_state_t	*state;

		saved_state = USER_REGS(thr_act);
		state = (i386_thread_state_t *)tstate;

		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds;
		state->es = saved_state->es;
		state->fs = saved_state->fs;
		state->gs = saved_state->gs;
		*count = i386_THREAD_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
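
/*
 * A minimal sketch of how a caller can discover the flavors handled
 * here, using the THREAD_STATE_FLAVOR_LIST case above (thread_get_state
 * is the standard Mach call; the buffer size of 5 matches the list
 * filled in above):
 *
 *	thread_state_flavor_t flavors[5];
 *	mach_msg_type_number_t n = 5;
 *	kr = thread_get_state(thread, THREAD_STATE_FLAVOR_LIST,
 *			      (thread_state_t) flavors, &n);
 */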
/*
 * Alter the thread`s state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 */
void
thread_set_syscall_return(
	thread_t	thread,
	kern_return_t	retval)
{
	thread->top_act->mact.pcb->iss.eax = retval;
}
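
/*
 * A minimal sketch of the intended use, per the comment above (the
 * two-line shape shown here is an illustration of a syscall handler's
 * exit path, not a quote from one):
 *
 *	thread_set_syscall_return(current_thread(), KERN_SUCCESS);
 *	thread_exception_return();	resumes user mode; does not return
 */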
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(thread_t))
{
	MachineThrAct_t	mact = &thr_act->mact;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n",
			thread, thr_act, start_pos);
#endif	/* MACH_ASSERT */

	assert(thread != NULL);
	assert(thr_act != NULL);

	/*
	 *	Allocate a kernel stack per shuttle
	 */
	thread->kernel_stack = (int)stack_alloc(thread, start_pos);
	assert(thread->kernel_stack != 0);

	/*
	 *	Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss;

	/*
	 * Utah code fiddles with pcb here - (we don't need to)
	 */
	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
thread_machine_destroy( thread_t thread )
{
	if (thread->kernel_stack != 0) {
		stack_free(thread);
	}
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
thread_machine_set_current( thread_t thread )
{
	register int	my_cpu;

	mp_disable_preemption();
	my_cpu = cpu_number();

	cpu_data[my_cpu].active_thread = thread;
	active_kloaded[my_cpu] =
		thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;

	mp_enable_preemption();
}
/*
 * Pool of kernel activations.
 */
void act_machine_init()
{
	thread_act_t	thr_act;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_init()\n");
#endif	/* MACH_ASSERT */

	/* Good to verify this once */
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );

	/*
	 * If we start using kernel activations,
	 * would normally create kernel_thread_pool here,
	 * populating it from the act_zone
	 */
}
kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	MachineThrAct_t	mact = &thr_act->mact;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_create(task=%x,thr_act=%x) pcb=%x\n",
			task, thr_act, &mact->xxx_pcb);
#endif	/* MACH_ASSERT */

	/*
	 * Clear & Init the pcb  (sets up user-mode s regs)
	 */
	pcb_init(thr_act);

	return KERN_SUCCESS;
}
void
act_virtual_machine_destroy(thread_act_t thr_act)
{
	return;
}
void
act_machine_destroy(thread_act_t thr_act)
{

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", thr_act);
#endif	/* MACH_ASSERT */

	pcb_terminate(thr_act);
}
void
act_machine_return(int code)
{
	thread_act_t	thr_act = current_act();

#if	MACH_ASSERT
	/*
	 * We don't go through the locking dance here needed to
	 * acquire thr_act->thread safely.
	 */
	if (watchacts & WA_EXIT)
		printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
			code, thr_act, thr_act->ref_count,
			thr_act->thread, thr_act->thread->ref_count);
#endif	/* MACH_ASSERT */

	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

#ifdef CALLOUT_RPC_MODEL
	/*
	 * JMM - RPC is not going to be done with a callout/direct-
	 * stack manipulation mechanism.  Instead we will return/
	 * unwind normally as if from a continuation.
	 */
	act_lock_thread(thr_act);

	if (thr_act->thread->top_act != thr_act) {
		/*
		 * this is not the top activation;
		 * if possible, we should clone the shuttle so that
		 * both the root RPC-chain and the soon-to-be-orphaned
		 * RPC-chain have shuttles
		 *
		 * JMM - Cloning shuttles isn't the right approach.  We
		 * need to alert the higher up activations to return our
		 * shuttle (because scheduling attributes may TRULY be
		 * unique and not cloneable.
		 */
		act_unlock_thread(thr_act);
		panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
	}

	if (thr_act->lower != THR_ACT_NULL) {
		thread_t	cur_thread = current_thread();
		thread_act_t	cur_act;
		struct ipc_port	*iplock;

		/* send it an appropriate return code */
		thr_act->lower->alerts |= SERVER_TERMINATED;
		install_special_handler(thr_act->lower);

		/* Return to previous act with error code */
		act_locked_act_reference(thr_act);	/* keep it around */
		act_switch_swapcheck(cur_thread, (ipc_port_t)0);
		(void) switch_act(THR_ACT_NULL);
		/* assert(thr_act->ref_count == 0); */	/* XXX */
		cur_act = cur_thread->top_act;
		MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;

		machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
		/*
		 * The following unlocks must be done separately since fields
		 * used by `act_unlock_thread()' have been cleared, meaning
		 * that it would not release all of the appropriate locks.
		 */
		iplock = thr_act->pool_port;	/* remember for unlock call */
		rpc_unlock(cur_thread);
		if (iplock) ip_unlock(iplock);	/* must be done separately */
		act_unlock(thr_act);
		act_deallocate(thr_act);	/* free it */
		Load_context(cur_thread);

		panic("act_machine_return: TALKING ZOMBIE! (2)");
	}
	act_unlock_thread(thr_act);

#endif /* CALLOUT_RPC_MODEL */

	/* This is the only activation attached to the shuttle... */
	/* terminate the entire thread (shuttle plus activation) */

	assert(thr_act->thread->top_act == thr_act);
	thread_terminate_self();

	panic("act_machine_return: TALKING ZOMBIE! (1)");
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
thread_machine_init(void)
{
	pcb_module_init();
}
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_act_t);
void		dump_regs(thread_act_t);
static void
dump_handlers(thread_act_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%x)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%x)->",counter,rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
void
dump_regs(thread_act_t thr_act)
{
	if (thr_act->mact.pcb) {
		register struct i386_saved_state *ssp = USER_REGS(thr_act);

		/* Print out user register state */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
			ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
			ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
int
dump_act(thread_act_t thr_act)
{
	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
		thr_act, thr_act->ref_count,
		thr_act->thread, thr_act->thread ? thr_act->thread->ref_count : 0,
		thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	if (thr_act->pool_port) {
		thread_pool_t actpp = &thr_act->pool_port->ip_thread_pool;
		printf("\tpool(acts_p=%x, waiting=%d) pool_next %x\n",
			actpp->thr_acts, actpp->waiting, thr_act->thread_pool_next);
	} else
		printf("\tno thread_pool\n");

	printf("\talerts=%x mask=%x susp=%d user_stop=%d active=%x ast=%x\n",
		thr_act->alerts, thr_act->alert_mask,
		thr_act->suspend_count, thr_act->user_stop_count,
		thr_act->active, thr_act->ast);
	printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower);
	printf("\tpcb=%x\n", thr_act->mact.pcb);

	if (thr_act->thread && thr_act->thread->kernel_stack) {
		vm_offset_t stack = thr_act->thread->kernel_stack;

		printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
			stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
			STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);

	return((int)thr_act);
}
unsigned int
get_useraddr(void)
{
	thread_act_t thr_act = current_act();

	if (thr_act->mact.pcb)
		return(thr_act->mact.pcb->iss.eip);
	else
		return(0);
}
void
thread_swapin_mach_alloc(thread_t thread)
{
	/* 386 does not have saveareas */
}
/*
 *	detach and return a kernel stack from a thread
 */
vm_offset_t
stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
			thread, thread->priority,
			thread->sched_pri, 0,
			0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return(stack);
}
1412 * attach a kernel stack to a thread and initialize it
1416 stack_attach(struct thread_shuttle
*thread
,
1418 void (*start_pos
)(thread_t
))
1420 struct i386_kernel_state
*statep
;
1421 thread_act_t thr_act
;
1423 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
,MACH_STACK_ATTACH
),
1424 thread
, thread
->priority
,
1425 thread
->sched_pri
, continuation
,
1429 statep
= STACK_IKS(stack
);
1430 thread
->kernel_stack
= stack
;
1432 statep
->k_eip
= (unsigned long) Thread_continue
;
1433 statep
->k_ebx
= (unsigned long) start_pos
;
1434 statep
->k_esp
= (unsigned long) STACK_IEL(stack
);
1436 STACK_IEL(stack
)->saved_state
= &thr_act
->mact
.pcb
->iss
;
/*
 *	move a stack from old to new thread
 */
void
stack_handoff(thread_t old,
	      thread_t new)
{
	vm_offset_t	stack;
	pmap_t		new_pmap;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF),
			thread, thread->priority,
			thread->sched_pri, continuation,
			0);

	assert(new->top_act);
	assert(old->top_act);

	stack = stack_detach(old);
	stack_attach(new, stack, 0);

	new_pmap = new->top_act->task->map->pmap;
	if (old->top_act->task->map->pmap != new_pmap)
		PMAP_ACTIVATE_MAP(new->top_act->task->map, cpu_number());

	thread_machine_set_current(new);

	active_stacks[cpu_number()] = new->kernel_stack;