/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <cpus.h>
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>

vm_offset_t	active_stacks[NCPUS];
vm_offset_t	kernel_stack[NCPUS];
thread_act_t	active_kloaded[NCPUS];

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int state_count[] = {
	/* FLAVOR_LIST */ 0,
	i386_NEW_THREAD_STATE_COUNT,
	i386_FLOAT_STATE_COUNT,
	i386_ISA_PORT_MAP_STATE_COUNT,
	i386_V86_ASSIST_STATE_COUNT,
	i386_REGS_SEGS_STATE_COUNT,
	i386_THREAD_SYSCALL_STATE_COUNT,
	/* THREAD_STATE_NONE */ 0,
	i386_SAVED_STATE_COUNT,
};

/* Forward */

void		act_machine_throughcall(thread_act_t thr_act);
extern thread_t	Switch_context(
			thread_t	old,
			void		(*cont)(void),
			thread_t	new);
extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect()
{
}

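/*
 * consider_machine_adjust:
 *
 * Likewise a no-op on i386; there is no machine-dependent
 * memory usage to adjust here.
 */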
void
consider_machine_adjust()
{
}


/*
 * machine_kernel_stack_init:
 *
 * Initialize a kernel stack which has already been
 * attached to its thread_activation.
 */

void
machine_kernel_stack_init(
	thread_t	thread,
	void		(*start_pos)(thread_t))
{
	thread_act_t	thr_act = thread->top_act;
	vm_offset_t	stack;

	assert(thr_act);
	stack = thread->kernel_stack;
	assert(stack);

	/*
	 * We want to run at start_pos, giving it as an argument
	 * the return value from Load_context/Switch_context.
	 * Thread_continue takes care of the mismatch between
	 * the argument-passing/return-value conventions.
	 * This function will not return normally,
	 * so we don`t have to worry about a return address.
	 */
	STACK_IKS(stack)->k_eip = (int) Thread_continue;
	STACK_IKS(stack)->k_ebx = (int) start_pos;
	STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);

	/*
	 * Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
}


#if	NCPUS > 1
#define	curr_gdt(mycpu)		(mp_gdt[mycpu])
#define	curr_ldt(mycpu)		(mp_ldt[mycpu])
#define	curr_ktss(mycpu)	(mp_ktss[mycpu])
#else
#define	curr_gdt(mycpu)		(gdt)
#define	curr_ldt(mycpu)		(ldt)
#define	curr_ktss(mycpu)	(&ktss)
#endif

#define	gdt_desc_p(mycpu,sel) \
	((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])

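/*
 * act_machine_switch_pcb:
 *
 * Install the PCB of the given activation on the current processor:
 * point the TSS kernel stack pointer at the activation's saved state,
 * select either the kernel TSS or the thread's I/O-permission TSS,
 * load the appropriate LDT, and arrange for the floating-point
 * context to be loaded if needed.
 */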
void
act_machine_switch_pcb( thread_act_t new_act )
{
	pcb_t			pcb = new_act->mact.pcb;
	int			mycpu;
	register iopb_tss_t	tss = pcb->ims.io_tss;
	vm_offset_t		pcb_stack_top;
	register user_ldt_t	ldt = pcb->ims.ldt;

	assert(new_act->thread != NULL);
	assert(new_act->thread->kernel_stack != 0);
	STACK_IEL(new_act->thread->kernel_stack)->saved_state =
		&new_act->mact.pcb->iss;

	/*
	 * Save a pointer to the top of the "kernel" stack -
	 * actually the place in the PCB where a trap into
	 * kernel mode will push the registers.
	 * The location depends on V8086 mode.  If we are
	 * not in V8086 mode, then a trap into the kernel
	 * won`t save the v86 segments, so we leave room.
	 */

	pcb_stack_top = (pcb->iss.efl & EFL_VM)
			? (int) (&pcb->iss + 1)
			: (int) (&pcb->iss.v86_segs);

	mp_disable_preemption();
	mycpu = cpu_number();

	if (tss == 0) {
	    /*
	     * No per-thread IO permissions.
	     * Use standard kernel TSS.
	     */
	    if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
		set_tr(KERNEL_TSS);
	    curr_ktss(mycpu)->esp0 = pcb_stack_top;
	}
	else {
	    /*
	     * Set the IO permissions.  Use this thread`s TSS.
	     */
	    *gdt_desc_p(mycpu,USER_TSS)
		= *(struct real_descriptor *)tss->iopb_desc;
	    tss->tss.esp0 = pcb_stack_top;
	    set_tr(USER_TSS);
	    gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
	}

	/*
	 * Set the thread`s LDT.
	 */
	if (ldt == 0) {
	    struct real_descriptor *ldtp;
	    /*
	     * Use system LDT.
	     */
	    ldtp = (struct real_descriptor *)curr_ldt(mycpu);
	    ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	    set_ldt(KERNEL_LDT);
	}
	else {
	    /*
	     * Thread has its own LDT.
	     */
	    *gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
	    set_ldt(USER_LDT);
	}

	mp_enable_preemption();
	/*
	 * Load the floating-point context, if necessary.
	 */
	fpu_load_context(pcb);

}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t		new)
{
	act_machine_switch_pcb(new->top_act);
	Load_context(new);
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching.  Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new)
{
	int		cpu = cpu_number();

	/*
	 *	Switch the vm, ast and pcb context.
	 *	Save FP registers if in use and set TS (task switch) bit.
	 */
	fpu_save_context(thread);

	active_stacks[cpu] = thread->kernel_stack;
	ast_context(new, cpu);

	PMAP_SWITCH_CONTEXT(old, new, cpu);
	act_machine_switch_pcb(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	void			(*continuation)(void),
	thread_t		new)
{
	register thread_act_t	old_act = old->top_act,
				new_act = new->top_act;

#if MACH_RT
	assert(active_stacks[cpu_number()] == old_act->thread->kernel_stack);
#endif
	check_simple_locks();

	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	{
	    int	mycpu = cpu_number();

	    PMAP_SWITCH_CONTEXT(old_act, new_act, mycpu)
	}

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new_act);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
	old->continuation = NULL;
	return(Switch_context(old, continuation, new));
}

1c79356b
A
348/*
349 * act_machine_sv_free
350 * release saveareas associated with an act. if flag is true, release
351 * user level savearea(s) too, else don't
352 */
353void
354act_machine_sv_free(thread_act_t act, int flag)
355{
1c79356b
A
356}
357
/*
 * machine_thread_set_state:
 *
 * Set the status of the specified thread.  Called with "appropriate"
 * thread-related locks held (see act_lock_thread()), so
 * thr_act->thread is guaranteed not to change.
 */

kern_return_t
machine_thread_set_state(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	int			kernel_act = 0;

	switch (flavor) {
	case THREAD_SYSCALL_STATE:
	{
		register struct thread_syscall_state *state;
		register struct i386_saved_state *saved_state = USER_REGS(thr_act);

		state = (struct thread_syscall_state *) tstate;
		saved_state->eax = state->eax;
		saved_state->edx = state->edx;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
		saved_state->eip = state->eip;
		saved_state->uesp = state->esp;
		break;
	}

	case i386_SAVED_STATE:
	{
		register struct i386_saved_state	*state;
		register struct i386_saved_state	*saved_state;

		if (count < i386_SAVED_STATE_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		state = (struct i386_saved_state *) tstate;

		saved_state = USER_REGS(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
						| EFL_USER_SET;

		/*
		 * Segment registers.  Set differently in V8086 mode.
		 */
		if (state->efl & EFL_VM) {
		    /*
		     * Set V8086 mode segment registers.
		     */
		    saved_state->cs = state->cs & 0xffff;
		    saved_state->ss = state->ss & 0xffff;
		    saved_state->v86_segs.v86_ds = state->ds & 0xffff;
		    saved_state->v86_segs.v86_es = state->es & 0xffff;
		    saved_state->v86_segs.v86_fs = state->fs & 0xffff;
		    saved_state->v86_segs.v86_gs = state->gs & 0xffff;

		    /*
		     * Zero protected mode segment registers.
		     */
		    saved_state->ds = 0;
		    saved_state->es = 0;
		    saved_state->fs = 0;
		    saved_state->gs = 0;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			thr_act->mact.pcb->ims.v86s.flags =
			    state->efl & (EFL_TF | EFL_IF);
		    }
		}
		else if (kernel_act) {
		    /*
		     * 386 mode.  Set segment registers for flat
		     * 32-bit address space.
		     */
		    saved_state->cs = KERNEL_CS;
		    saved_state->ss = KERNEL_DS;
		    saved_state->ds = KERNEL_DS;
		    saved_state->es = KERNEL_DS;
		    saved_state->fs = KERNEL_DS;
		    saved_state->gs = CPU_DATA;
		}
		else {
		    /*
		     * User setting segment registers.
		     * Code and stack selectors have already been
		     * checked.  Others will be reset by 'iret'
		     * if they are not valid.
		     */
		    saved_state->cs = state->cs;
		    saved_state->ss = state->ss;
		    saved_state->ds = state->ds;
		    saved_state->es = state->es;
		    saved_state->fs = state->fs;
		    saved_state->gs = state->gs;
		}
		break;
	}

	case i386_NEW_THREAD_STATE:
	case i386_REGS_SEGS_STATE:
	{
		register struct i386_new_thread_state	*state;
		register struct i386_saved_state	*saved_state;

		if (count < i386_NEW_THREAD_STATE_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		state = (struct i386_new_thread_state *) tstate;

		if (flavor == i386_REGS_SEGS_STATE) {
		    /*
		     * Code and stack selectors must not be null,
		     * and must have user protection levels.
		     * Only the low 16 bits are valid.
		     */
		    state->cs &= 0xffff;
		    state->ss &= 0xffff;
		    state->ds &= 0xffff;
		    state->es &= 0xffff;
		    state->fs &= 0xffff;
		    state->gs &= 0xffff;

		    if (!kernel_act &&
			(state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
		       || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U))
			return KERN_INVALID_ARGUMENT;
		}

		saved_state = USER_REGS(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;
		if (kernel_act)
			saved_state->efl = state->efl;
		else
			saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
						| EFL_USER_SET;

		/*
		 * Segment registers.  Set differently in V8086 mode.
		 */
		if (state->efl & EFL_VM) {
		    /*
		     * Set V8086 mode segment registers.
		     */
		    saved_state->cs = state->cs & 0xffff;
		    saved_state->ss = state->ss & 0xffff;
		    saved_state->v86_segs.v86_ds = state->ds & 0xffff;
		    saved_state->v86_segs.v86_es = state->es & 0xffff;
		    saved_state->v86_segs.v86_fs = state->fs & 0xffff;
		    saved_state->v86_segs.v86_gs = state->gs & 0xffff;

		    /*
		     * Zero protected mode segment registers.
		     */
		    saved_state->ds = 0;
		    saved_state->es = 0;
		    saved_state->fs = 0;
		    saved_state->gs = 0;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on.
			 */
			thr_act->mact.pcb->ims.v86s.flags =
			    state->efl & (EFL_TF | EFL_IF);
		    }
		}
		else if (flavor == i386_NEW_THREAD_STATE && kernel_act) {
		    /*
		     * 386 mode.  Set segment registers for flat
		     * 32-bit address space.
		     */
		    saved_state->cs = KERNEL_CS;
		    saved_state->ss = KERNEL_DS;
		    saved_state->ds = KERNEL_DS;
		    saved_state->es = KERNEL_DS;
		    saved_state->fs = KERNEL_DS;
		    saved_state->gs = CPU_DATA;
		}
		else {
		    /*
		     * User setting segment registers.
		     * Code and stack selectors have already been
		     * checked.  Others will be reset by 'iret'
		     * if they are not valid.
		     */
		    saved_state->cs = state->cs;
		    saved_state->ss = state->ss;
		    saved_state->ds = state->ds;
		    saved_state->es = state->es;
		    saved_state->fs = state->fs;
		    saved_state->gs = state->gs;
		}
		break;
	}

	case i386_FLOAT_STATE: {
		struct i386_float_state *state = (struct i386_float_state*)tstate;

		if (count < i386_old_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);
		if (count < i386_FLOAT_STATE_COUNT)
			return fpu_set_state(thr_act,(struct i386_float_state*)tstate);
		else
			return fpu_set_fxstate(thr_act,(struct i386_float_state*)tstate);
	}

	/*
	 * Temporary - replace by i386_io_map
	 */
	case i386_ISA_PORT_MAP_STATE: {
		register struct i386_isa_port_map_state *state;
		register iopb_tss_t	tss;

		if (count < i386_ISA_PORT_MAP_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		break;
	}

	case i386_V86_ASSIST_STATE:
	{
		register struct i386_v86_assist_state *state;
		vm_offset_t	int_table;
		int		int_count;

		if (count < i386_V86_ASSIST_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct i386_v86_assist_state *) tstate;
		int_table = state->int_table;
		int_count = state->int_count;

		if (int_table >= VM_MAX_ADDRESS ||
		    int_table +
			int_count * sizeof(struct v86_interrupt_table)
			    > VM_MAX_ADDRESS)
		    return KERN_INVALID_ARGUMENT;

		thr_act->mact.pcb->ims.v86s.int_table = int_table;
		thr_act->mact.pcb->ims.v86s.int_count = int_count;

		thr_act->mact.pcb->ims.v86s.flags =
			USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
		break;
	}

	case i386_THREAD_STATE: {
		struct i386_saved_state	*saved_state;
		i386_thread_state_t	*state25;

		saved_state = USER_REGS(thr_act);
		state25 = (i386_thread_state_t *)tstate;

		saved_state->eax = state25->eax;
		saved_state->ebx = state25->ebx;
		saved_state->ecx = state25->ecx;
		saved_state->edx = state25->edx;
		saved_state->edi = state25->edi;
		saved_state->esi = state25->esi;
		saved_state->ebp = state25->ebp;
		saved_state->uesp = state25->esp;
		saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
						| EFL_USER_SET;
		saved_state->eip = state25->eip;
		saved_state->cs = USER_CS;	/* FIXME? */
		saved_state->ss = USER_DS;
		saved_state->ds = USER_DS;
		saved_state->es = USER_DS;
		saved_state->fs = state25->fs;
		saved_state->gs = state25->gs;
	}
		break;

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

/*
 * machine_thread_get_state:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	switch (flavor)  {

	case i386_SAVED_STATE:
	{
		register struct i386_saved_state	*state;
		register struct i386_saved_state	*saved_state;

		if (*count < i386_SAVED_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_saved_state *) tstate;
		saved_state = USER_REGS(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;

		if (saved_state->efl & EFL_VM) {
		    /*
		     * V8086 mode.
		     */
		    state->ds = saved_state->v86_segs.v86_ds & 0xffff;
		    state->es = saved_state->v86_segs.v86_es & 0xffff;
		    state->fs = saved_state->v86_segs.v86_fs & 0xffff;
		    state->gs = saved_state->v86_segs.v86_gs & 0xffff;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on
			 */
			if ((thr_act->mact.pcb->ims.v86s.flags &
					(EFL_IF|V86_IF_PENDING)) == 0)
			    state->efl &= ~EFL_IF;
		    }
		}
		else {
		    /*
		     * 386 mode.
		     */
		    state->ds = saved_state->ds & 0xffff;
		    state->es = saved_state->es & 0xffff;
		    state->fs = saved_state->fs & 0xffff;
		    state->gs = saved_state->gs & 0xffff;
		}
		*count = i386_SAVED_STATE_COUNT;
		break;
	}

	case i386_NEW_THREAD_STATE:
	case i386_REGS_SEGS_STATE:
	{
		register struct i386_new_thread_state	*state;
		register struct i386_saved_state	*saved_state;

		if (*count < i386_NEW_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_new_thread_state *) tstate;
		saved_state = USER_REGS(thr_act);

		/*
		 * General registers.
		 */
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->ebx = saved_state->ebx;
		state->edx = saved_state->edx;
		state->ecx = saved_state->ecx;
		state->eax = saved_state->eax;
		state->eip = saved_state->eip;
		state->efl = saved_state->efl;
		state->uesp = saved_state->uesp;

		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		if (saved_state->efl & EFL_VM) {
		    /*
		     * V8086 mode.
		     */
		    state->ds = saved_state->v86_segs.v86_ds & 0xffff;
		    state->es = saved_state->v86_segs.v86_es & 0xffff;
		    state->fs = saved_state->v86_segs.v86_fs & 0xffff;
		    state->gs = saved_state->v86_segs.v86_gs & 0xffff;

		    if (thr_act->mact.pcb->ims.v86s.int_table) {
			/*
			 * Hardware assist on
			 */
			if ((thr_act->mact.pcb->ims.v86s.flags &
					(EFL_IF|V86_IF_PENDING)) == 0)
			    state->efl &= ~EFL_IF;
		    }
		}
		else {
		    /*
		     * 386 mode.
		     */
		    state->ds = saved_state->ds & 0xffff;
		    state->es = saved_state->es & 0xffff;
		    state->fs = saved_state->fs & 0xffff;
		    state->gs = saved_state->gs & 0xffff;
		}
		*count = i386_NEW_THREAD_STATE_COUNT;
		break;
	}

	case THREAD_SYSCALL_STATE:
	{
		register struct thread_syscall_state *state;
		register struct i386_saved_state *saved_state = USER_REGS(thr_act);

		state = (struct thread_syscall_state *) tstate;
		state->eax = saved_state->eax;
		state->edx = saved_state->edx;
		state->efl = saved_state->efl;
		state->eip = saved_state->eip;
		state->esp = saved_state->uesp;
		*count = i386_THREAD_SYSCALL_STATE_COUNT;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);
		tstate[0] = i386_NEW_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_ISA_PORT_MAP_STATE;
		tstate[3] = i386_V86_ASSIST_STATE;
		tstate[4] = THREAD_SYSCALL_STATE;
		*count = 5;
		break;

	case i386_FLOAT_STATE: {
		struct i386_float_state *state = (struct i386_float_state*)tstate;

		if (*count < i386_old_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);
		if (*count < i386_FLOAT_STATE_COUNT) {
			*count = i386_old_FLOAT_STATE_COUNT;
			return fpu_get_state(thr_act,(struct i386_float_state *)tstate);
		} else {
			*count = i386_FLOAT_STATE_COUNT;
			return fpu_get_fxstate(thr_act,(struct i386_float_state *)tstate);
		}
	}

	/*
	 * Temporary - replace by i386_io_map
	 */
	case i386_ISA_PORT_MAP_STATE: {
		register struct i386_isa_port_map_state *state;
		register iopb_tss_t tss;

		if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (struct i386_isa_port_map_state *) tstate;
		tss = thr_act->mact.pcb->ims.io_tss;

		if (tss == 0) {
		    int	i;

		    /*
		     *	The thread has no ktss, so no IO permissions.
		     */

		    for (i = 0; i < sizeof state->pm; i++)
			state->pm[i] = 0xff;
		} else {
		    /*
		     *	The thread has its own ktss.
		     */

		    bcopy((char *) tss->bitmap,
			  (char *) state->pm,
			  sizeof state->pm);
		}

		*count = i386_ISA_PORT_MAP_STATE_COUNT;
		break;
	}

	case i386_V86_ASSIST_STATE:
	{
		register struct i386_v86_assist_state *state;

		if (*count < i386_V86_ASSIST_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct i386_v86_assist_state *) tstate;
		state->int_table = thr_act->mact.pcb->ims.v86s.int_table;
		state->int_count = thr_act->mact.pcb->ims.v86s.int_count;

		*count = i386_V86_ASSIST_STATE_COUNT;
		break;
	}

	case i386_THREAD_STATE: {
		struct i386_saved_state	*saved_state;
		i386_thread_state_t	*state;

		saved_state = USER_REGS(thr_act);
		state = (i386_thread_state_t *)tstate;

		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds;
		state->es = saved_state->es;
		state->fs = saved_state->fs;
		state->gs = saved_state->gs;
		break;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
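
/*
 * Example (a hedged, user-level sketch; not part of this file's code):
 * the i386_THREAD_STATE case above is what ultimately services a Mach
 * thread_get_state() call such as:
 *
 *	i386_thread_state_t	ts;
 *	mach_msg_type_number_t	count = i386_THREAD_STATE_COUNT;
 *	kern_return_t		kr;
 *
 *	kr = thread_get_state(thread_port, i386_THREAD_STATE,
 *			      (thread_state_t)&ts, &count);
 *
 * On success, ts.eip and ts.esp hold the thread's user-mode program
 * counter and stack pointer as copied out of its saved state here.
 */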

/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	pcb_t			pcb = &thread->mact.xxx_pcb;

	thread->mact.pcb = pcb;

	simple_lock_init(&pcb->lock, ETAP_MISC_PCB);

	/*
	 *	Guarantee that the bootstrapped thread will be in user
	 *	mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
	pcb->iss.efl = EFL_USER_SET;
	{
		extern struct fake_descriptor ldt[];
		struct real_descriptor *ldtp;
		ldtp = (struct real_descriptor *)ldt;
		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
	}

	/*
	 *	Allocate a kernel stack per shuttle
	 */
	thread->kernel_stack = (int)stack_alloc(thread, thread_continue);
	thread->state &= ~TH_STACK_HANDOFF;
	assert(thread->kernel_stack != 0);

	/*
	 *	Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(thread->kernel_stack)->saved_state = &pcb->iss;

	return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register pcb_t	pcb = thread->mact.pcb;

	assert(pcb);

	if (pcb->ims.io_tss != 0)
		iopb_destroy(pcb->ims.io_tss);
	if (pcb->ims.ifps != 0)
		fp_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	thread->mact.pcb = (pcb_t)0;
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_thread_set_current( thread_t thread )
{
	register int	my_cpu;

	mp_disable_preemption();
	my_cpu = cpu_number();

	cpu_data[my_cpu].active_thread = thread->top_act;
	active_kloaded[my_cpu] = THR_ACT_NULL;

	mp_enable_preemption();
}

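/*
 * machine_thread_terminate_self:
 *
 * Nothing machine-dependent to do here on i386; the PCB and any
 * associated I/O TSS, FP state and LDT are released later by
 * machine_thread_destroy().
 */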
void
machine_thread_terminate_self(void)
{
}

void
act_machine_return(int code)
{
	thread_act_t	thr_act = current_act();

	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );
	assert( thr_act );

	/* This is the only activation attached to the shuttle... */
	/* terminate the entire thread (shuttle plus activation) */

	assert(thr_act->thread->top_act == thr_act);
	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return: TALKING ZOMBIE! (1)");
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	fpu_module_init();
	iopb_init();
}

/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_act_t);
void		dump_regs(thread_act_t);

static void
dump_handlers(thread_act_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%x)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%x)->",counter,rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}

void
dump_regs(thread_act_t thr_act)
{
	if (thr_act->mact.pcb) {
		register struct i386_saved_state *ssp = USER_REGS(thr_act);
		/* Print out user register state */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}

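/*
 * dump_act:
 *
 * Debugger aid: print an activation's reference counts, pending
 * return handlers and saved register state; returns the activation
 * pointer (as an int) so calls can be chained.
 */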
int
dump_act(thread_act_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
	       thr_act->task,   thr_act->task   ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower);
	printf("\tpcb=%x\n", thr_act->mact.pcb);

	if (thr_act->thread && thr_act->thread->kernel_stack) {
		vm_offset_t stack = thr_act->thread->kernel_stack;

		printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
		       stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);
	dump_regs(thr_act);
	return((int)thr_act);
}
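
/*
 * get_useraddr:
 *
 * Return the user-mode instruction pointer (eip) saved in the current
 * activation's PCB, or 0 if the activation has no PCB.
 */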
unsigned int
get_useraddr()
{
	thread_act_t thr_act = current_act();

	if (thr_act->mact.pcb)
		return(thr_act->mact.pcb->iss.eip);
	else
		return(0);
}

void
thread_swapin_mach_alloc(thread_t thread)
{
	/* 386 does not have saveareas */
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
			thread, thread->priority,
			thread->sched_pri, 0,
			0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	return(stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(thread_t thread,
		     vm_offset_t stack,
		     void (*start_pos)(thread_t))
{
	struct i386_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
			thread, thread->priority,
			thread->sched_pri, start_pos,
			0);

	assert(stack);
	statep = STACK_IKS(stack);
	thread->kernel_stack = stack;

	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) start_pos;
	statep->k_esp = (unsigned long) STACK_IEL(stack);

	STACK_IEL(stack)->saved_state = &thread->mact.pcb->iss;

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF),
			old, old->priority,
			old->sched_pri, 0,
			0);

	assert(new->top_act);
	assert(old->top_act);

	stack = machine_stack_detach(old);
	machine_stack_attach(new, stack, 0);

	PMAP_SWITCH_CONTEXT(old->top_act->task, new->top_act->task, cpu_number());

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	machine_thread_set_current(new);

	active_stacks[cpu_number()] = new->kernel_stack;

	return;
}
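
/*
 * act_thread_csave / act_thread_catt / act_thread_cfree:
 *
 * Save and restore an activation's machine context by value.
 * act_thread_csave() captures the current activation's integer and
 * floating-point state into a kalloc'ed i386_act_context (returning
 * NULL on failure); act_thread_catt() reapplies a saved context to
 * the current activation and frees it; act_thread_cfree() discards
 * a saved context without applying it.
 */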

struct i386_act_context {
	struct i386_saved_state ss;
	struct i386_float_state fs;
};

void *
act_thread_csave(void)
{
	struct i386_act_context *ic;
	kern_return_t kret;
	int val;

	ic = (struct i386_act_context *)kalloc(sizeof(struct i386_act_context));

	if (ic == (struct i386_act_context *)NULL)
		return((void *)0);

	val = i386_SAVED_STATE_COUNT;
	kret = machine_thread_get_state(current_act(),
					i386_SAVED_STATE,
					(thread_state_t) &ic->ss,
					&val);
	if (kret != KERN_SUCCESS) {
		kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
		return((void *)0);
	}
	val = i386_FLOAT_STATE_COUNT;
	kret = machine_thread_get_state(current_act(),
					i386_FLOAT_STATE,
					(thread_state_t) &ic->fs,
					&val);
	if (kret != KERN_SUCCESS) {
		kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
		return((void *)0);
	}
	return(ic);
}
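
/*
 * act_thread_catt:
 *
 * Apply a context previously captured by act_thread_csave() to the
 * current activation, then release it.  A NULL context is ignored.
 */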
void
act_thread_catt(void *ctx)
{
	struct i386_act_context *ic;
	kern_return_t kret;
	int val;

	ic = (struct i386_act_context *)ctx;

	if (ic == (struct i386_act_context *)NULL)
		return;

	kret = machine_thread_set_state(current_act(),
					i386_SAVED_STATE,
					(thread_state_t) &ic->ss,
					i386_SAVED_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;

	kret = machine_thread_set_state(current_act(),
					i386_FLOAT_STATE,
					(thread_state_t) &ic->fs,
					i386_FLOAT_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;
out:
	kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
}

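/*
 * act_thread_cfree:
 *
 * Release a context captured by act_thread_csave() without
 * applying it.
 */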
void
act_thread_cfree(void *ctx)
{
	kfree((vm_offset_t)ctx,sizeof(struct i386_act_context));
}