]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/pcb.c
ca1a170f437ff45f417dcd86d71c7b67da8568e3
[apple/xnu.git] / osfmk / i386 / pcb.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51 #include <mach_rt.h>
52 #include <mach_debug.h>
53 #include <mach_ldebug.h>
54
55 #include <sys/kdebug.h>
56
57 #include <mach/kern_return.h>
58 #include <mach/thread_status.h>
59 #include <mach/vm_param.h>
60
61 #include <i386/cpu_data.h>
62 #include <i386/cpu_number.h>
63
64 #include <kern/counters.h>
65 #include <kern/kalloc.h>
66 #include <kern/mach_param.h>
67 #include <kern/processor.h>
68 #include <kern/cpu_data.h>
69 #include <kern/cpu_number.h>
70 #include <kern/task.h>
71 #include <kern/thread.h>
72 #include <kern/sched_prim.h>
73 #include <kern/misc_protos.h>
74 #include <kern/assert.h>
75 #include <kern/spl.h>
76 #include <kern/machine.h>
77 #include <ipc/ipc_port.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_map.h>
80 #include <vm/pmap.h>
81 #include <vm/vm_protos.h>
82
83 #include <i386/thread.h>
84 #include <i386/eflags.h>
85 #include <i386/proc_reg.h>
86 #include <i386/seg.h>
87 #include <i386/tss.h>
88 #include <i386/user_ldt.h>
89 #include <i386/fpu.h>
90 #include <i386/iopb_entries.h>
91 #include <i386/mp_desc.h>
92 #include <i386/cpu_data.h>
93
94
/*
 * Maps state flavor to number of words in the state:
 * Indexed by thread_flavor_t; each entry is the 32-bit-word count
 * expected/returned for that flavor (0 for pseudo-flavors that
 * carry no register state).
 */
__private_extern__
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	i386_NEW_THREAD_STATE_COUNT,
	i386_FLOAT_STATE_COUNT,
	i386_ISA_PORT_MAP_STATE_COUNT,
	i386_V86_ASSIST_STATE_COUNT,
	i386_REGS_SEGS_STATE_COUNT,
	i386_THREAD_SYSCALL_STATE_COUNT,
	/* THREAD_STATE_NONE */ 0,
	i386_SAVED_STATE_COUNT,
};
110
111 /* Forward */
112
113 void act_machine_throughcall(thread_t thr_act);
114 user_addr_t get_useraddr(void);
115 void act_machine_return(int);
116 void act_machine_sv_free(thread_t, int);
117
118 extern thread_t Switch_context(
119 thread_t old,
120 thread_continue_t cont,
121 thread_t new);
122 extern void Thread_continue(void);
123 extern void Load_context(
124 thread_t thread);
125
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages.
 * No-op on i386: there are no machine-dependent pages to reclaim.
 */
void
consider_machine_collect(void)
{
}
135
/*
 * consider_machine_adjust:
 * Machine-dependent VM adjustment hook; no-op on i386.
 */
void
consider_machine_adjust(void)
{
}
140
141
// DEBUG
/* Debug counters for kernel/user LDT switches; DEBUG_uldt rate-limits
 * the printf in act_machine_switch_pcb() (one message per 0x7fff hits). */
int DEBUG_kldt = 0;
int DEBUG_uldt = 0;
145
/*
 * act_machine_switch_pcb:
 *
 * Install the hardware context of `new' on the current CPU:
 * points the exception link at the thread's saved state, sets the
 * ring-0 stack pointer in the (kernel or per-thread) TSS, installs
 * the thread's LDT or per-thread LDT entries, and loads the FP
 * context if needed.  Runs with preemption disabled around the
 * per-CPU descriptor updates.  Caller must ensure `new' has a
 * kernel stack.
 */
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t pcb = new->machine.pcb;
	int mycpu;
	register iopb_tss_t tss = pcb->ims.io_tss;	/* non-NULL => thread has private IO bitmap */
	vm_offset_t pcb_stack_top;
	register user_ldt_t uldt = pcb->ims.ldt;	/* non-NULL => thread has private LDT */

	assert(new->kernel_stack != 0);
	/* Link the kernel stack's exception frame to this thread's saved state. */
	STACK_IEL(new->kernel_stack)->saved_state =
		&new->machine.pcb->iss;

	/*
	 * Save a pointer to the top of the "kernel" stack -
	 * actually the place in the PCB where a trap into
	 * kernel mode will push the registers.
	 * The location depends on V8086 mode. If we are
	 * not in V8086 mode, then a trap into the kernel
	 * won`t save the v86 segments, so we leave room.
	 */

	pcb_stack_top = (pcb->iss.efl & EFL_VM)
		? (int) (&pcb->iss + 1)
		: (int) (&pcb->iss.v86_segs);

	/* Descriptor-table updates below are per-CPU; don't migrate. */
	mp_disable_preemption();
	mycpu = cpu_number();

	if (tss == 0) {
		/*
		 * No per-thread IO permissions.
		 * Use standard kernel TSS.
		 */
		if (!(gdt_desc_p(KERNEL_TSS)->access & ACC_TSS_BUSY))
			set_tr(KERNEL_TSS);
		current_ktss()->esp0 = pcb_stack_top;
	}
	else {
		/*
		 * Set the IO permissions. Use this thread`s TSS.
		 */
		*gdt_desc_p(USER_TSS)
			= *(struct real_descriptor *)tss->iopb_desc;
		tss->tss.esp0 = pcb_stack_top;
		set_tr(USER_TSS);
		/* Clear busy so KERNEL_TSS can be reloaded later without a fault. */
		gdt_desc_p(KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
	}

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (uldt == 0) {
		struct real_descriptor *ldtp;
		/*
		 * Use system LDT.
		 */
		// Set up the tasks specific ldt entries if extant
		ldtp = (struct real_descriptor *)current_ldt();
		ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
		if (pcb->uldt_selector != 0)
			ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
		set_ldt(KERNEL_LDT);
	}
	else {
		/*
		 * Thread has its own LDT. // THIS SHOULD BE REMOVED!!!!
		 */
		*gdt_desc_p(USER_LDT) = uldt->desc;
		set_ldt(USER_LDT);
		/*debug*/
		if ((DEBUG_uldt++ % 0x7fff) == 0)
			printf("KERNEL----> setting user ldt");

	}

	mp_enable_preemption();
	/*
	 * Load the floating-point context, if necessary.
	 */
	fpu_load_context(pcb);

}
229
/*
 * Switch to the first thread on a CPU.
 * Installs the thread's hardware context (TSS/LDT/FPU) and then
 * jumps into it via Load_context; does not return.
 */
void
machine_load_context(
	thread_t new)
{
	act_machine_switch_pcb(new);
	Load_context(new);
}
240
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 *
 * Called on the old thread's kernel stack; returns on the new
 * thread's stack as the result of Switch_context.
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
#if MACH_RT
	/* The outgoing thread must own this CPU's active stack. */
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	{
		int mycpu = cpu_number();

		PMAP_SWITCH_CONTEXT(old, new, mycpu)
	}

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);
	/* kdebug trace of the switch before the stacks actually change. */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
	old->continuation = NULL;
	return(Switch_context(old, continuation, new));
}
280
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 *
 * No-op on i386: saved state lives inside the PCB, so there is
 * nothing separate to release.
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
290
291
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 *
 * Currently a no-op on i386; always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
#pragma unused (thread)

	return KERN_SUCCESS;
}
304
305
306 /*
307 * act_machine_set_state:
308 *
309 * Set the status of the specified thread.
310 */
311
312 kern_return_t
313 machine_thread_set_state(
314 thread_t thr_act,
315 thread_flavor_t flavor,
316 thread_state_t tstate,
317 mach_msg_type_number_t count)
318 {
319 int kernel_act = 0;
320
321 switch (flavor) {
322 case THREAD_SYSCALL_STATE:
323 {
324 register struct thread_syscall_state *state;
325 register struct i386_saved_state *saved_state = USER_REGS(thr_act);
326
327 state = (struct thread_syscall_state *) tstate;
328 saved_state->eax = state->eax;
329 saved_state->edx = state->edx;
330 if (kernel_act)
331 saved_state->efl = state->efl;
332 else
333 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
334 saved_state->eip = state->eip;
335 saved_state->uesp = state->esp;
336 break;
337 }
338
339 case i386_SAVED_STATE:
340 {
341 register struct i386_saved_state *state;
342 register struct i386_saved_state *saved_state;
343
344 if (count < i386_SAVED_STATE_COUNT) {
345 return(KERN_INVALID_ARGUMENT);
346 }
347
348 state = (struct i386_saved_state *) tstate;
349
350 /* Check segment selectors are safe */
351 if (!kernel_act &&
352 !valid_user_segment_selectors(state->cs,
353 state->ss,
354 state->ds,
355 state->es,
356 state->fs,
357 state->gs))
358 return KERN_INVALID_ARGUMENT;
359
360 saved_state = USER_REGS(thr_act);
361
362 /*
363 * General registers
364 */
365 saved_state->edi = state->edi;
366 saved_state->esi = state->esi;
367 saved_state->ebp = state->ebp;
368 saved_state->uesp = state->uesp;
369 saved_state->ebx = state->ebx;
370 saved_state->edx = state->edx;
371 saved_state->ecx = state->ecx;
372 saved_state->eax = state->eax;
373 saved_state->eip = state->eip;
374 if (kernel_act)
375 saved_state->efl = state->efl;
376 else
377 saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
378 | EFL_USER_SET;
379
380 /*
381 * Segment registers. Set differently in V8086 mode.
382 */
383 if (state->efl & EFL_VM) {
384 /*
385 * Set V8086 mode segment registers.
386 */
387 saved_state->cs = state->cs & 0xffff;
388 saved_state->ss = state->ss & 0xffff;
389 saved_state->v86_segs.v86_ds = state->ds & 0xffff;
390 saved_state->v86_segs.v86_es = state->es & 0xffff;
391 saved_state->v86_segs.v86_fs = state->fs & 0xffff;
392 saved_state->v86_segs.v86_gs = state->gs & 0xffff;
393
394 /*
395 * Zero protected mode segment registers.
396 */
397 saved_state->ds = 0;
398 saved_state->es = 0;
399 saved_state->fs = 0;
400 saved_state->gs = 0;
401
402 if (thr_act->machine.pcb->ims.v86s.int_table) {
403 /*
404 * Hardware assist on.
405 */
406 thr_act->machine.pcb->ims.v86s.flags =
407 state->efl & (EFL_TF | EFL_IF);
408 }
409 }
410 else if (kernel_act) {
411 /*
412 * 386 mode. Set segment registers for flat
413 * 32-bit address space.
414 */
415 saved_state->cs = KERNEL_CS;
416 saved_state->ss = KERNEL_DS;
417 saved_state->ds = KERNEL_DS;
418 saved_state->es = KERNEL_DS;
419 saved_state->fs = KERNEL_DS;
420 saved_state->gs = CPU_DATA_GS;
421 }
422 else {
423 /*
424 * User setting segment registers.
425 * Code and stack selectors have already been
426 * checked. Others will be reset by 'iret'
427 * if they are not valid.
428 */
429 saved_state->cs = state->cs;
430 saved_state->ss = state->ss;
431 saved_state->ds = state->ds;
432 saved_state->es = state->es;
433 saved_state->fs = state->fs;
434 saved_state->gs = state->gs;
435 }
436 break;
437 }
438
439 case i386_NEW_THREAD_STATE:
440 case i386_REGS_SEGS_STATE:
441 {
442 register struct i386_new_thread_state *state;
443 register struct i386_saved_state *saved_state;
444
445 if (count < i386_NEW_THREAD_STATE_COUNT) {
446 return(KERN_INVALID_ARGUMENT);
447 }
448
449 state = (struct i386_new_thread_state *) tstate;
450
451 if (flavor == i386_REGS_SEGS_STATE) {
452 /*
453 * Code and stack selectors must not be null,
454 * and must have user protection levels.
455 * Only the low 16 bits are valid.
456 */
457 state->cs &= 0xffff;
458 state->ss &= 0xffff;
459 state->ds &= 0xffff;
460 state->es &= 0xffff;
461 state->fs &= 0xffff;
462 state->gs &= 0xffff;
463
464 if (!kernel_act &&
465 !valid_user_segment_selectors(state->cs,
466 state->ss,
467 state->ds,
468 state->es,
469 state->fs,
470 state->gs))
471 return KERN_INVALID_ARGUMENT;
472 }
473
474 saved_state = USER_REGS(thr_act);
475
476 /*
477 * General registers
478 */
479 saved_state->edi = state->edi;
480 saved_state->esi = state->esi;
481 saved_state->ebp = state->ebp;
482 saved_state->uesp = state->uesp;
483 saved_state->ebx = state->ebx;
484 saved_state->edx = state->edx;
485 saved_state->ecx = state->ecx;
486 saved_state->eax = state->eax;
487 saved_state->eip = state->eip;
488 if (kernel_act)
489 saved_state->efl = state->efl;
490 else
491 saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
492 | EFL_USER_SET;
493
494 /*
495 * Segment registers. Set differently in V8086 mode.
496 */
497 if (state->efl & EFL_VM) {
498 /*
499 * Set V8086 mode segment registers.
500 */
501 saved_state->cs = state->cs & 0xffff;
502 saved_state->ss = state->ss & 0xffff;
503 saved_state->v86_segs.v86_ds = state->ds & 0xffff;
504 saved_state->v86_segs.v86_es = state->es & 0xffff;
505 saved_state->v86_segs.v86_fs = state->fs & 0xffff;
506 saved_state->v86_segs.v86_gs = state->gs & 0xffff;
507
508 /*
509 * Zero protected mode segment registers.
510 */
511 saved_state->ds = 0;
512 saved_state->es = 0;
513 saved_state->fs = 0;
514 saved_state->gs = 0;
515
516 if (thr_act->machine.pcb->ims.v86s.int_table) {
517 /*
518 * Hardware assist on.
519 */
520 thr_act->machine.pcb->ims.v86s.flags =
521 state->efl & (EFL_TF | EFL_IF);
522 }
523 }
524 else if (flavor == i386_NEW_THREAD_STATE && kernel_act) {
525 /*
526 * 386 mode. Set segment registers for flat
527 * 32-bit address space.
528 */
529 saved_state->cs = KERNEL_CS;
530 saved_state->ss = KERNEL_DS;
531 saved_state->ds = KERNEL_DS;
532 saved_state->es = KERNEL_DS;
533 saved_state->fs = KERNEL_DS;
534 saved_state->gs = CPU_DATA_GS;
535 }
536 else {
537 /*
538 * User setting segment registers.
539 * Code and stack selectors have already been
540 * checked. Others will be reset by 'iret'
541 * if they are not valid.
542 */
543 saved_state->cs = state->cs;
544 saved_state->ss = state->ss;
545 saved_state->ds = state->ds;
546 saved_state->es = state->es;
547 saved_state->fs = state->fs;
548 saved_state->gs = state->gs;
549 }
550 break;
551 }
552
553 case i386_FLOAT_STATE: {
554 if (count < i386_old_FLOAT_STATE_COUNT)
555 return(KERN_INVALID_ARGUMENT);
556 if (count < i386_FLOAT_STATE_COUNT)
557 return fpu_set_state(thr_act,(struct i386_float_state*)tstate);
558 else return fpu_set_fxstate(thr_act,(struct i386_float_state*)tstate);
559 }
560
561 /*
562 * Temporary - replace by i386_io_map
563 */
564 case i386_ISA_PORT_MAP_STATE: {
565 if (count < i386_ISA_PORT_MAP_STATE_COUNT)
566 return(KERN_INVALID_ARGUMENT);
567
568 break;
569 }
570
571 case i386_V86_ASSIST_STATE:
572 {
573 register struct i386_v86_assist_state *state;
574 vm_offset_t int_table;
575 int int_count;
576
577 if (count < i386_V86_ASSIST_STATE_COUNT)
578 return KERN_INVALID_ARGUMENT;
579
580 state = (struct i386_v86_assist_state *) tstate;
581 int_table = state->int_table;
582 int_count = state->int_count;
583
584 if (int_table >= VM_MAX_ADDRESS ||
585 int_table +
586 int_count * sizeof(struct v86_interrupt_table)
587 > VM_MAX_ADDRESS)
588 return KERN_INVALID_ARGUMENT;
589
590 thr_act->machine.pcb->ims.v86s.int_table = int_table;
591 thr_act->machine.pcb->ims.v86s.int_count = int_count;
592
593 thr_act->machine.pcb->ims.v86s.flags =
594 USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF);
595 break;
596 }
597
598 case i386_THREAD_STATE: {
599 struct i386_saved_state *saved_state;
600 i386_thread_state_t *state25;
601
602 saved_state = USER_REGS(thr_act);
603 state25 = (i386_thread_state_t *)tstate;
604
605 saved_state->eax = state25->eax;
606 saved_state->ebx = state25->ebx;
607 saved_state->ecx = state25->ecx;
608 saved_state->edx = state25->edx;
609 saved_state->edi = state25->edi;
610 saved_state->esi = state25->esi;
611 saved_state->ebp = state25->ebp;
612 saved_state->uesp = state25->esp;
613 saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR)
614 | EFL_USER_SET;
615 saved_state->eip = state25->eip;
616 saved_state->cs = USER_CS; /* FIXME? */
617 saved_state->ss = USER_DS;
618 saved_state->ds = USER_DS;
619 saved_state->es = USER_DS;
620 saved_state->fs = state25->fs;
621 saved_state->gs = state25->gs;
622 }
623 break;
624
625 default:
626 return(KERN_INVALID_ARGUMENT);
627 }
628
629 return(KERN_SUCCESS);
630 }
631
632 /*
633 * thread_getstatus:
634 *
635 * Get the status of the specified thread.
636 */
637
638
639 kern_return_t
640 machine_thread_get_state(
641 thread_t thr_act,
642 thread_flavor_t flavor,
643 thread_state_t tstate,
644 mach_msg_type_number_t *count)
645 {
646 switch (flavor) {
647
648 case i386_SAVED_STATE:
649 {
650 register struct i386_saved_state *state;
651 register struct i386_saved_state *saved_state;
652
653 if (*count < i386_SAVED_STATE_COUNT)
654 return(KERN_INVALID_ARGUMENT);
655
656 state = (struct i386_saved_state *) tstate;
657 saved_state = USER_REGS(thr_act);
658
659 /*
660 * First, copy everything:
661 */
662 *state = *saved_state;
663
664 if (saved_state->efl & EFL_VM) {
665 /*
666 * V8086 mode.
667 */
668 state->ds = saved_state->v86_segs.v86_ds & 0xffff;
669 state->es = saved_state->v86_segs.v86_es & 0xffff;
670 state->fs = saved_state->v86_segs.v86_fs & 0xffff;
671 state->gs = saved_state->v86_segs.v86_gs & 0xffff;
672
673 if (thr_act->machine.pcb->ims.v86s.int_table) {
674 /*
675 * Hardware assist on
676 */
677 if ((thr_act->machine.pcb->ims.v86s.flags &
678 (EFL_IF|V86_IF_PENDING)) == 0)
679 state->efl &= ~EFL_IF;
680 }
681 }
682 else {
683 /*
684 * 386 mode.
685 */
686 state->ds = saved_state->ds & 0xffff;
687 state->es = saved_state->es & 0xffff;
688 state->fs = saved_state->fs & 0xffff;
689 state->gs = saved_state->gs & 0xffff;
690 }
691 *count = i386_SAVED_STATE_COUNT;
692 break;
693 }
694
695 case i386_NEW_THREAD_STATE:
696 case i386_REGS_SEGS_STATE:
697 {
698 register struct i386_new_thread_state *state;
699 register struct i386_saved_state *saved_state;
700
701 if (*count < i386_NEW_THREAD_STATE_COUNT)
702 return(KERN_INVALID_ARGUMENT);
703
704 state = (struct i386_new_thread_state *) tstate;
705 saved_state = USER_REGS(thr_act);
706
707 /*
708 * General registers.
709 */
710 state->edi = saved_state->edi;
711 state->esi = saved_state->esi;
712 state->ebp = saved_state->ebp;
713 state->ebx = saved_state->ebx;
714 state->edx = saved_state->edx;
715 state->ecx = saved_state->ecx;
716 state->eax = saved_state->eax;
717 state->eip = saved_state->eip;
718 state->efl = saved_state->efl;
719 state->uesp = saved_state->uesp;
720
721 state->cs = saved_state->cs;
722 state->ss = saved_state->ss;
723 if (saved_state->efl & EFL_VM) {
724 /*
725 * V8086 mode.
726 */
727 state->ds = saved_state->v86_segs.v86_ds & 0xffff;
728 state->es = saved_state->v86_segs.v86_es & 0xffff;
729 state->fs = saved_state->v86_segs.v86_fs & 0xffff;
730 state->gs = saved_state->v86_segs.v86_gs & 0xffff;
731
732 if (thr_act->machine.pcb->ims.v86s.int_table) {
733 /*
734 * Hardware assist on
735 */
736 if ((thr_act->machine.pcb->ims.v86s.flags &
737 (EFL_IF|V86_IF_PENDING)) == 0)
738 state->efl &= ~EFL_IF;
739 }
740 }
741 else {
742 /*
743 * 386 mode.
744 */
745 state->ds = saved_state->ds & 0xffff;
746 state->es = saved_state->es & 0xffff;
747 state->fs = saved_state->fs & 0xffff;
748 state->gs = saved_state->gs & 0xffff;
749 }
750 *count = i386_NEW_THREAD_STATE_COUNT;
751 break;
752 }
753
754 case THREAD_SYSCALL_STATE:
755 {
756 register struct thread_syscall_state *state;
757 register struct i386_saved_state *saved_state = USER_REGS(thr_act);
758
759 state = (struct thread_syscall_state *) tstate;
760 state->eax = saved_state->eax;
761 state->edx = saved_state->edx;
762 state->efl = saved_state->efl;
763 state->eip = saved_state->eip;
764 state->esp = saved_state->uesp;
765 *count = i386_THREAD_SYSCALL_STATE_COUNT;
766 break;
767 }
768
769 case THREAD_STATE_FLAVOR_LIST:
770 if (*count < 5)
771 return (KERN_INVALID_ARGUMENT);
772 tstate[0] = i386_NEW_THREAD_STATE;
773 tstate[1] = i386_FLOAT_STATE;
774 tstate[2] = i386_ISA_PORT_MAP_STATE;
775 tstate[3] = i386_V86_ASSIST_STATE;
776 tstate[4] = THREAD_SYSCALL_STATE;
777 *count = 5;
778 break;
779
780 case i386_FLOAT_STATE: {
781 if (*count < i386_old_FLOAT_STATE_COUNT)
782 return(KERN_INVALID_ARGUMENT);
783 if (*count< i386_FLOAT_STATE_COUNT) {
784 *count = i386_old_FLOAT_STATE_COUNT;
785 return fpu_get_state(thr_act,(struct i386_float_state *)tstate);
786 } else {
787 *count = i386_FLOAT_STATE_COUNT;
788 return fpu_get_fxstate(thr_act,(struct i386_float_state *)tstate);
789 }
790 }
791
792 /*
793 * Temporary - replace by i386_io_map
794 */
795 case i386_ISA_PORT_MAP_STATE: {
796 register struct i386_isa_port_map_state *state;
797 register iopb_tss_t tss;
798
799 if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
800 return(KERN_INVALID_ARGUMENT);
801
802 state = (struct i386_isa_port_map_state *) tstate;
803 tss = thr_act->machine.pcb->ims.io_tss;
804
805 if (tss == 0) {
806 unsigned int i;
807
808 /*
809 * The thread has no ktss, so no IO permissions.
810 */
811
812 for (i = 0; i < sizeof state->pm; i++)
813 state->pm[i] = 0xff;
814 } else {
815 /*
816 * The thread has its own ktss.
817 */
818
819 bcopy((char *) tss->bitmap,
820 (char *) state->pm,
821 sizeof state->pm);
822 }
823
824 *count = i386_ISA_PORT_MAP_STATE_COUNT;
825 break;
826 }
827
828 case i386_V86_ASSIST_STATE:
829 {
830 register struct i386_v86_assist_state *state;
831
832 if (*count < i386_V86_ASSIST_STATE_COUNT)
833 return KERN_INVALID_ARGUMENT;
834
835 state = (struct i386_v86_assist_state *) tstate;
836 state->int_table = thr_act->machine.pcb->ims.v86s.int_table;
837 state->int_count = thr_act->machine.pcb->ims.v86s.int_count;
838
839 *count = i386_V86_ASSIST_STATE_COUNT;
840 break;
841 }
842
843 case i386_THREAD_STATE: {
844 struct i386_saved_state *saved_state;
845 i386_thread_state_t *state;
846
847 saved_state = USER_REGS(thr_act);
848 state = (i386_thread_state_t *)tstate;
849
850 state->eax = saved_state->eax;
851 state->ebx = saved_state->ebx;
852 state->ecx = saved_state->ecx;
853 state->edx = saved_state->edx;
854 state->edi = saved_state->edi;
855 state->esi = saved_state->esi;
856 state->ebp = saved_state->ebp;
857 state->esp = saved_state->uesp;
858 state->eflags = saved_state->efl;
859 state->eip = saved_state->eip;
860 state->cs = saved_state->cs;
861 state->ss = saved_state->ss;
862 state->ds = saved_state->ds;
863 state->es = saved_state->es;
864 state->fs = saved_state->fs;
865 state->gs = saved_state->gs;
866 break;
867 }
868
869 default:
870 return(KERN_INVALID_ARGUMENT);
871 }
872
873 return(KERN_SUCCESS);
874 }
875
/*
 * Initialize the machine-dependent state for a new thread.
 * Points the thread at its embedded PCB, forces the initial saved
 * state to user-mode selectors/EFLAGS, seeds the per-thread LDT
 * descriptors from the system LDT, and allocates a kernel stack.
 * Always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_create(
	thread_t thread,
	__unused task_t task)
{
	/* The PCB is embedded in the thread structure, not allocated. */
	pcb_t pcb = &thread->machine.xxx_pcb;

	thread->machine.pcb = pcb;

	simple_lock_init(&pcb->lock, 0);

	/*
	 * Guarantee that the bootstrapped thread will be in user
	 * mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
	pcb->iss.efl = EFL_USER_SET;
	{
		struct real_descriptor *ldtp;
		/* Default both cthread and user LDT descriptors to USER_DS. */
		ldtp = (struct real_descriptor *)ldt;
		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
		pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
		pcb->uldt_selector = 0;		/* no private LDT entry yet */
	}

	/*
	 * Allocate a kernel stack per thread.
	 */
	stack_alloc(thread);

	return(KERN_SUCCESS);
}
916
/*
 * Machine-dependent cleanup prior to destroying a thread.
 * Releases the per-thread IO bitmap TSS, FP save area, and private
 * LDT if any were allocated, then clears the PCB pointer.
 */
void
machine_thread_destroy(
	thread_t thread)
{
	register pcb_t pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ims.io_tss != 0)
		iopb_destroy(pcb->ims.io_tss);
	if (pcb->ims.ifps != 0)
		fpu_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	thread->machine.pcb = (pcb_t)0;
}
936
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor.
 * Updates the per-CPU active-thread pointers with preemption
 * disabled so the pair is set atomically w.r.t. this CPU.
 */
void
machine_set_current_thread( thread_t thread )
{
	mp_disable_preemption();

	current_cpu_datap()->cpu_active_thread = thread;
	current_cpu_datap()->cpu_active_kloaded = THREAD_NULL;

	mp_enable_preemption();
}
951
/*
 * Machine-dependent self-termination hook; no-op on i386.
 */
void
machine_thread_terminate_self(void)
{
}
956
/*
 * act_machine_return:
 * Terminate the current thread activation; does not return.
 * `code' must be KERN_TERMINATED.
 */
void
act_machine_return(int code)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	/* Reaching here means thread_terminate_self() returned - a bug. */
	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
977
978
/*
 * Perform machine-dependent per-thread initializations:
 * sets up the FPU save-area zone and the IO-permission-bitmap
 * machinery. Called once at boot.
 */
void
machine_thread_init(void)
{
	fpu_module_init();
	iopb_init();
}
988
989 /*
990 * Some routines for debugging activation code
991 */
992 static void dump_handlers(thread_t);
993 void dump_regs(thread_t);
994 int dump_act(thread_t thr_act);
995
996 static void
997 dump_handlers(thread_t thr_act)
998 {
999 ReturnHandler *rhp = thr_act->handlers;
1000 int counter = 0;
1001
1002 printf("\t");
1003 while (rhp) {
1004 if (rhp == &thr_act->special_handler){
1005 if (rhp->next)
1006 printf("[NON-Zero next ptr(%x)]", rhp->next);
1007 printf("special_handler()->");
1008 break;
1009 }
1010 printf("hdlr_%d(%x)->",counter,rhp->handler);
1011 rhp = rhp->next;
1012 if (++counter > 32) {
1013 printf("Aborting: HUGE handler chain\n");
1014 break;
1015 }
1016 }
1017 printf("HLDR_NULL\n");
1018 }
1019
1020 void
1021 dump_regs(thread_t thr_act)
1022 {
1023 if (thr_act->machine.pcb) {
1024 register struct i386_saved_state *ssp = USER_REGS(thr_act);
1025 /* Print out user register state */
1026 printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
1027 ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
1028 printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
1029 ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
1030 printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
1031 }
1032 }
1033
/*
 * dump_act:
 * Debug helper - print a summary of an activation (task, counts,
 * PCB, kernel stack registers, handler chain, user registers).
 * Returns the activation pointer as an int, or 0 if NULL.
 */
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(0x%x)(%d): task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%x\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %x eip %x ebx %x esp %x iss %x\n",
		       stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);
	dump_regs(thr_act);
	return((int)thr_act);
}
1061
1062 user_addr_t
1063 get_useraddr(void)
1064 {
1065
1066 thread_t thr_act = current_thread();
1067
1068 if (thr_act->machine.pcb)
1069 return(thr_act->machine.pcb->iss.eip);
1070 else
1071 return(0);
1072
1073 }
1074
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	/* kdebug trace of the detach. */
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;	/* thread is now stackless */
	return(stack);
}
1093
/*
 * attach a kernel stack to a thread and initialize it
 *
 * Seeds the stack's kernel-state frame so that a context switch to
 * this thread resumes at Thread_continue with thread_continue in
 * %ebx, and links the exception frame to the thread's saved state.
 */

void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct i386_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	statep = STACK_IKS(stack);
	thread->kernel_stack = stack;

	/* First resume enters Thread_continue, which calls thread_continue. */
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);

	STACK_IEL(stack)->saved_state = &thread->machine.pcb->iss;

	return;
}
1121
1122 /*
1123 * move a stack from old to new thread
1124 */
1125
1126 void
1127 machine_stack_handoff(thread_t old,
1128 thread_t new)
1129 {
1130 vm_offset_t stack;
1131
1132 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF),
1133 thread, thread->priority,
1134 thread->sched_pri, 0, 0);
1135
1136 assert(new);
1137 assert(old);
1138
1139 stack = machine_stack_detach(old);
1140 machine_stack_attach(new, stack);
1141
1142 PMAP_SWITCH_CONTEXT(old->task, new->task, cpu_number());
1143
1144 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
1145 (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
1146
1147 machine_set_current_thread(new);
1148
1149 current_cpu_datap()->cpu_active_stack = new->kernel_stack;
1150
1151 return;
1152 }
1153
/*
 * Snapshot of an activation's machine context, used by
 * act_thread_csave()/act_thread_catt() to save and restore integer
 * and floating-point state across an exec-style transition.
 */
struct i386_act_context {
	struct i386_saved_state ss;	/* integer/segment register state */
	struct i386_float_state fs;	/* FP/FX state */
};
1158
1159 void *
1160 act_thread_csave(void)
1161 {
1162 struct i386_act_context *ic;
1163 kern_return_t kret;
1164 int val;
1165
1166 ic = (struct i386_act_context *)kalloc(sizeof(struct i386_act_context));
1167
1168 if (ic == (struct i386_act_context *)NULL)
1169 return((void *)0);
1170
1171 val = i386_SAVED_STATE_COUNT;
1172 kret = machine_thread_get_state(current_thread(),
1173 i386_SAVED_STATE,
1174 (thread_state_t) &ic->ss,
1175 &val);
1176 if (kret != KERN_SUCCESS) {
1177 kfree(ic,sizeof(struct i386_act_context));
1178 return((void *)0);
1179 }
1180 val = i386_FLOAT_STATE_COUNT;
1181 kret = machine_thread_get_state(current_thread(),
1182 i386_FLOAT_STATE,
1183 (thread_state_t) &ic->fs,
1184 &val);
1185 if (kret != KERN_SUCCESS) {
1186 kfree(ic,sizeof(struct i386_act_context));
1187 return((void *)0);
1188 }
1189 return(ic);
1190 }
1191 void
1192 act_thread_catt(void *ctx)
1193 {
1194 struct i386_act_context *ic;
1195 kern_return_t kret;
1196
1197 ic = (struct i386_act_context *)ctx;
1198
1199 if (ic == (struct i386_act_context *)NULL)
1200 return;
1201
1202 kret = machine_thread_set_state(current_thread(),
1203 i386_SAVED_STATE,
1204 (thread_state_t) &ic->ss,
1205 i386_SAVED_STATE_COUNT);
1206 if (kret != KERN_SUCCESS)
1207 goto out;
1208
1209 kret = machine_thread_set_state(current_thread(),
1210 i386_FLOAT_STATE,
1211 (thread_state_t) &ic->fs,
1212 i386_FLOAT_STATE_COUNT);
1213 if (kret != KERN_SUCCESS)
1214 goto out;
1215 out:
1216 kfree(ic,sizeof(struct i386_act_context));
1217 }
1218
/*
 * act_thread_cfree:
 * Discard a context captured by act_thread_csave() without
 * restoring it.
 */
void act_thread_cfree(void *ctx)
{
	kfree(ctx,sizeof(struct i386_act_context));
}
1223