/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/iopb_entries.h>
#include <i386/mp_desc.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};
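/*
 * Illustrative sketch only (not part of the original file): callers such
 * as the thread_get_state() path are assumed to validate an incoming
 * count against this table, indexing it by the flavor constant. The
 * helper name below is hypothetical.
 */
#if 0	/* example only */
static inline boolean_t
machine_state_count_ok(thread_flavor_t flavor, mach_msg_type_number_t count)
{
	/* flavor is assumed to be a small integer indexing the table above */
	return (count >= _MachineStateCount[flavor]);
}
#endif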
zone_t		iss_zone32;		/* zone for 32bit saved_state area */
zone_t		iss_zone64;		/* zone for 64bit saved_state area */
zone_t		ids_zone32;		/* zone for 32bit debug_state area */
zone_t		ids_zone64;		/* zone for 64bit debug_state area */
void		act_machine_throughcall(thread_t thr_act);
user_addr_t	get_useraddr(void);
void		act_machine_return(int);
void		act_machine_sv_free(thread_t, int);

extern thread_t	Switch_context(
			thread_t		old,
			thread_continue_t	cont,
			thread_t		new);
extern void	Thread_continue(void);
extern void	Load_context(
			thread_t		thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
/*
 * Don't let an illegal value for dr7 get set. Specifically,
 * check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * trace trap.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on 64-bit.
	 */
	if (current_cpu_datap()->cpu_is64bit == TRUE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
				((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
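/*
 * Illustrative sketch only (not part of the original file): composing a
 * DR7 value that dr7_is_valid() accepts - a single local, one-byte write
 * watchpoint in DR0. The bit positions follow the Intel SDM layout
 * assumed by the checks above; the function name is hypothetical.
 */
#if 0	/* example only */
static void
example_dr7(void)
{
	uint32_t dr7 = 0;

	dr7 |= 0x1;		/* L0: enable breakpoint 0 locally */
	dr7 |= 0x1 << 16;	/* R/W0 = 01B: break on data writes */
				/* LEN0 (bits 18-19) left 00B: one byte */

	if (dr7_is_valid(&dr7)) {
		/* dr7 now also carries the Intel-mandated fixed bits */
	}
}
#endif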
static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t	*ids;
	pcb_t			pcb;

	pcb = thread->machine.pcb;

	if (pcb->ids == NULL) {
		ids = zalloc(ids_zone32);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone32, ids);
		}
	}

	if (!dr7_is_valid(&ds->dr7))
		goto err;

	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	ids = pcb->ids;

	ids->dr0 = ds->dr0;
	ids->dr1 = ds->dr1;
	ids->dr2 = ds->dr2;
	ids->dr3 = ds->dr3;
	ids->dr6 = ds->dr6;
	ids->dr7 = ds->dr7;

	return (KERN_SUCCESS);

err:
	return (KERN_INVALID_ARGUMENT);
}
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t	*ids;
	pcb_t			pcb;

	pcb = thread->machine.pcb;

	if (pcb->ids == NULL) {
		ids = zalloc(ids_zone64);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone64, ids);
		}
	}

	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		goto err;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	ids = pcb->ids;

	ids->dr0 = ds->dr0;
	ids->dr1 = ds->dr1;
	ids->dr2 = ds->dr2;
	ids->dr3 = ds->dr3;
	ids->dr6 = ds->dr6;
	ids->dr7 = ds->dr7;

	return (KERN_SUCCESS);

err:
	return (KERN_INVALID_ARGUMENT);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.pcb->ids;

	if (saved_state) {
		ds->dr0 = saved_state->dr0;
		ds->dr1 = saved_state->dr1;
		ds->dr2 = saved_state->dr2;
		ds->dr3 = saved_state->dr3;
		ds->dr4 = saved_state->dr4;
		ds->dr5 = saved_state->dr5;
		ds->dr6 = saved_state->dr6;
		ds->dr7 = saved_state->dr7;
	} else
		bzero(ds, sizeof *ds);
}
static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

	if (saved_state) {
		ds->dr0 = saved_state->dr0;
		ds->dr1 = saved_state->dr1;
		ds->dr2 = saved_state->dr2;
		ds->dr3 = saved_state->dr3;
		ds->dr4 = saved_state->dr4;
		ds->dr5 = saved_state->dr5;
		ds->dr6 = saved_state->dr6;
		ds->dr7 = saved_state->dr7;
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	vm_offset_t		pcb_stack_top;
	vm_offset_t		hi_pcb_stack_top;
	vm_offset_t		hi_iss;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);
	STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

	if (!cpu_mode_is64bit()) {
		x86_saved_state32_tagged_t	*hi_iss32;
		/*
		 *	Save a pointer to the top of the "kernel" stack -
		 *	actually the place in the PCB where a trap into
		 *	kernel mode will push the registers.
		 */
		hi_iss = (vm_offset_t)((unsigned long)
			pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
			((unsigned long)pcb->iss & PAGE_MASK));

		cdp->cpu_hi_iss = (void *) hi_iss;

		pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
		pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

		hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
		assert(hi_iss32->tag == x86_SAVED_STATE32);

		hi_pcb_stack_top = (int) (hi_iss32 + 1);

		/*
		 * For fast syscall, top of interrupt stack points to pcb stack
		 */
		*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

		current_ktss()->esp0 = hi_pcb_stack_top;

	/* XXX: This check is performed against the thread save state flavor rather than the
	 * task's 64-bit feature flag because of the thread/task 64-bit state divergence
	 * that can arise in task_set_64bit() on x86. When that is addressed, we can
	 * revert to checking the task 64-bit feature flag. The assert below is retained
	 * for that reason.
	 */
	} else if (is_saved_state64(pcb->iss)) {
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = UBER64(isf);
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);
		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = UBER64(pcb_stack_top);

		cdp->cpu_task_map = new->map->pmap->pm_kernel_cr3 ?
				    TASK_MAP_64BIT_SHARED : TASK_MAP_64BIT;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;

	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (int) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = UBER64(isf);

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = UBER64(pcb_stack_top);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		cdp->cpu_task_map = TASK_MAP_32BIT;

		/*
		 * Disable the 64-bit user code segment, USER64_CS.
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
	}

	/*
	 * Set the thread`s cthread (a.k.a pthread) descriptor.
	 * For 32-bit user this involves setting the USER_CTHREAD
	 * descriptor in the LDT to point to the cthread data.
	 * This involves copying in the pre-initialized descriptor.
	 */
	ldtp = (struct real_descriptor *)current_ldt();
	ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	if (pcb->uldt_selector != 0)
		ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;

	/*
	 * For 64-bit, we additionally set the 64-bit User GS base
	 * address. On return to 64-bit user, the GS.Base MSR will be written.
	 */
	cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}
}
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t		new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(new);
	Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number())

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
		(int)old, (int)new, old->sched_pri, new->sched_pri, 0);

	return(Switch_context(old, continuation, new));
}
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.pcb->ifps) {
		(void) fpu_set_fxstate(thread, NULL);

		if (thread == current_thread())
			clear_fpu();
	}
	return KERN_SUCCESS;
}
unsigned int
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
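/*
 * Illustrative sketch only (not part of the original file): from user
 * space, the width-independent x86_THREAD_STATE flavor above lets one
 * call work for both 32-bit and 64-bit targets. A minimal sketch using
 * the public Mach API; print_pc() is a hypothetical name.
 */
#if 0	/* user-space example, not kernel code */
#include <stdio.h>
#include <mach/mach.h>

static kern_return_t
print_pc(thread_act_t target)
{
	x86_thread_state_t	ts;
	mach_msg_type_number_t	count = x86_THREAD_STATE_COUNT;
	kern_return_t		kr;

	kr = thread_get_state(target, x86_THREAD_STATE,
			      (thread_state_t)&ts, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	/* the kernel fills tsh.flavor with the width it actually returned */
	if (ts.tsh.flavor == x86_THREAD_STATE64)
		printf("rip=0x%llx\n", (unsigned long long)ts.uts.ts64.rip);
	else
		printf("eip=0x%x\n", (unsigned)ts.uts.ts32.eip);

	return KERN_SUCCESS;
}
#endif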
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->err = saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs ? ts->cs : USER_CS;
	saved_state->ss = ts->ss ? ts->ss : USER_DS;
	saved_state->ds = ts->ds ? ts->ds : USER_DS;
	saved_state->es = ts->es ? ts->es : USER_DS;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor)
	{
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 && state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 && state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}

	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else
		if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor)  {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;
		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;
		break;
	}

	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;
		break;
	}

	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;
		break;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || current_cpu_datap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {

	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *)tstate;

		assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
		saved_state = saved_state32(current_cpu_datap()->cpu_int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}
	break; // for completeness

	case x86_THREAD_STATE:
	{
		// wrap a 32 bit thread state into a 32/64bit clean thread state
		x86_thread_state_t	*state;
		x86_saved_state32_t	*saved_state;

		if(*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;
		assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
		saved_state = saved_state32(current_cpu_datap()->cpu_int_state);

		state->tsh.flavor = x86_THREAD_STATE32;
		state->tsh.count = x86_THREAD_STATE32_COUNT;

		/*
		 * General registers.
		 */
		state->uts.ts32.eax = saved_state->eax;
		state->uts.ts32.ebx = saved_state->ebx;
		state->uts.ts32.ecx = saved_state->ecx;
		state->uts.ts32.edx = saved_state->edx;
		state->uts.ts32.edi = saved_state->edi;
		state->uts.ts32.esi = saved_state->esi;
		state->uts.ts32.ebp = saved_state->ebp;
		state->uts.ts32.esp = saved_state->uesp;
		state->uts.ts32.eflags = saved_state->efl;
		state->uts.ts32.eip = saved_state->eip;
		state->uts.ts32.cs = saved_state->cs;
		state->uts.ts32.ss = saved_state->ss;
		state->uts.ts32.ds = saved_state->ds & 0xffff;
		state->uts.ts32.es = saved_state->es & 0xffff;
		state->uts.ts32.fs = saved_state->fs & 0xffff;
		state->uts.ts32.gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	pcb_t			pcb = &thread->machine.xxx_pcb;
	struct real_descriptor	*ldtp;
	pmap_paddr_t		paddr;
	x86_saved_state_t	*iss;

	inval_copy_windows(thread);

	thread->machine.physwindow_pte = 0;
	thread->machine.physwindow_busy = 0;

	if (task_has_64BitAddr(task)) {
		x86_sframe64_t		*sf64;

		sf64 = (x86_sframe64_t *)zalloc(iss_zone64);

		if (sf64 == NULL)
			panic("iss_zone64");
		pcb->sf = (void *)sf64;

		bzero((char *)sf64, sizeof(x86_sframe64_t));

		iss = (x86_saved_state_t *) &sf64->ssf;
		iss->flavor = x86_SAVED_STATE64;
		/*
		 *	Guarantee that the bootstrapped thread will be in user
		 *	mode.
		 */
		iss->ss_64.isf.rflags = EFL_USER_SET;
		iss->ss_64.isf.cs = USER64_CS;
		iss->ss_64.isf.ss = USER_DS;
		iss->ss_64.fs = USER_DS;
		iss->ss_64.gs = USER_DS;
	} else {
		if (cpu_mode_is64bit()) {
			x86_sframe_compat32_t	*sfc32;

			sfc32 = (x86_sframe_compat32_t *)zalloc(iss_zone32);
			if (sfc32 == NULL)
				panic("iss_zone32");
			pcb->sf = (void *)sfc32;

			bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

			iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
			iss->flavor = x86_SAVED_STATE32;
#if DEBUG
			{
				x86_saved_state_compat32_t *xssc;

				xssc = (x86_saved_state_compat32_t *) iss;
				xssc->pad_for_16byte_alignment[0] = 0x64326432;
				xssc->pad_for_16byte_alignment[1] = 0x64326432;
			}
#endif
		} else {
			x86_sframe32_t		*sf32;

			sf32 = (x86_sframe32_t *)zalloc(iss_zone32);

			if (sf32 == NULL)
				panic("iss_zone32");
			pcb->sf = (void *)sf32;

			bzero((char *)sf32, sizeof(x86_sframe32_t));

			iss = (x86_saved_state_t *) &sf32->ssf;
			iss->flavor = x86_SAVED_STATE32;
		}

		/*
		 *	Guarantee that the bootstrapped thread will be in user
		 *	mode.
		 */
		iss->ss_32.cs = USER_CS;
		iss->ss_32.ss = USER_DS;
		iss->ss_32.ds = USER_DS;
		iss->ss_32.es = USER_DS;
		iss->ss_32.fs = USER_DS;
		iss->ss_32.gs = USER_DS;
		iss->ss_32.efl = EFL_USER_SET;
	}
	pcb->iss = iss;

	thread->machine.pcb = pcb;
	simple_lock_init(&pcb->lock, 0);

	ldtp = (struct real_descriptor *)pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
	pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
	pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
	pcb->uldt_selector = 0;

	pcb->iss_pte0 = (uint64_t)pte_kernel_rw(kvtophys((vm_offset_t)pcb->iss));

	if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)(pcb->iss) + PAGE_SIZE))))
		pcb->iss_pte1 = INTEL_PTE_INVALID;
	else
		pcb->iss_pte1 = (uint64_t)pte_kernel_rw(paddr);

	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ifps != 0)
		fpu_free(pcb->ifps);
	if (pcb->sf != 0) {
		if (thread_is_64bit(thread))
			zfree(iss_zone64, pcb->sf);
		else
			zfree(iss_zone32, pcb->sf);
		pcb->sf = 0;
	}
	if (pcb->ids) {
		if (thread_is_64bit(thread))
			zfree(ids_zone64, pcb->ids);
		else
			zfree(ids_zone32, pcb->ids);
	}
	thread->machine.pcb = (pcb_t)0;
}
void
machine_thread_switch_addrmode(thread_t thread, int oldmode_is64bit)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (oldmode_is64bit)
		zfree(iss_zone64, pcb->sf);
	else
		zfree(iss_zone32, pcb->sf);

	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread())
		act_machine_switch_pcb(thread);
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread( thread_t thread )
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * This is called when a task is terminated.
 */
void
machine_thread_terminate_self(void)
{
	task_t self_task = current_task();

	if (self_task) {
		user_ldt_t user_ldt = self_task->i386_ldt;
		if (user_ldt != 0) {
			self_task->i386_ldt = 0;
			user_ldt_free(user_ldt);
		}
	}
}
void
act_machine_return(int code)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		iss_zone64 = zinit(sizeof(x86_sframe64_t),
				THREAD_MAX * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone32 = zinit(sizeof(x86_sframe_compat32_t),
				THREAD_MAX * sizeof(x86_sframe_compat32_t),
				THREAD_CHUNK * sizeof(x86_sframe_compat32_t),
				"x86_32 saved state");

		ids_zone32 = zinit(sizeof(x86_debug_state32_t),
				THREAD_MAX * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86_32 debug state");
		ids_zone64 = zinit(sizeof(x86_debug_state64_t),
				THREAD_MAX * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");
	} else {
		iss_zone32 = zinit(sizeof(x86_sframe32_t),
				THREAD_MAX * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone32 = zinit(sizeof(x86_debug_state32_t),
				THREAD_MAX * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86 debug state");
	}
	fpu_module_init();
}
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%x)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%x)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb == NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(0x%x)(%d): task=%x(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%x\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
		       stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);

	return((int)thr_act);
}
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thr_act->machine.pcb == NULL)
		return(0);

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state32 *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);

	return;
}
/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(new);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
		old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

	machine_set_current_thread(new);

	return;
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);

		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
			if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
				machine_thread_set_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						x86_DEBUG_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}

void act_thread_cfree(__unused void *ctx)
{
}