/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>    /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */
    0,
    x86_THREAD_STATE32_COUNT,
    x86_FLOAT_STATE32_COUNT,
    x86_EXCEPTION_STATE32_COUNT,
    x86_THREAD_STATE64_COUNT,
    x86_FLOAT_STATE64_COUNT,
    x86_EXCEPTION_STATE64_COUNT,
    x86_THREAD_STATE_COUNT,
    x86_FLOAT_STATE_COUNT,
    x86_EXCEPTION_STATE_COUNT,
    0,
    x86_SAVED_STATE32_COUNT,
    x86_SAVED_STATE64_COUNT,
    x86_DEBUG_STATE32_COUNT,
    x86_DEBUG_STATE64_COUNT,
    x86_DEBUG_STATE_COUNT
};

zone_t iss_zone;    /* zone for saved_state area */
zone_t ids_zone;    /* zone for debug_state area */
extern void Thread_continue(void);
extern void Load_context(
    thread_t            thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if CONFIG_COUNTERS
static void machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static void pmc_swi(thread_t /* old */, thread_t /* new */);

static void
pmc_swi(thread_t old, thread_t new) {
    current_cpu_datap()->csw_old_thread = old;
    current_cpu_datap()->csw_new_thread = new;
    pal_pmc_swi();
}

static void
machine_pmc_cswitch(thread_t old, thread_t new) {
    if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
        pmc_swi(old, new);
    }
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
    *old = current_cpu_datap()->csw_old_thread;
    *new = current_cpu_datap()->csw_new_thread;
}
#endif /* CONFIG_COUNTERS */
#if KPC
void
ml_kpc_cswitch(thread_t old, thread_t new)
{
    if (!kpc_threads_counting)
        return;

    /* call the kpc function */
    kpc_switch_context(old, new);
}
#endif

#if KPERF
void
ml_kperf_cswitch(thread_t old, thread_t new)
{
    if (!kperf_cswitch_hook)
        return;

    /* call the kperf function */
    kperf_switch_context(old, new);
}
#endif

#if HYPERVISOR
void
ml_hv_cswitch(thread_t old, thread_t new)
{
    if (old->hv_thread_target)
        hv_callbacks.preempt(old->hv_thread_target);

    if (new->hv_thread_target)
        hv_callbacks.dispatch(new->hv_thread_target);
}
#endif
/*
 * Don't let an illegal value for dr7 get set. Specifically,
 * check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * trap.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate i/o reads and write
     */
    if (!(get_cr4() & CR4_DE))
        for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
             i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++)
        if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
            ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
            return (FALSE);

    /*
     * Intel docs have these bits fixed.
     */
    *dr7 |= 0x1 << 10;      /* set bit 10 to 1 */
    *dr7 &= ~(0x1 << 11);   /* set bit 11 to 0 */
    *dr7 &= ~(0x1 << 12);   /* set bit 12 to 0 */
    *dr7 &= ~(0x1 << 14);   /* set bit 14 to 0 */
    *dr7 &= ~(0x1 << 15);   /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */
    if (*dr7 & 0x2)
        return (FALSE);

    if (*dr7 & (0x2 << 2))
        return (FALSE);

    if (*dr7 & (0x2 << 4))
        return (FALSE);

    if (*dr7 & (0x2 << 6))
        return (FALSE);

    return (TRUE);
}
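/*
 * For reference: in DR7, bits 16-31 hold the per-breakpoint R/W and LEN
 * fields (4 bits per breakpoint) that the loops above inspect, the odd
 * low-order bits (1, 3, 5, 7) are the global-enable bits rejected above,
 * and bits 10, 11, 12, 14 and 15 are architecturally fixed, which is why
 * they are forced to their defined values before those checks.
 */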
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
    if (!dr7_is_valid(&ds->dr7))
        return FALSE;

    return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
    if (!dr7_is_valid((uint32_t *)&ds->dr7))
        return FALSE;

    /*
     * Don't allow the user to set debug addresses above their max
     * value
     */
    if (ds->dr7 & 0x1)
        if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1 << 2))
        if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1 << 4))
        if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1 << 6))
        if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid32(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
        }
    }

    copy_debug_state32(ds, ids, FALSE);

    return (KERN_SUCCESS);
}
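/*
 * Note on the pattern above: the debug-state area is allocated before the
 * pcb lock is taken so zalloc() never runs with the lock held; if another
 * thread installed an area first, the freshly allocated one is simply
 * returned to the zone.
 */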
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid64(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

#if HYPERVISOR
        if (thread->hv_thread_target) {
            hv_callbacks.volatile_state(thread->hv_thread_target,
                HV_DEBUG_STATE);
        }
#endif

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
        }
    }

    copy_debug_state64(ds, ids, FALSE);

    return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.ids;

    if (saved_state) {
        copy_debug_state32(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.ids;

    if (saved_state) {
        copy_debug_state64(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t        new)
{
#if CONFIG_COUNTERS
    machine_pmc_cswitch(NULL, new);
#endif
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(NULL, new);
    Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
    thread_t            old,
    thread_continue_t   continuation,
    thread_t            new)
{
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif
#if KPC
    ml_kpc_cswitch(old, new);
#endif
#if KPERF
    ml_kperf_cswitch(old, new);
#endif

    /*
     * Save FP registers if in use.
     */
    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Monitor the stack depth and report new max,
     * not worrying about races.
     */
    vm_offset_t depth = current_stack_depth();
    if (depth > kernel_stack_depth_max) {
        kernel_stack_depth_max = depth;
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
            (long) depth, 0, 0, 0, 0);
    }

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    PMAP_SWITCH_CONTEXT(old, new, cpu_number());

    /*
     * Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(old, new);

#if HYPERVISOR
    ml_hv_cswitch(old, new);
#endif

    return (Switch_context(old, continuation, new));
}
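/*
 * Switch_context() performs the actual kernel-stack switch; the thread it
 * returns (now running on the new thread's stack) is the one we switched
 * away from, which is what the scheduler expects back from
 * machine_switch_context().
 */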
thread_t
machine_processor_shutdown(
    thread_t        thread,
    void            (*doshutdown)(processor_t),
    processor_t     processor)
{
    fpu_save_context(thread);
    PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
    return (Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
    thread_t thread)
{
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're the target, re-arm the no-fpu trap.
     */
    if (thread->machine.ifps) {
        (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

        if (thread == current_thread())
            clear_fpu();
    }

    if (thread->machine.ids) {
        zfree(ids_zone, thread->machine.ids);
        thread->machine.ids = NULL;
    }

    return KERN_SUCCESS;
}
int
get_eflags_exportmask(void)
{
    return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32     - internal save/restore general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_SAVED_STATE64     - internal save/restore general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_THREAD_STATE32    - external set/get general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_THREAD_STATE64    - external set/get general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_SAVED_STATE       - external set/get general register state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32     - internal/external save/restore float and xmm state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_FLOAT_STATE64     - internal/external save/restore float and xmm state on 64 bit processors
 *                         for 64bit tasks only
 * x86_FLOAT_STATE       - external save/restore float and xmm state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *                         for 64bit tasks only
 * x86_EXCEPTION_STATE   - external get exception state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 */
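/*
 * Illustration only (not part of this file's build): a user-mode caller
 * typically reaches these flavors through the Mach thread_get_state() /
 * thread_set_state() interfaces, e.g.
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *	                                    (thread_state_t)&ts, &count);
 *
 * Those calls arrive here as machine_thread_get_state() and
 * machine_thread_set_state() with the same flavor, state buffer, and count.
 */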
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     */
    ts->cs = USER_CS;
    /*
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     * manually any more.
     */
    ts->ss = USER_DS;
    ts->ds = USER_DS;
    ts->es = USER_DS;

    /* Set GS to CTHREAD only if it's been established */
    ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
                                      ts->ss,
                                      ts->ds,
                                      ts->es,
                                      ts->fs,
                                      ts->gs))
        return (KERN_INVALID_ARGUMENT);

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
        saved_state->cs = SYSENTER_TF_CS;

    return (KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip))
        return (KERN_INVALID_ARGUMENT);

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;
    saved_state->isf.cs = USER64_CS;
    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;

    return (KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor) {
    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
                                          state->ss,
                                          state->ds,
                                          state->es,
                                          state->fs,
                                          state->gs))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
            state->cs = SYSENTER_TF_CS;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs))
            return KERN_INVALID_ARGUMENT;

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        /*
         * General registers
         */
        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (count != x86_FLOAT_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (count != x86_FLOAT_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        }
        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        return (KERN_INVALID_ARGUMENT);
    }

    case x86_AVX_STATE32:
    {
        if (count != x86_AVX_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (count != x86_AVX_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE:
    {
        x86_avx_state_t *state;

        if (count != x86_AVX_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_avx_state_t *)tstate;
        if (state->ash.flavor == x86_AVX_STATE64 &&
            state->ash.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act,
                                   (thread_state_t)&state->ufs.as64,
                                   x86_AVX_STATE64);
        }
        if (state->ash.flavor == x86_FLOAT_STATE32 &&
            state->ash.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act,
                                   (thread_state_t)&state->ufs.as32,
                                   x86_AVX_STATE32);
        }
        return (KERN_INVALID_ARGUMENT);
    }

    case x86_THREAD_STATE32:
    {
        if (count != x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
    }

    case x86_THREAD_STATE64:
    {
        if (count != x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
                   state->tsh.count == x86_THREAD_STATE32_COUNT &&
                   !thread_is_64bit(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);
        } else
            return (KERN_INVALID_ARGUMENT);
    }

    case x86_DEBUG_STATE32:
    {
        x86_debug_state32_t *state;
        kern_return_t ret;

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state32_t *)tstate;

        ret = set_debug_state32(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE64:
    {
        x86_debug_state64_t *state;
        kern_return_t ret;

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state64_t *)tstate;

        ret = set_debug_state64(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;
        kern_return_t ret = KERN_INVALID_ARGUMENT;

        if (count != x86_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;
        if (state->dsh.flavor == x86_DEBUG_STATE64 &&
            state->dsh.count == x86_DEBUG_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            ret = set_debug_state64(thr_act, &state->uds.ds64);
        } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
                   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
                   !thread_is_64bit(thr_act)) {
            ret = set_debug_state32(thr_act, &state->uds.ds32);
        }
        return ret;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
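/*
 * Note that every path above which installs user-visible flag bits does so
 * as (flags & ~EFL_USER_CLEAR) | EFL_USER_SET, so thread_set_state() callers
 * can never hand a thread privileged EFLAGS/RFLAGS bits.
 */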
/*
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {

    case THREAD_STATE_FLAVOR_LIST:
    {
        if (*count < 3)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

        *count = 3;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_NEW:
    {
        if (*count < 4)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

        *count = 4;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_10_9:
    {
        if (*count < 5)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;

        *count = 5;
        break;
    }

    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;
        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (*count < x86_FLOAT_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (*count < x86_FLOAT_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;
        kern_return_t kret;

        if (*count < x86_FLOAT_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        } else {
            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        *count = x86_FLOAT_STATE_COUNT;

        return (kret);
    }

    case x86_AVX_STATE32:
    {
        if (*count != x86_AVX_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (*count != x86_AVX_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE:
    {
        x86_avx_state_t *state;
        kern_return_t kret;

        if (*count < x86_AVX_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_avx_state_t *)tstate;

        bzero((char *)state, sizeof(x86_avx_state_t));
        if (thread_is_64bit(thr_act)) {
            state->ash.flavor = x86_AVX_STATE64;
            state->ash.count = x86_AVX_STATE64_COUNT;
            kret = fpu_get_fxstate(thr_act,
                                   (thread_state_t)&state->ufs.as64,
                                   x86_AVX_STATE64);
        } else {
            state->ash.flavor = x86_AVX_STATE32;
            state->ash.count = x86_AVX_STATE32_COUNT;
            kret = fpu_get_fxstate(thr_act,
                                   (thread_state_t)&state->ufs.as32,
                                   x86_AVX_STATE32);
        }
        *count = x86_AVX_STATE_COUNT;

        return (kret);
    }

    case x86_THREAD_STATE32:
    {
        if (*count < x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        break;
    }

    case x86_THREAD_STATE64:
    {
        if (*count < x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
        break;
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64);
        } else {
            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);
        }
        *count = x86_THREAD_STATE_COUNT;

        break;
    }

    case x86_EXCEPTION_STATE32:
    {
        if (*count < x86_EXCEPTION_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE64:
    {
        if (*count < x86_EXCEPTION_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE:
    {
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);
        } else {
            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);
        }
        *count = x86_EXCEPTION_STATE_COUNT;

        break;
    }
    case x86_DEBUG_STATE32:
    {
        if (*count < x86_DEBUG_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

        break;
    }
    case x86_DEBUG_STATE64:
    {
        if (*count < x86_DEBUG_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return (KERN_INVALID_ARGUMENT);

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

        break;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);
        } else {
            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);
        }
        *count = x86_DEBUG_STATE_COUNT;
        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
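/*
 * For the getters above, *count is in/out: it must arrive at least as large
 * as the flavor's _COUNT constant and is rewritten to the number of
 * natural_t words actually returned.
 */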
kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL)
        return KERN_FAILURE;

    switch (flavor) {
    case x86_THREAD_STATE32: {
        x86_thread_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE64: {
        x86_thread_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;
        } else {
            panic("unknown thread state");
        }

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;
    }
    }

    return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);
    }
    enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
    current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
    iss_zone = zinit(sizeof(x86_saved_state_t),
                     thread_max * sizeof(x86_saved_state_t),
                     THREAD_CHUNK * sizeof(x86_saved_state_t),
                     "x86_64 saved state");

    ids_zone = zinit(sizeof(x86_debug_state64_t),
                     thread_max * sizeof(x86_debug_state64_t),
                     THREAD_CHUNK * sizeof(x86_debug_state64_t),
                     "x86_64 debug state");

    fpu_module_init();
}
user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return (iss64->isf.rip);
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);

        return (iss32->eip);
    }
}
/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority,
                 thread->sched_pri, 0,
                 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(
    thread_t        thread,
    vm_offset_t     stack)
{
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority,
                 thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;

    statep = STACK_IKS(stack);
#if defined(__x86_64__)
    statep->k_rip = (unsigned long) Thread_continue;
    statep->k_rbx = (unsigned long) thread_continue;
    statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) thread_continue;
    statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

    return;
}
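/*
 * The saved kernel state above is seeded so that the first switch onto this
 * stack "returns" into the Thread_continue trampoline, which is expected to
 * pick up the real continuation (thread_continue) from the saved RBX/EBX
 * slot.
 */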
/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
                      thread_t new)
{
    vm_offset_t stack;

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif
#if KPC
    ml_kpc_cswitch(old, new);
#endif
#if KPERF
    ml_kperf_cswitch(old, new);
#endif

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    PMAP_SWITCH_CONTEXT(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

#if HYPERVISOR
    ml_hv_cswitch(old, new);
#endif

    machine_set_current_thread(new);

    return;
}
struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL)
            return ((void *)0);

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
                                        (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return ((void *)0);
        }
        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
                                        (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return ((void *)0);
        }

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
                                        x86_DEBUG_STATE64,
                                        (thread_state_t)&ic64->ds,
                                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return ((void *)0);
        }
        return (ic64);
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL)
            return ((void *)0);

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
                                        (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return ((void *)0);
        }
        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
                                        (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return ((void *)0);
        }

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
                                        x86_DEBUG_STATE32,
                                        (thread_state_t)&ic32->ds,
                                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return ((void *)0);
        }
        return (ic32);
    }
}
void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
        return;

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
                                        (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                                     (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
        }
        kfree(ic64, sizeof(struct x86_act_context64));
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
                                        (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                                            (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
        }
        kfree(ic32, sizeof(struct x86_act_context32));
    }
}
void act_thread_cfree(__unused void *ctx)
{
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
    x86_debug_state32_t *src,
    x86_debug_state32_t *target,
    boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
    x86_debug_state64_t *src,
    x86_debug_state64_t *target,
    boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}