/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

#include <kern/kpc.h>

#include <kperf/kperf.h>

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};
zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */
extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
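
/*
 * Editorial note: pmc_swi() records the outgoing and incoming threads in
 * per-cpu data before raising the PMC software interrupt (see the
 * LAPIC_PMC_SWI_VECTOR include above); ml_get_csw_threads() is the accessor
 * the interrupt handler uses to retrieve that pair.
 */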
void ml_kpc_cswitch(thread_t old, thread_t new)
{
	if (!kpc_threads_counting)
		return;

	/* call the kpc function */
	kpc_switch_context(old, new);
}

void ml_kperf_cswitch(thread_t old, thread_t new)
{
	if (!kperf_cswitch_hook)
		return;

	/* call the kperf function */
	kperf_switch_context(old, new);
}
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * trap.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and writes.
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
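
/*
 * Reference for the checks above (summarized from the Intel SDM as an
 * editorial aid, not taken from the original sources): DR7 bits 0-7 are the
 * per-breakpoint local/global enables (L0/G0 .. L3/G3), bits 16+4n and
 * 18+4n hold the 2-bit R/Wn and LENn fields, bit 10 is a reserved
 * must-be-one bit, and bits 11, 12, 14 and 15 are reserved must-be-zero
 * bits, which is why dr7_is_valid() forces them before the value is used.
 */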
static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers.
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}

static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages.
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	ml_kpc_cswitch(old, new);
	ml_kperf_cswitch(old, new);

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread.
	 */
	act_machine_switch_pcb(old, new);

	return (Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return (Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}
uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
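
/*
 * Illustrative only (editorial addition, not kernel code): a user-space
 * caller typically reaches these flavors through thread_get_state().
 * A minimal sketch, assuming the user-level Mach headers (where the
 * structure fields carry double-underscore names):
 *
 *	x86_thread_state64_t	ts;
 *	mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;
 *
 *	if (thread_get_state(mach_thread_self(), x86_THREAD_STATE64,
 *			     (thread_state_t)&ts, &count) == KERN_SUCCESS)
 *		printf("rip=0x%llx rsp=0x%llx\n", ts.__rip, ts.__rsp);
 */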
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return (KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return (KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return (KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return (KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return (KERN_INVALID_ARGUMENT);
	}
	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t	*state;

		if (count != x86_AVX_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		}
		if (state->ash.flavor == x86_FLOAT_STATE32 &&
		    state->ash.count  == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		return (KERN_INVALID_ARGUMENT);
	}
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return (KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
/*
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return (kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return (kret);
	}
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}

	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}

	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return (KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;

		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread.
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread.
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor.
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * Perform machine-dependent per-thread initializations.
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			 thread_max * sizeof(x86_saved_state_t),
			 THREAD_CHUNK * sizeof(x86_saved_state_t),
			 "x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			 thread_max * sizeof(x86_debug_state64_t),
			 THREAD_CHUNK * sizeof(x86_debug_state64_t),
			 "x86_64 debug state");

	fpu_module_init();
}
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return (iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return (iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}
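
/*
 * Note (editorial): the frame primed above makes the first dispatch of a
 * newly attached stack "return" into Thread_continue, with thread_continue
 * available in the saved k_rbx/k_ebx slot, so the thread starts in the
 * generic continuation path rather than in interrupted kernel code.
 */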
/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	ml_kpc_cswitch(old, new);
	ml_kperf_cswitch(old, new);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

	machine_set_current_thread(new);

	return;
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return ((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return ((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return ((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return ((void *)0);
		}
		return (ic64);
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return ((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return ((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return ((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return ((void *)0);
		}
		return (ic32);
	}
}

void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}
void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}