/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>

#include <kern/machine.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>

#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if HYPERVISOR
#include <kern/hv_support.h>
#endif
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
#if !defined(RC_HIDE_XNU_J137)
	[x86_AVX512_STATE32]	= x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64]	= x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE]	= x86_AVX512_STATE_COUNT,
#endif /* not RC_HIDE_XNU_J137 */
};

zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */
extern void		Thread_continue(void);
extern void		Load_context(
				thread_t			thread) __attribute__((noreturn));

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif
/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * result in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7d & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10; /* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7d & 0x2)
		return (FALSE);

	if (*dr7d & (0x2<<2))
		return (FALSE);

	if (*dr7d & (0x2<<4))
		return (FALSE);

	if (*dr7d & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
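
/*
 * Illustrative sketch (kept out of the build with #if 0): composing a DR7
 * value that dr7d_is_valid() will accept.  Bit positions follow the Intel
 * SDM and match the shifts used above: L0..L3 are the even bits 0/2/4/6
 * (the odd global-enable bits are rejected), R/W0..R/W3 sit at bits 16-17,
 * 20-21, 24-25, 28-29, and LEN0..LEN3 at bits 18-19, 22-23, 26-27, 30-31.
 * The helper name is hypothetical.
 */
#if 0
static uint32_t
dr7_encode_breakpoint(int regnum, uint32_t rw, uint32_t len)
{
	uint32_t dr7 = 0;

	dr7 |= 0x1 << (regnum * 2);			/* L<n>: local enable */
	dr7 |= (rw & 0x3) << (16 + regnum * 4);		/* R/W<n>: 00B exec, 01B write, 11B read/write */
	dr7 |= (len & 0x3) << (18 + regnum * 4);	/* LEN<n>: must be 00B when R/W<n> is 00B */

	return dr7;
}
#endif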
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t	*new_ids;
	pcb_t			pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc(ids_zone);
		bzero(new_ids, sizeof *new_ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state32(ds, pcb->ids, FALSE);

	return (KERN_SUCCESS);
}
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t	*new_ids;
	pcb_t			pcb;

	pcb = THREAD_TO_PCB(thread);

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		new_ids = zalloc(ids_zone);
		bzero(new_ids, sizeof *new_ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state64(ds, pcb->ids, FALSE);

	return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t		new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}
static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
	}
}
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

	/*
	 *	Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_switch_context(thread, NULL);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}
uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
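
/*
 * Illustrative sketch (kept out of the build with #if 0): how a user-space
 * client typically drives one of the "external" flavors listed above via
 * the Mach thread_get_state() interface.  The wrapper name is hypothetical,
 * and user-space headers may prefix the structure members (e.g. __rip)
 * depending on compilation flags.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
fetch_thread_state64(thread_act_t target, x86_thread_state64_t *ts)
{
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

	/*
	 * machine_thread_get_state() below rejects a count that is too
	 * small or a flavor that does not match the task's address width.
	 */
	return thread_get_state(target, x86_THREAD_STATE64,
				(thread_state_t)ts, &count);
}
#endif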
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}
	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}
	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif /* not RC_HIDE_XNU_J137 */
	{
		x86_avx_state_t	*state;

		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit_addr(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       flavor - 2);
		}
		return(KERN_INVALID_ARGUMENT);
	}
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
		    state->tsh.count == x86_THREAD_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit_addr(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

#if !defined(RC_HIDE_XNU_J137)
	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}
#endif /* not RC_HIDE_XNU_J137 */
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;

		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit_addr(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}
	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif /* not RC_HIDE_XNU_J137 */
	{
		x86_avx_state_t		*state;
		thread_state_t		fstate;

		if (*count < _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		if (thread_is_64bit_addr(thr_act)) {
			flavor -= 1;	/* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;	/* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count  = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}
	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit_addr(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_addr(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit_addr(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;

		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t	*state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			thread_max * sizeof(x86_debug_state64_t),
			THREAD_CHUNK * sizeof(x86_debug_state64_t),
			"x86_64 debug state");
}
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state *statep;

	assert(stack);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread_initialize_kernel_state(thread);

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IKS(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IKS(stack);
#endif

	return;
}
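
/*
 * Note (added for reference; the exact hand-off lives in the context-switch
 * assembly and this summary is an assumption): a freshly attached stack
 * first runs at the saved k_rip, i.e. Thread_continue, which picks up the
 * continuation left in k_rbx (thread_continue) for the new thread.
 */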
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
	      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit_addr(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}

void act_thread_cfree(__unused void *ctx)
{
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}