/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>         /* LAPIC_PMC_SWI_VECTOR */

#if HYPERVISOR
#include <kern/hv_support.h>
#endif
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    [x86_THREAD_STATE32]        = x86_THREAD_STATE32_COUNT,
    [x86_THREAD_STATE64]        = x86_THREAD_STATE64_COUNT,
    [x86_THREAD_FULL_STATE64]   = x86_THREAD_FULL_STATE64_COUNT,
    [x86_THREAD_STATE]          = x86_THREAD_STATE_COUNT,
    [x86_FLOAT_STATE32]         = x86_FLOAT_STATE32_COUNT,
    [x86_FLOAT_STATE64]         = x86_FLOAT_STATE64_COUNT,
    [x86_FLOAT_STATE]           = x86_FLOAT_STATE_COUNT,
    [x86_EXCEPTION_STATE32]     = x86_EXCEPTION_STATE32_COUNT,
    [x86_EXCEPTION_STATE64]     = x86_EXCEPTION_STATE64_COUNT,
    [x86_EXCEPTION_STATE]       = x86_EXCEPTION_STATE_COUNT,
    [x86_DEBUG_STATE32]         = x86_DEBUG_STATE32_COUNT,
    [x86_DEBUG_STATE64]         = x86_DEBUG_STATE64_COUNT,
    [x86_DEBUG_STATE]           = x86_DEBUG_STATE_COUNT,
    [x86_AVX_STATE32]           = x86_AVX_STATE32_COUNT,
    [x86_AVX_STATE64]           = x86_AVX_STATE64_COUNT,
    [x86_AVX_STATE]             = x86_AVX_STATE_COUNT,
    [x86_AVX512_STATE32]        = x86_AVX512_STATE32_COUNT,
    [x86_AVX512_STATE64]        = x86_AVX512_STATE64_COUNT,
    [x86_AVX512_STATE]          = x86_AVX512_STATE_COUNT,
    [x86_PAGEIN_STATE]          = x86_PAGEIN_STATE_COUNT,
};
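
/*
 * The set/get-state handlers below index this table by flavor (e.g.
 * _MachineStateCount[x86_AVX_STATE64]) to validate the
 * mach_msg_type_number_t count supplied with a request before touching
 * the caller's buffer.
 */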
zone_t          iss_zone;               /* zone for saved_state area */
zone_t          ids_zone;               /* zone for debug_state area */

/* Forward */

extern void     Thread_continue(void);
extern void     Load_context(
    thread_t            thread) __attribute__((noreturn));
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, void *ts, boolean_t full);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, void *ts, boolean_t full);
#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
    if (old->hv_thread_target) {
        hv_callbacks.preempt(old->hv_thread_target);
    }

    if (new->hv_thread_target) {
        hv_callbacks.dispatch(new->hv_thread_target);
    }
}
#endif
/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * result in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate i/o reads and write
     */
    if (!(get_cr4() & CR4_DE)) {
        for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
            i++, mask1 <<= 4, mask2 <<= 4) {
            if ((*dr7d & mask1) == mask2) {
                return FALSE;
            }
        }
    }

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++) {
        if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
            ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
            return FALSE;
        }
    }

    /*
     * Intel docs have these bits fixed.
     */
    *dr7d |= 0x1 << 10;     /* set bit 10 to 1 */
    *dr7d &= ~(0x1 << 11);  /* set bit 11 to 0 */
    *dr7d &= ~(0x1 << 12);  /* set bit 12 to 0 */
    *dr7d &= ~(0x1 << 14);  /* set bit 14 to 0 */
    *dr7d &= ~(0x1 << 15);  /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */
    if (*dr7d & 0x2) {
        return FALSE;
    }

    if (*dr7d & (0x2 << 2)) {
        return FALSE;
    }

    if (*dr7d & (0x2 << 4)) {
        return FALSE;
    }

    if (*dr7d & (0x2 << 6)) {
        return FALSE;
    }

    return TRUE;
}
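
/*
 * DR7 layout, for reference: bits 0-7 hold the local/global enable pairs
 * L0/G0 .. L3/G3 for the four hardware breakpoints; for breakpoint n,
 * bits 16+4n..17+4n encode R/W[n] (execute/write/io/data access) and bits
 * 18+4n..19+4n encode LEN[n].  The checks above reject the combinations
 * Intel documents as undefined and mask off the global-enable bits.
 */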
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
    if (!dr7d_is_valid(&ds->dr7)) {
        return FALSE;
    }

    return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
    if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
        return FALSE;
    }

    /*
     * Don't allow the user to set debug addresses above their max
     * value
     */
    if (ds->dr7 & 0x1) {
        if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
            return FALSE;
        }
    }

    if (ds->dr7 & (0x1 << 2)) {
        if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
            return FALSE;
        }
    }

    if (ds->dr7 & (0x1 << 4)) {
        if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
            return FALSE;
        }
    }

    if (ds->dr7 & (0x1 << 6)) {
        if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
            return FALSE;
        }
    }

    /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
    ds->dr7 &= 0xffffffffULL;

    return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *new_ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);

    if (debug_state_is_valid32(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (pcb->ids == NULL) {
        new_ids = zalloc(ids_zone);
        bzero(new_ids, sizeof *new_ids);

        simple_lock(&pcb->lock, LCK_GRP_NULL);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = new_ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, new_ids);
        }
    }

    copy_debug_state32(ds, pcb->ids, FALSE);

    return KERN_SUCCESS;
}
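
/*
 * Note the allocate-then-lock pattern above: the debug-state area is
 * allocated and zeroed outside the pcb lock, and the lock is taken only
 * long enough to publish the pointer.  If another thread won the race,
 * the local allocation is simply returned to the zone.
 */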
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *new_ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);

    if (debug_state_is_valid64(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (pcb->ids == NULL) {
        new_ids = zalloc(ids_zone);
        bzero(new_ids, sizeof *new_ids);

#if HYPERVISOR
        if (thread->hv_thread_target) {
            hv_callbacks.volatile_state(thread->hv_thread_target,
                HV_DEBUG_STATE);
        }
#endif

        simple_lock(&pcb->lock, LCK_GRP_NULL);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = new_ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, new_ids);
        }
    }

    copy_debug_state64(ds, pcb->ids, FALSE);

    return KERN_SUCCESS;
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.ids;

    if (saved_state) {
        copy_debug_state32(saved_state, ds, TRUE);
    } else {
        bzero(ds, sizeof *ds);
    }
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.ids;

    if (saved_state) {
        copy_debug_state64(saved_state, ds, TRUE);
    } else {
        bzero(ds, sizeof *ds);
    }
}
/*
 * consider_machine_collect:
 *
 *  Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t            new)
{
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(NULL, new);
    Load_context(new);
}
static inline void
pmap_switch_context(thread_t ot, thread_t nt, int cnum)
{
    pmap_assert(ml_get_interrupts_enabled() == FALSE);
    vm_map_t nmap = nt->map, omap = ot->map;
    if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
        PMAP_DEACTIVATE_MAP(omap, ot, cnum);
        PMAP_ACTIVATE_MAP(nmap, nt, cnum);
    }
}
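
/*
 * Note that the map is deactivated/reactivated not only when the vm_map
 * actually changes, but also whenever the incoming pmap has page zero
 * accessible, so the per-CPU address-space state is re-evaluated for
 * that case even on a same-map handoff.
 */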
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
    thread_t            old,
    thread_continue_t   continuation,
    thread_t            new)
{
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

    /*
     * Save FP registers if in use.
     */
    fpu_switch_context(old, new);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Monitor the stack depth and report new max,
     * not worrying about races.
     */
    vm_offset_t depth = current_stack_depth();
    if (depth > kernel_stack_depth_max) {
        kernel_stack_depth_max = depth;
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
            (long) depth, 0, 0, 0, 0);
    }

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    pmap_switch_context(old, new, cpu_number());

    /*
     * Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(old, new);

#if HYPERVISOR
    ml_hv_cswitch(old, new);
#endif

    return Switch_context(old, continuation, new);
}
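
/*
 * Switch_context() saves the old thread's kernel register state (or its
 * continuation) and resumes the new thread; the old thread is handed
 * back in the new thread's context, which is why its value is returned
 * here.
 */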
boolean_t
machine_thread_on_core(thread_t thread)
{
    return thread->machine.specFlags & OnProc;
}

thread_t
machine_processor_shutdown(
    thread_t        thread,
    void            (*doshutdown)(processor_t),
    processor_t     processor)
{
    fpu_switch_context(thread, NULL);
    pmap_switch_context(thread, processor->idle_thread, cpu_number());
    return Shutdown_context(thread, doshutdown, processor);
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
    thread_t thread)
{
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're target, re-arm the no-fpu trap.
     */
    if (thread->machine.ifps) {
        (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

        if (thread == current_thread()) {
            clear_fpu();
        }
    }

    if (thread->machine.ids) {
        zfree(ids_zone, thread->machine.ids);
        thread->machine.ids = NULL;
    }

    return KERN_SUCCESS;
}
uint32_t
get_eflags_exportmask(void)
{
    return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32     - internal save/restore general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_SAVED_STATE64     - internal save/restore general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_THREAD_STATE32    - external set/get general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_THREAD_STATE64    - external set/get general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_SAVED_STATE       - external set/get general register state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32     - internal/external save/restore float and xmm state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_FLOAT_STATE64     - internal/external save/restore float and xmm state on 64 bit processors
 *                         for 64bit tasks only
 * x86_FLOAT_STATE       - external save/restore float and xmm state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *                         for 64bit tasks only
 * x86_EXCEPTION_STATE   - external get exception state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 */
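
/*
 * For illustration, an external consumer typically reaches these flavors
 * through the Mach thread_get_state() call, e.g. from userspace:
 *
 *     x86_thread_state64_t ts;
 *     mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *     kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *         (thread_state_t)&ts, &count);
 *
 * which arrives in machine_thread_get_state() below with the same flavor,
 * buffer, and in/out count ("thread_port" here stands for any thread
 * port the caller holds).
 */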
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     */
    ts->cs = USER_CS;
    /*
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     * manually any more.
     */
    ts->ss = USER_DS;
    ts->ds = USER_DS;
    ts->es = USER_DS;

    /* Set GS to CTHREAD only if it's been established */
    ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
        ts->ss,
        ts->ds,
        ts->es,
        ts->fs,
        ts->gs)) {
        return KERN_INVALID_ARGUMENT;
    }

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
        saved_state->cs = SYSENTER_TF_CS;
    }

    return KERN_SUCCESS;
}
static int
set_thread_state64(thread_t thread, void *state, int full)
{
    x86_thread_state64_t *ts;
    x86_saved_state64_t *saved_state;

    if (full == TRUE) {
        ts = &((x86_thread_full_state64_t *)state)->ss64;
    } else {
        ts = (x86_thread_state64_t *)state;
    }

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip)) {
        return KERN_INVALID_ARGUMENT;
    }

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;

    if (full == FALSE) {
        saved_state->isf.cs = USER64_CS;
    } else {
        saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
        saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
        saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
        saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
        machine_thread_set_tsd_base(thread,
            ((x86_thread_full_state64_t *)ts)->gsbase);
    }

    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;

    return KERN_SUCCESS;
}
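
/*
 * The "full" 64-bit flavor differs from the plain one in that the caller
 * also supplies cs/ss/ds/es and a gsbase.  The callers below only honor
 * it for tasks that have installed a custom LDT, since that is the only
 * case where a non-default code selector is legitimate.
 */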
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, void *state, boolean_t full)
{
    x86_thread_state64_t *ts;
    x86_saved_state64_t *saved_state;

    if (full == TRUE) {
        ts = &((x86_thread_full_state64_t *)state)->ss64;
    } else {
        ts = (x86_thread_state64_t *)state;
    }

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;

    if (full == TRUE) {
        ((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
        ((x86_thread_full_state64_t *)state)->es = saved_state->es;
        ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
        ((x86_thread_full_state64_t *)state)->gsbase =
            thread->machine.cthread_self;
    }

    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
kern_return_t
machine_thread_state_convert_to_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t *count)
{
    // No conversion to userspace representation on this platform
    return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t count)
{
    // No conversion from userspace representation on this platform
    return KERN_SUCCESS;
}

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
    __unused thread_t thread,
    __unused user_addr_t *uctxp)
{
    // No conversion to userspace representation on this platform
    return KERN_SUCCESS;
}

kern_return_t
machine_thread_function_pointers_convert_from_user(
    __unused thread_t thread,
    __unused user_addr_t *fptrs,
    __unused uint32_t count)
{
    // No conversion from userspace representation on this platform
    return KERN_SUCCESS;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor) {
    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_saved_state32_t *) tstate;

        /*
         * Refuse to allow 64-bit processes to set
         * 32-bit state.
         */
        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
            state->ss,
            state->ds,
            state->es,
            state->fs,
            state->gs)) {
            return KERN_INVALID_ARGUMENT;
        }

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
            state->cs = SYSENTER_TF_CS;
        }

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs)) {
            return KERN_INVALID_ARGUMENT;
        }

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip)) {
            return KERN_INVALID_ARGUMENT;
        }

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        /*
         * General registers
         */
        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_FLOAT_STATE32:
    case x86_AVX_STATE32:
    case x86_AVX512_STATE32:
    {
        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    case x86_AVX_STATE64:
    case x86_AVX512_STATE64:
    {
        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        }
        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        return KERN_INVALID_ARGUMENT;
    }

    case x86_AVX_STATE:
    case x86_AVX512_STATE:
    {
        x86_avx_state_t *state;

        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_avx_state_t *)tstate;
        /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
        /* 64-bit flavor? */
        if (state->ash.flavor == (flavor - 1) &&
            state->ash.count == _MachineStateCount[flavor - 1] &&
            thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act,
                       (thread_state_t)&state->ufs.as64,
                       flavor - 1);
        }
        /* 32-bit flavor? */
        if (state->ash.flavor == (flavor - 2) &&
            state->ash.count == _MachineStateCount[flavor - 2] &&
            !thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act,
                       (thread_state_t)&state->ufs.as32,
                       flavor - 2);
        }
        return KERN_INVALID_ARGUMENT;
    }

    case x86_THREAD_STATE32:
    {
        if (count != x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
    }

    case x86_THREAD_STATE64:
    {
        if (count != x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        return set_thread_state64(thr_act, tstate, FALSE);
    }

    case x86_THREAD_FULL_STATE64:
    {
        if (count != x86_THREAD_FULL_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        /* If this process does not have a custom LDT, return failure */
        if (thr_act->task->i386_ldt == 0) {
            return KERN_INVALID_ARGUMENT;
        }

        return set_thread_state64(thr_act, tstate, TRUE);
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
        } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
            state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act) && thr_act->task->i386_ldt != 0) {
            return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
            state->tsh.count == x86_THREAD_STATE32_COUNT &&
            !thread_is_64bit_addr(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);
        } else {
            return KERN_INVALID_ARGUMENT;
        }
    }

    case x86_DEBUG_STATE32:
    {
        x86_debug_state32_t *state;
        kern_return_t ret;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_debug_state32_t *)tstate;

        ret = set_debug_state32(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE64:
    {
        x86_debug_state64_t *state;
        kern_return_t ret;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_debug_state64_t *)tstate;

        ret = set_debug_state64(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;
        kern_return_t ret = KERN_INVALID_ARGUMENT;

        if (count != x86_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_debug_state_t *)tstate;
        if (state->dsh.flavor == x86_DEBUG_STATE64 &&
            state->dsh.count == x86_DEBUG_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act)) {
            ret = set_debug_state64(thr_act, &state->uds.ds64);
        } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
            state->dsh.count == x86_DEBUG_STATE32_COUNT &&
            !thread_is_64bit_addr(thr_act)) {
            ret = set_debug_state32(thr_act, &state->uds.ds32);
        }
        return ret;
    }
    default:
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}
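
/*
 * Kernel and user clients drive this through thread_set_state(); a
 * debugger arming a hardware watchpoint, for example, supplies an
 * x86_DEBUG_STATE payload whose dsh header names the concrete 32- or
 * 64-bit flavor, which is matched against the target's address mode
 * before the debug registers are validated and installed.
 */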
mach_vm_address_t
machine_thread_pc(thread_t thr_act)
{
    if (thread_is_64bit_addr(thr_act)) {
        return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
    } else {
        return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
    }
}

void
machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
{
    pal_register_cache_state(thr_act, DIRTY);

    if (thread_is_64bit_addr(thr_act)) {
        if (!IS_USERADDR64_CANONICAL(pc)) {
            pc = 0;
        }
        USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
    } else {
        USER_REGS32(thr_act)->eip = (uint32_t)pc;
    }
}
/*
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {
    case THREAD_STATE_FLAVOR_LIST:
    {
        if (*count < 3) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

        *count = 3;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_NEW:
    {
        if (*count < 4) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

        *count = 4;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_10_9:
    {
        if (*count < 5) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;

        *count = 5;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_10_13:
    {
        if (*count < 6) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;
        tstate[5] = x86_AVX512_STATE;

        *count = 6;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_10_15:
    {
        if (*count < 7) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;
        tstate[5] = x86_AVX512_STATE;
        tstate[6] = x86_PAGEIN_STATE;

        *count = 7;
        break;
    }

    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;
        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (*count < x86_FLOAT_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (*count < x86_FLOAT_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;
        kern_return_t kret;

        if (*count < x86_FLOAT_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit_addr(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        } else {
            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        *count = x86_FLOAT_STATE_COUNT;

        return kret;
    }

    case x86_AVX_STATE32:
    case x86_AVX512_STATE32:
    {
        if (*count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    case x86_AVX512_STATE64:
    {
        if (*count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE:
    case x86_AVX512_STATE:
    {
        x86_avx_state_t *state;
        thread_state_t fstate;

        if (*count < _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = _MachineStateCount[flavor];
        state = (x86_avx_state_t *)tstate;

        bzero((char *)state, *count * sizeof(int));

        if (thread_is_64bit_addr(thr_act)) {
            flavor -= 1;    /* 64-bit flavor */
            fstate = (thread_state_t) &state->ufs.as64;
        } else {
            flavor -= 2;    /* 32-bit flavor */
            fstate = (thread_state_t) &state->ufs.as32;
        }
        state->ash.flavor = flavor;
        state->ash.count = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, fstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (*count < x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        break;
    }

    case x86_THREAD_STATE64:
    {
        if (*count < x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, tstate, FALSE);
        break;
    }

    case x86_THREAD_FULL_STATE64:
    {
        if (*count < x86_THREAD_FULL_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        /* If this process does not have a custom LDT, return failure */
        if (thr_act->task->i386_ldt == 0) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_THREAD_FULL_STATE64_COUNT;

        get_thread_state64(thr_act, tstate, TRUE);
        break;
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit_addr(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64, FALSE);
        } else {
            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);
        }
        *count = x86_THREAD_STATE_COUNT;

        break;
    }

    case x86_EXCEPTION_STATE32:
    {
        if (*count < x86_EXCEPTION_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE64:
    {
        if (*count < x86_EXCEPTION_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE:
    {
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit_addr(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);
        } else {
            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);
        }
        *count = x86_EXCEPTION_STATE_COUNT;

        break;
    }

    case x86_DEBUG_STATE32:
    {
        if (*count < x86_DEBUG_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

        break;
    }

    case x86_DEBUG_STATE64:
    {
        if (*count < x86_DEBUG_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;
        }

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

        break;
    }

    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit_addr(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);
        } else {
            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);
        }
        *count = x86_DEBUG_STATE_COUNT;
        break;
    }

    case x86_PAGEIN_STATE:
    {
        if (*count < x86_PAGEIN_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        x86_pagein_state_t *state = (void *)tstate;

        state->__pagein_error = thr_act->t_pagein_error;

        *count = x86_PAGEIN_STATE_COUNT;
        break;
    }

    default:
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}
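
/*
 * The THREAD_STATE_FLAVOR_LIST selectors above are versioned: each later
 * variant appends the flavors introduced by that macOS release (AVX with
 * the 10.9 list, AVX512 with 10.13, pagein state with 10.15), so a caller
 * can probe with the newest list it understands and fall back when
 * KERN_INVALID_ARGUMENT is returned.
 */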
kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL) {
        return KERN_FAILURE;
    }

    switch (flavor) {
    case x86_THREAD_STATE32: {
        x86_thread_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE64: {
        x86_thread_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;
        } else {
            panic("unknown thread state");
        }

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;
    }
    }

    return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* Adjust FPU state */
    fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);
    }
    enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
    current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
    iss_zone = zinit(sizeof(x86_saved_state_t),
        thread_max * sizeof(x86_saved_state_t),
        THREAD_CHUNK * sizeof(x86_saved_state_t),
        "x86_64 saved state");

    ids_zone = zinit(sizeof(x86_debug_state64_t),
        thread_max * sizeof(x86_debug_state64_t),
        THREAD_CHUNK * sizeof(x86_debug_state64_t),
        "x86_64 debug state");
}
user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit_addr(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return iss64->isf.rip;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);

        return iss32->eip;
    }
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0,
        0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return stack;
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
    thread_t        thread,
    vm_offset_t     stack)
{
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread_initialize_kernel_state(thread);

    statep = STACK_IKS(stack);

    /*
     * Reset the state of the thread to resume from a continuation,
     * including resetting the stack and frame pointer to avoid backtracers
     * seeing this temporary state and attempting to walk the defunct stack.
     */
    statep->k_rbp = (uint64_t) 0;
    statep->k_rip = (uint64_t) Thread_continue;
    statep->k_rbx = (uint64_t) thread_continue;
    statep->k_rsp = (uint64_t) STACK_IKS(stack);
}
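
/*
 * Thread_continue is the assembly trampoline installed as the saved rip;
 * when the thread is first switched to, it picks up the C continuation
 * left in k_rbx (thread_continue here) and calls it on the fresh stack,
 * so no hand-built stack frame is needed.
 */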
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
    thread_t new)
{
    vm_offset_t stack;

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_switch_context(old, new);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    pmap_switch_context(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

#if HYPERVISOR
    ml_hv_cswitch(old, new);
#endif

    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);
}
struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit_addr(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL) {
            return (void *)0;
        }

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
            (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return (void *)0;
        }
        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
            (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return (void *)0;
        }

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
            x86_DEBUG_STATE64,
            (thread_state_t)&ic64->ds,
            &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return (void *)0;
        }
        return ic64;
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL) {
            return (void *)0;
        }

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
            (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return (void *)0;
        }
        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
            (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return (void *)0;
        }

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
            x86_DEBUG_STATE32,
            (thread_state_t)&ic32->ds,
            &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return (void *)0;
        }
        return ic32;
    }
}
void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL) {
        return;
    }

    if (thread_is_64bit_addr(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
            (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
        }
        kfree(ic64, sizeof(struct x86_act_context64));
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
            (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
        }
        kfree(ic32, sizeof(struct x86_act_context32));
    }
}
void
act_thread_cfree(__unused void *ctx)
{
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
    x86_debug_state32_t *src,
    x86_debug_state32_t *target,
    boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
    x86_debug_state64_t *src,
    x86_debug_state64_t *target,
    boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}
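
/*
 * dr4 and dr5 are obsolete aliases of dr6 and dr7 on x86, so they are
 * excluded from the install path to ensure a stale alias can never
 * overwrite the validated dr6/dr7 values; copying them is only useful
 * when reporting state back to a caller (all == TRUE).
 */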