/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>         /* LAPIC_PMC_SWI_VECTOR */

#include <kern/hv_support.h>
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    [x86_THREAD_STATE32]      = x86_THREAD_STATE32_COUNT,
    [x86_THREAD_STATE64]      = x86_THREAD_STATE64_COUNT,
    [x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
    [x86_THREAD_STATE]        = x86_THREAD_STATE_COUNT,
    [x86_FLOAT_STATE32]       = x86_FLOAT_STATE32_COUNT,
    [x86_FLOAT_STATE64]       = x86_FLOAT_STATE64_COUNT,
    [x86_FLOAT_STATE]         = x86_FLOAT_STATE_COUNT,
    [x86_EXCEPTION_STATE32]   = x86_EXCEPTION_STATE32_COUNT,
    [x86_EXCEPTION_STATE64]   = x86_EXCEPTION_STATE64_COUNT,
    [x86_EXCEPTION_STATE]     = x86_EXCEPTION_STATE_COUNT,
    [x86_DEBUG_STATE32]       = x86_DEBUG_STATE32_COUNT,
    [x86_DEBUG_STATE64]       = x86_DEBUG_STATE64_COUNT,
    [x86_DEBUG_STATE]         = x86_DEBUG_STATE_COUNT,
    [x86_AVX_STATE32]         = x86_AVX_STATE32_COUNT,
    [x86_AVX_STATE64]         = x86_AVX_STATE64_COUNT,
    [x86_AVX_STATE]           = x86_AVX_STATE_COUNT,
    [x86_AVX512_STATE32]      = x86_AVX512_STATE32_COUNT,
    [x86_AVX512_STATE64]      = x86_AVX512_STATE64_COUNT,
    [x86_AVX512_STATE]        = x86_AVX512_STATE_COUNT,
    [x86_PAGEIN_STATE]        = x86_PAGEIN_STATE_COUNT,
};
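/*
 * Illustrative note (added here, not part of the original source): this table
 * is the per-flavor size lookup used by the set/get routines later in this
 * file to validate a caller-supplied count before touching any saved state,
 * along the lines of:
 *
 *	if (count != _MachineStateCount[flavor]) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */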
ZONE_DECLARE(iss_zone, "x86_64 saved state",
    sizeof(x86_saved_state_t), ZC_NONE);

ZONE_DECLARE(ids_zone, "x86_64 debug state",
    sizeof(x86_debug_state64_t), ZC_NONE);
extern void Thread_continue(void);
extern void Load_context(
    thread_t thread) __attribute__((noreturn));

get_exception_state32(thread_t thread, x86_exception_state32_t *es);

get_exception_state64(thread_t thread, x86_exception_state64_t *es);

get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

get_thread_state64(thread_t thread, void *ts, boolean_t full);

set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

set_thread_state64(thread_t thread, void *ts, boolean_t full);
ml_hv_cswitch(thread_t old, thread_t new)
    if (old->hv_thread_target) {
        hv_callbacks.preempt(old->hv_thread_target);

    if (new->hv_thread_target) {
        hv_callbacks.dispatch(new->hv_thread_target);
/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected trap.
 */
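/*
 * Illustrative reference (added note, not part of the original source): the
 * DR7 layout the checks below rely on, per the Intel SDM:
 *
 *	bits  0..7   L0/G0 .. L3/G3 local/global breakpoint enables
 *	bits  8..9   LE/GE (exact breakpoint enables)
 *	bit   10     reserved, must be 1
 *	bits 11..12  reserved, must be 0
 *	bit   13     GD (general detect)
 *	bits 14..15  reserved, must be 0
 *	bits 16..31  R/Wn (2 bits) and LENn (2 bits) for breakpoints 0..3,
 *	             i.e. R/Wn at bit (16 + n*4) and LENn at bit (18 + n*4)
 */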
dr7d_is_valid(uint32_t *dr7d)
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate i/o reads and writes
     */
    if (!(get_cr4() & CR4_DE)) {
        for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
            i++, mask1 <<= 4, mask2 <<= 4) {
            if ((*dr7d & mask1) == mask2) {

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set to 0
     */
    for (i = 0; i < 4; i++) {
        if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
            ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {

    /*
     * Intel docs have these bits fixed.
     */
    *dr7d |= 0x1 << 10;             /* set bit 10 to 1 */
    *dr7d &= ~(0x1 << 11);          /* set bit 11 to 0 */
    *dr7d &= ~(0x1 << 12);          /* set bit 12 to 0 */
    *dr7d &= ~(0x1 << 14);          /* set bit 14 to 0 */
    *dr7d &= ~(0x1 << 15);          /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */
    if (*dr7d & (0x2 << 2)) {

    if (*dr7d & (0x2 << 4)) {

    if (*dr7d & (0x2 << 6)) {
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

debug_state_is_valid32(x86_debug_state32_t *ds)
    if (!dr7d_is_valid(&ds->dr7)) {

debug_state_is_valid64(x86_debug_state64_t *ds)
    if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {

    /*
     * Don't allow the user to set debug addresses above their max
     */
    if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {

    if (ds->dr7 & (0x1 << 2)) {
        if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {

    if (ds->dr7 & (0x1 << 4)) {
        if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {

    if (ds->dr7 & (0x1 << 6)) {
        if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {

    /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
    ds->dr7 &= 0xffffffffULL;
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
    x86_debug_state32_t *new_ids;

    pcb = THREAD_TO_PCB(thread);

    if (debug_state_is_valid32(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;

    if (pcb->ids == NULL) {
        new_ids = zalloc(ids_zone);
        bzero(new_ids, sizeof *new_ids);

        simple_lock(&pcb->lock, LCK_GRP_NULL);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            simple_unlock(&pcb->lock);
            simple_unlock(&pcb->lock);
            zfree(ids_zone, new_ids);

    copy_debug_state32(ds, pcb->ids, FALSE);
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
    x86_debug_state64_t *new_ids;

    pcb = THREAD_TO_PCB(thread);

    if (debug_state_is_valid64(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;

    if (pcb->ids == NULL) {
        new_ids = zalloc(ids_zone);
        bzero(new_ids, sizeof *new_ids);

        if (thread->hv_thread_target) {
            hv_callbacks.volatile_state(thread->hv_thread_target,

        simple_lock(&pcb->lock, LCK_GRP_NULL);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            simple_unlock(&pcb->lock);
            simple_unlock(&pcb->lock);
            zfree(ids_zone, new_ids);

    copy_debug_state64(ds, pcb->ids, FALSE);
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.ids;

        copy_debug_state32(saved_state, ds, TRUE);
        bzero(ds, sizeof *ds);

get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.ids;

        copy_debug_state64(saved_state, ds, TRUE);
        bzero(ds, sizeof *ds);

/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
consider_machine_collect(void)

consider_machine_adjust(void)
/*
 * Switch to the first thread on a CPU.
 */
machine_load_context(
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(NULL, new);

pmap_switch_context(thread_t ot, thread_t nt, int cnum)
    pmap_assert(ml_get_interrupts_enabled() == FALSE);
    vm_map_t nmap = nt->map, omap = ot->map;
    if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
        PMAP_DEACTIVATE_MAP(omap, ot, cnum);
        PMAP_ACTIVATE_MAP(nmap, nt, cnum);
/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 */
machine_switch_context(
    thread_continue_t continuation,
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

    /*
     * Save FP registers if in use.
     */
    fpu_switch_context(old, new);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Monitor the stack depth and report new max,
     * not worrying about races.
     */
    vm_offset_t depth = current_stack_depth();
    if (depth > kernel_stack_depth_max) {
        kernel_stack_depth_max = depth;
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
            (long) depth, 0, 0, 0, 0);

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    pmap_switch_context(old, new, cpu_number());

    /*
     * Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(old, new);

    ml_hv_cswitch(old, new);

    return Switch_context(old, continuation, new);

machine_thread_on_core(thread_t thread)
    return thread->machine.specFlags & OnProc;
machine_processor_shutdown(
    void (*doshutdown)(processor_t),
    processor_t processor)
    fpu_switch_context(thread, NULL);
    pmap_switch_context(thread, processor->idle_thread, cpu_number());
    return Shutdown_context(thread, doshutdown, processor);
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
machine_thread_state_initialize(
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're the target, re-arm the no-fpu trap.
     */
    if (thread->machine.ifps) {
        (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

        if (thread == current_thread()) {

    if (thread->machine.ids) {
        zfree(ids_zone, thread->machine.ids);
        thread->machine.ids = NULL;

get_eflags_exportmask(void)
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
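/*
 * Illustrative sketch (added note, not part of the original source): callers
 * normally reach the set/get routines below through the Mach
 * thread_get_state()/thread_set_state() interfaces.  For example, assuming a
 * valid send right in the hypothetical variable "thread_port", a 64-bit
 * user-space caller could fetch the general registers like this:
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *
 *	kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS) {
 *		/-* ts.__rip, ts.__rsp, etc. now hold the thread's registers *-/
 *	}
 */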
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;

get_exception_state32(thread_t thread, x86_exception_state32_t *es)
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     *
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     */

    /* Set GS to CTHREAD only if it's been established */
    ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
        return KERN_INVALID_ARGUMENT;

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
        saved_state->cs = SYSENTER_TF_CS;
set_thread_state64(thread_t thread, void *state, int full)
    x86_thread_state64_t *ts;
    x86_saved_state64_t *saved_state;

        ts = &((x86_thread_full_state64_t *)state)->ss64;
        if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
            return KERN_INVALID_ARGUMENT;

        ts = (x86_thread_state64_t *)state;
        // In this case, ts->cs exists but is ignored, and
        // CS is always set to USER_CS below instead.

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip)) {
        return KERN_INVALID_ARGUMENT;

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;

        saved_state->isf.cs = USER64_CS;

        saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
        saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
        saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
        saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
        machine_thread_set_tsd_base(thread,
            ((x86_thread_full_state64_t *)ts)->gsbase);

    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
get_thread_state64(thread_t thread, void *state, boolean_t full)
    x86_thread_state64_t *ts;
    x86_saved_state64_t *saved_state;

        ts = &((x86_thread_full_state64_t *)state)->ss64;

        ts = (x86_thread_state64_t *)state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;

        ((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
        ((x86_thread_full_state64_t *)state)->es = saved_state->es;
        ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
        ((x86_thread_full_state64_t *)state)->gsbase =
            thread->machine.cthread_self;

    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
machine_thread_state_convert_to_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t *count)
    // No conversion to userspace representation on this platform

machine_thread_state_convert_from_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t count)
    // No conversion from userspace representation on this platform

machine_thread_siguctx_pointer_convert_to_user(
    __unused thread_t thread,
    __unused user_addr_t *uctxp)
    // No conversion to userspace representation on this platform

machine_thread_function_pointers_convert_from_user(
    __unused thread_t thread,
    __unused user_addr_t *fptrs,
    __unused uint32_t count)
    // No conversion from userspace representation on this platform
/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */
machine_thread_set_state(
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
    case x86_SAVED_STATE32:
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_saved_state32_t *) tstate;

        /*
         * Refuse to allow 64-bit processes to set
         */
        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
            state->cs = SYSENTER_TF_CS;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;
    case x86_SAVED_STATE64:
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs)) {
            return KERN_INVALID_ARGUMENT;

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip)) {
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;
    case x86_FLOAT_STATE32:
    case x86_AVX_STATE32:
    case x86_AVX512_STATE32:
        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        return fpu_set_fxstate(thr_act, tstate, flavor);

    case x86_FLOAT_STATE64:
    case x86_AVX_STATE64:
    case x86_AVX512_STATE64:
        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        return fpu_set_fxstate(thr_act, tstate, flavor);

    case x86_FLOAT_STATE:
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);

        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);

        return KERN_INVALID_ARGUMENT;
    case x86_AVX512_STATE:
        x86_avx_state_t *state;

        if (count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_avx_state_t *)tstate;
        /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
        /* 64-bit flavor? */
        if (state->ash.flavor == (flavor - 1) &&
            state->ash.count == _MachineStateCount[flavor - 1] &&
            thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act,
                (thread_state_t)&state->ufs.as64,

        /* 32-bit flavor? */
        if (state->ash.flavor == (flavor - 2) &&
            state->ash.count == _MachineStateCount[flavor - 2] &&
            !thread_is_64bit_addr(thr_act)) {
            return fpu_set_fxstate(thr_act,
                (thread_state_t)&state->ufs.as32,

        return KERN_INVALID_ARGUMENT;
    case x86_THREAD_STATE32:
        if (count != x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);

    case x86_THREAD_STATE64:
        if (count != x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        return set_thread_state64(thr_act, tstate, FALSE);

    case x86_THREAD_FULL_STATE64:
        if (count != x86_THREAD_FULL_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        /* If this process does not have a custom LDT, return failure */
        if (thr_act->task->i386_ldt == 0) {
            return KERN_INVALID_ARGUMENT;

        return set_thread_state64(thr_act, tstate, TRUE);

    case x86_THREAD_STATE:
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
        } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
            state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
            thread_is_64bit_addr(thr_act) && thr_act->task->i386_ldt != 0) {
            return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
            state->tsh.count == x86_THREAD_STATE32_COUNT &&
            !thread_is_64bit_addr(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);

        return KERN_INVALID_ARGUMENT;
;
1152 case x86_DEBUG_STATE32
:
1154 x86_debug_state32_t
*state
;
1157 if (thread_is_64bit_addr(thr_act
)) {
1158 return KERN_INVALID_ARGUMENT
;
1161 state
= (x86_debug_state32_t
*)tstate
;
1163 ret
= set_debug_state32(thr_act
, state
);
1167 case x86_DEBUG_STATE64
:
1169 x86_debug_state64_t
*state
;
1172 if (!thread_is_64bit_addr(thr_act
)) {
1173 return KERN_INVALID_ARGUMENT
;
1176 state
= (x86_debug_state64_t
*)tstate
;
1178 ret
= set_debug_state64(thr_act
, state
);
1182 case x86_DEBUG_STATE
:
1184 x86_debug_state_t
*state
;
1185 kern_return_t ret
= KERN_INVALID_ARGUMENT
;
1187 if (count
!= x86_DEBUG_STATE_COUNT
) {
1188 return KERN_INVALID_ARGUMENT
;
1191 state
= (x86_debug_state_t
*)tstate
;
1192 if (state
->dsh
.flavor
== x86_DEBUG_STATE64
&&
1193 state
->dsh
.count
== x86_DEBUG_STATE64_COUNT
&&
1194 thread_is_64bit_addr(thr_act
)) {
1195 ret
= set_debug_state64(thr_act
, &state
->uds
.ds64
);
1196 } else if (state
->dsh
.flavor
== x86_DEBUG_STATE32
&&
1197 state
->dsh
.count
== x86_DEBUG_STATE32_COUNT
&&
1198 !thread_is_64bit_addr(thr_act
)) {
1199 ret
= set_debug_state32(thr_act
, &state
->uds
.ds32
);
1204 return KERN_INVALID_ARGUMENT
;
1207 return KERN_SUCCESS
;
machine_thread_pc(thread_t thr_act)
    if (thread_is_64bit_addr(thr_act)) {
        return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;

        return (mach_vm_address_t)USER_REGS32(thr_act)->eip;

machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
    pal_register_cache_state(thr_act, DIRTY);

    if (thread_is_64bit_addr(thr_act)) {
        if (!IS_USERADDR64_CANONICAL(pc)) {

        USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;

        USER_REGS32(thr_act)->eip = (uint32_t)pc;
/*
 *	Get the status of the specified thread.
 */
machine_thread_get_state(
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
    case THREAD_STATE_FLAVOR_LIST:
            return KERN_INVALID_ARGUMENT;

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

    case THREAD_STATE_FLAVOR_LIST_NEW:
            return KERN_INVALID_ARGUMENT;

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

    case THREAD_STATE_FLAVOR_LIST_10_9:
            return KERN_INVALID_ARGUMENT;

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;

    case THREAD_STATE_FLAVOR_LIST_10_13:
            return KERN_INVALID_ARGUMENT;

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;
        tstate[5] = x86_AVX512_STATE;

    case THREAD_STATE_FLAVOR_LIST_10_15:
            return KERN_INVALID_ARGUMENT;

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;
        tstate[4] = x86_AVX_STATE;
        tstate[5] = x86_AVX512_STATE;
        tstate[6] = x86_PAGEIN_STATE;
    case x86_SAVED_STATE32:
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;

    case x86_SAVED_STATE64:
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
    case x86_FLOAT_STATE32:
        if (*count < x86_FLOAT_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);

    case x86_FLOAT_STATE64:
        if (*count < x86_FLOAT_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);

    case x86_FLOAT_STATE:
        x86_float_state_t *state;

        if (*count < x86_FLOAT_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit_addr(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count  = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);

            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count  = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);

        *count = x86_FLOAT_STATE_COUNT;
    case x86_AVX_STATE32:
    case x86_AVX512_STATE32:
        if (*count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, tstate, flavor);

    case x86_AVX_STATE64:
    case x86_AVX512_STATE64:
        if (*count != _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, tstate, flavor);

    case x86_AVX512_STATE:
        x86_avx_state_t *state;
        thread_state_t fstate;

        if (*count < _MachineStateCount[flavor]) {
            return KERN_INVALID_ARGUMENT;

        *count = _MachineStateCount[flavor];
        state = (x86_avx_state_t *)tstate;

        bzero((char *)state, *count * sizeof(int));

        if (thread_is_64bit_addr(thr_act)) {
            flavor -= 1;    /* 64-bit flavor */
            fstate = (thread_state_t) &state->ufs.as64;

            flavor -= 2;    /* 32-bit flavor */
            fstate = (thread_state_t) &state->ufs.as32;

        state->ash.flavor = flavor;
        state->ash.count  = _MachineStateCount[flavor];

        return fpu_get_fxstate(thr_act, fstate, flavor);
    case x86_THREAD_STATE32:
        if (*count < x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);

    case x86_THREAD_STATE64:
        if (*count < x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, tstate, FALSE);

    case x86_THREAD_FULL_STATE64:
        if (*count < x86_THREAD_FULL_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        /* If this process does not have a custom LDT, return failure */
        if (thr_act->task->i386_ldt == 0) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_THREAD_FULL_STATE64_COUNT;

        get_thread_state64(thr_act, tstate, TRUE);

    case x86_THREAD_STATE:
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit_addr(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count  = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64, FALSE);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count  = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);

        *count = x86_THREAD_STATE_COUNT;
    case x86_EXCEPTION_STATE32:
        if (*count < x86_EXCEPTION_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;

    case x86_EXCEPTION_STATE64:
        if (*count < x86_EXCEPTION_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;

    case x86_EXCEPTION_STATE:
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit_addr(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);

            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);

        *count = x86_EXCEPTION_STATE_COUNT;
    case x86_DEBUG_STATE32:
        if (*count < x86_DEBUG_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

    case x86_DEBUG_STATE64:
        if (*count < x86_DEBUG_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        if (!thread_is_64bit_addr(thr_act)) {
            return KERN_INVALID_ARGUMENT;

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

    case x86_DEBUG_STATE:
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit_addr(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count  = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);

            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count  = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);

        *count = x86_DEBUG_STATE_COUNT;
    case x86_PAGEIN_STATE:
        if (*count < x86_PAGEIN_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        x86_pagein_state_t *state = (void *)tstate;

        state->__pagein_error = thr_act->t_pagein_error;

        *count = x86_PAGEIN_STATE_COUNT;
    case x86_INSTRUCTION_STATE:
        if (*count < x86_INSTRUCTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        x86_instruction_state_t *state = (void *)tstate;
        x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;

        if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
#if DEVELOPMENT || DEBUG
            extern int insnstream_force_cacheline_mismatch;

            size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
                ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
            if (byte_count > 0) {
                bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);

            state->insn_offset = src_state->insn_offset;
            state->insn_stream_valid_bytes = byte_count;
#if DEVELOPMENT || DEBUG
            state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
            insnstream_force_cacheline_mismatch = 0;        /* One-shot, reset after use */

            if (state->out_of_synch) {
                bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
                    x86_INSTRUCTION_STATE_CACHELINE_SIZE);

                bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);

            state->out_of_synch = src_state->out_of_synch;

        *count = x86_INSTRUCTION_STATE_COUNT;
    case x86_LAST_BRANCH_STATE:
        if (!last_branch_support_enabled || *count < x86_LAST_BRANCH_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        istate = ml_set_interrupts_enabled(FALSE);
        /* If the current thread is asking for its own LBR data, synch the LBRs first */
        if (thr_act == current_thread()) {
            i386_lbr_synch(thr_act);

        ml_set_interrupts_enabled(istate);

        if (i386_lbr_native_state_to_mach_thread_state(THREAD_TO_PCB(thr_act), (last_branch_state_t *)tstate) < 0) {
            return KERN_INVALID_ARGUMENT;

        *count = x86_LAST_BRANCH_STATE_COUNT;

        return KERN_INVALID_ARGUMENT;

    return KERN_SUCCESS;
machine_thread_get_kern_state(
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
    x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL) {
        return KERN_FAILURE;
    case x86_THREAD_STATE32: {
        x86_thread_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    case x86_THREAD_STATE64: {
        x86_thread_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;

            panic("unknown thread state");

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;

    return KERN_FAILURE;
machine_thread_switch_addrmode(thread_t thread)
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* Adjust FPU state */
    fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);

    enable_preemption();

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
machine_set_current_thread(thread_t thread)
    current_cpu_datap()->cpu_active_thread = thread;

/*
 * Perform machine-dependent per-thread initializations
 */
machine_thread_init(void)
/*
 * machine_thread_template_init: Initialize machine-specific portion of
 * the thread template.
 */
machine_thread_template_init(thread_t thr_template)
    assert(fpu_default != UNDEFINED);

    THREAD_TO_PCB(thr_template)->xstate = fpu_default;

    thread_t thr_act = current_thread();

    if (thread_is_64bit_addr(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return iss64->isf.rip;

        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);
/*
 * detach and return a kernel stack from a thread
 */
machine_stack_detach(thread_t thread)
    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0,

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

/*
 * attach a kernel stack to a thread and initialize it
 */
machine_stack_attach(
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread_initialize_kernel_state(thread);

    statep = STACK_IKS(stack);

    /*
     * Reset the state of the thread to resume from a continuation,
     * including resetting the stack and frame pointer to avoid backtracers
     * seeing this temporary state and attempting to walk the defunct stack.
     */
    statep->k_rbp = (uint64_t) 0;
    statep->k_rip = (uint64_t) Thread_continue;
    statep->k_rbx = (uint64_t) thread_continue;
    statep->k_rsp = (uint64_t) STACK_IKS(stack);
/*
 * move a stack from old to new thread
 */
machine_stack_handoff(thread_t old,
    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;

    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_switch_context(old, new);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    pmap_switch_context(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

    ml_hv_cswitch(old, new);

    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);
struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};
act_thread_csave(void)
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit_addr(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL) {

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
            (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));

        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
            (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
            (thread_state_t)&ic64->ds,
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));

        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL) {

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
            (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));

        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
            (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
            (thread_state_t)&ic32->ds,
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
act_thread_catt(void *ctx)
    thread_t thr_act = current_thread();

    if (ctx == (void *)NULL) {

    if (thread_is_64bit_addr(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
            (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);

        kfree(ic64, sizeof(struct x86_act_context64));

        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
            (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);

        kfree(ic32, sizeof(struct x86_act_context32));

act_thread_cfree(__unused void *ctx)
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
copy_debug_state32(
    x86_debug_state32_t *src,
    x86_debug_state32_t *target,
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
copy_debug_state64(
    x86_debug_state64_t *src,
    x86_debug_state64_t *target,
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;