/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#include <kern/hv_support.h>
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
};
zone_t		iss_zone;	/* zone for saved_state area */
zone_t		ids_zone;	/* zone for debug_state area */
extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif
/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected general
 * protection fault.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7d & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
				((((*dr7d >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;		/* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);		/* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);		/* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);		/* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);		/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7d & 0x2)
		return (FALSE);

	if (*dr7d & (0x2<<2))
		return (FALSE);

	if (*dr7d & (0x2<<4))
		return (FALSE);

	if (*dr7d & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
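/*
 * Illustrative sketch (not part of the original source): one way a caller
 * could assemble a lower-32-bit DR7 value that dr7d_is_valid() accepts --
 * a locally enabled, 4-byte, break-on-write watchpoint in slot 1.  The bit
 * positions follow the checks above (R/W fields from bit 16, LEN fields
 * from bit 18, in 4-bit strides); the helper name is hypothetical and the
 * block is not compiled.
 */
#if 0
static uint32_t
example_dr7_write_watch_slot1(void)
{
	uint32_t dr7 = 0;

	dr7 |= 0x1 << 2;		/* L1: local enable for slot 1 (G1 stays clear) */
	dr7 |= 0x1 << (16 + 1*4);	/* R/W1 = 01B: break on data writes */
	dr7 |= 0x3 << (18 + 1*4);	/* LEN1 = 11B: 4-byte range */

	/* dr7d_is_valid(&dr7) would return TRUE and force the fixed bits. */
	return dr7;
}
#endif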
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}
static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
	}
}
/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
int
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return 0;
}
uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE   - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
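/*
 * Illustrative sketch (not part of the original source): how a user-space
 * caller typically reaches the x86_THREAD_STATE64 path below through the
 * Mach thread_get_state() interface.  The "__"-prefixed field names assume
 * the UNIX03 user-space layout of x86_thread_state64_t; the helper name is
 * hypothetical and the block is not compiled as part of this file.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
print_thread_pc(thread_act_t thread)	/* thread should be suspended by the caller */
{
	x86_thread_state64_t state;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = thread_get_state(thread, x86_THREAD_STATE64,
			      (thread_state_t)&state, &count);
	if (kr != KERN_SUCCESS) {
		printf("thread_get_state: %s\n", mach_error_string(kr));
		return;
	}
	printf("rip = 0x%llx  rsp = 0x%llx\n", state.__rip, state.__rsp);
}
#endif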
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
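/*
 * Illustrative sketch (not part of the original source): a user-space
 * read-modify-write of a suspended thread's general registers, which is
 * the usual client of machine_thread_set_state() below.  Note that
 * set_thread_state64() will scrub cs and the reserved rflags bits, so only
 * the fields a debugger actually changes need to be supplied.  The helper
 * name is hypothetical and the block is not compiled as part of this file.
 */
#if 0
#include <mach/mach.h>
#include <stdint.h>

static kern_return_t
redirect_suspended_thread(thread_act_t thread, uint64_t new_rip)
{
	x86_thread_state64_t state;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = thread_get_state(thread, x86_THREAD_STATE64,
			      (thread_state_t)&state, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	state.__rip = new_rip;		/* resume execution at new_rip */

	return thread_set_state(thread, x86_THREAD_STATE64,
				(thread_state_t)&state, x86_THREAD_STATE64_COUNT);
}
#endif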
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_FLOAT_STATE64);
		}
		if (state->ash.flavor == x86_FLOAT_STATE32 &&
		    state->ash.count  == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;

		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;

		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}
	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}
	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}
	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}
	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;

		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}

	return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			 thread_max * sizeof(x86_debug_state64_t),
			 THREAD_CHUNK * sizeof(x86_debug_state64_t),
			 "x86_64 debug state");
}
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);

	return;
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}
void act_thread_cfree(__unused void *ctx)
{
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t	*src,
		x86_debug_state32_t	*target,
		boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t	*src,
		x86_debug_state64_t	*target,
		boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}