/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>     /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */
    0,
    x86_THREAD_STATE32_COUNT,
    x86_FLOAT_STATE32_COUNT,
    x86_EXCEPTION_STATE32_COUNT,
    x86_THREAD_STATE64_COUNT,
    x86_FLOAT_STATE64_COUNT,
    x86_EXCEPTION_STATE64_COUNT,
    x86_THREAD_STATE_COUNT,
    x86_FLOAT_STATE_COUNT,
    x86_EXCEPTION_STATE_COUNT,
    0,
    x86_SAVED_STATE32_COUNT,
    x86_SAVED_STATE64_COUNT,
    x86_DEBUG_STATE32_COUNT,
    x86_DEBUG_STATE64_COUNT,
    x86_DEBUG_STATE_COUNT
};
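
/*
 * A sketch of how a caller might consult this table (hypothetical, for
 * illustration only; actual callers vary): the table is indexed by
 * thread-state flavor, so a generic size check could look like
 *
 *      if (flavor < sizeof(_MachineStateCount)/sizeof(unsigned int) &&
 *          count < _MachineStateCount[flavor])
 *              return (KERN_INVALID_ARGUMENT);
 *
 * which is why each slot must stay in sync with the corresponding
 * *_COUNT constant in <mach/thread_status.h>.
 */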

zone_t iss_zone;    /* zone for saved_state area */
zone_t ids_zone;    /* zone for debug_state area */

/* Forward */

extern void Thread_continue(void);
extern void Load_context(
    thread_t    thread);
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if CONFIG_COUNTERS
static inline void machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline void pmc_swi(thread_t /* old */, thread_t /* new */);

static inline void pmc_swi(thread_t old, thread_t new) {
    current_cpu_datap()->csw_old_thread = old;
    current_cpu_datap()->csw_new_thread = new;
    pal_pmc_swi();
}

static inline void machine_pmc_cswitch(thread_t old, thread_t new) {
    if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
        pmc_swi(old, new);
    }
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
    *old = current_cpu_datap()->csw_old_thread;
    *new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate I/O reads and writes
     */
    if (!(get_cr4() & CR4_DE))
        for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
             i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * len0-3 pattern "10B" is ok for len on Merom and newer processors
     * (it signifies an 8-byte wide region). We use the 64bit capability
     * of the processor in lieu of the more laborious model/family checks
     * as all 64-bit capable processors so far support this.
     * Reject an attempt to use this on 64-bit incapable processors.
     */
    if (current_cpu_datap()->cpu_is64bit == FALSE)
        for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
             i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++)
        if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
            ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
            return (FALSE);

    /*
     * Intel docs have these bits fixed.
     */
    *dr7 |= 0x1 << 10;      /* set bit 10 to 1 */
    *dr7 &= ~(0x1 << 11);   /* set bit 11 to 0 */
    *dr7 &= ~(0x1 << 12);   /* set bit 12 to 0 */
    *dr7 &= ~(0x1 << 14);   /* set bit 14 to 0 */
    *dr7 &= ~(0x1 << 15);   /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */
    if (*dr7 & 0x2)
        return (FALSE);

    if (*dr7 & (0x2<<2))
        return (FALSE);

    if (*dr7 & (0x2<<4))
        return (FALSE);

    if (*dr7 & (0x2<<6))
        return (FALSE);

    return (TRUE);
}
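
/*
 * For illustration (a sketch, not kernel code): per the Intel SDM bit
 * layout, a dr7 value requesting a local, write-only, 4-byte watchpoint
 * on the address in dr0 would be built as
 *
 *      uint32_t dr7 = 0x1          (L0: local enable for dr0)
 *                   | (0x1 << 16)  (R/W0 = 01B: break on data writes)
 *                   | (0x3 << 18); (LEN0 = 11B: 4-byte region)
 *
 * dr7_is_valid() accepts this pattern and then forces the fixed bits
 * (bit 10 set; bits 11, 12, 14 and 15 clear) before the value is used.
 */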
static void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
    __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
    __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
    __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
    __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
    if (cpu_mode_is64bit())
        cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
    /*
     * We need to enter 64-bit mode in order to set the full
     * width of these registers
     */
    set_64bit_debug_regs(ds);
    cdp->cpu_dr7 = ds->dr7;
}
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
    if (!dr7_is_valid(&ds->dr7))
        return FALSE;

#if defined(__i386__)
    /*
     * Only allow local breakpoints and make sure they are not
     * in the trampoline code.
     */
    if (ds->dr7 & 0x1)
        if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<2))
        if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<4))
        if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<6))
        if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;
#endif

    return TRUE;
}
boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
    if (!dr7_is_valid((uint32_t *)&ds->dr7))
        return FALSE;

    /*
     * Don't allow the user to set debug addresses above their max
     * value
     */
    if (ds->dr7 & 0x1)
        if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<2))
        if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<4))
        if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<6))
        if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid32(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
        }
    }

    copy_debug_state32(ds, ids, FALSE);

    return (KERN_SUCCESS);
}
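
/*
 * Note on the allocation pattern above: zalloc() may block, so the
 * debug-state area is allocated before taking the pcb lock, and the
 * pcb->ids recheck under the lock resolves the race where two threads
 * both observed ids == NULL; the loser frees its redundant allocation.
 * set_debug_state64() below follows the same scheme.
 */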

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid64(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
        }
    }

    copy_debug_state64(ds, ids, FALSE);

    return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.ids;

    if (saved_state) {
        copy_debug_state32(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.ids;

    if (saved_state) {
        copy_debug_state64(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *  Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t        new)
{
#if CONFIG_COUNTERS
    machine_pmc_cswitch(NULL, new);
#endif
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(NULL, new);
    Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
    thread_t            old,
    thread_continue_t   continuation,
    thread_t            new)
{
#if MACH_RT
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif
    /*
     *  Save FP registers if in use.
     */
    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Monitor the stack depth and report new max,
     * not worrying about races.
     */
    vm_offset_t depth = current_stack_depth();
    if (depth > kernel_stack_depth_max) {
        kernel_stack_depth_max = depth;
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
            (long) depth, 0, 0, 0, 0);
    }

    /*
     *  Switch address maps if need be, even if not switching tasks.
     *  (A server activation may be "borrowing" a client map.)
     */
    PMAP_SWITCH_CONTEXT(old, new, cpu_number());

    /*
     *  Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(old, new);

    return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
    thread_t    thread,
    void        (*doshutdown)(processor_t),
    processor_t processor)
{
#if CONFIG_VMX
    vmx_suspend();
#endif
    fpu_save_context(thread);
    PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
    return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
int
machine_thread_state_initialize(
    thread_t thread)
{
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're the target, re-arm the no-fpu trap.
     */
    if (thread->machine.ifps) {
        (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

        if (thread == current_thread())
            clear_fpu();
    }

    if (thread->machine.ids) {
        zfree(ids_zone, thread->machine.ids);
        thread->machine.ids = NULL;
    }

    return 0;
}
uint32_t
get_eflags_exportmask(void)
{
    return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32     - internal save/restore general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_SAVED_STATE64     - internal save/restore general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_THREAD_STATE32    - external set/get general register state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_THREAD_STATE64    - external set/get general register state on 64 bit processors
 *                         for 64bit tasks only
 * x86_SAVED_STATE       - external set/get general register state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32     - internal/external save/restore float and xmm state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_FLOAT_STATE64     - internal/external save/restore float and xmm state on 64 bit processors
 *                         for 64bit tasks only
 * x86_FLOAT_STATE       - external save/restore float and xmm state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *                         for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *                         for 64bit tasks only
 * x86_EXCEPTION_STATE   - external get exception state on 32/64 bit processors
 *                         for either 32bit or 64bit tasks
 */
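
/*
 * As a usage sketch (user-space side, illustrative only): the generic
 * x86_THREAD_STATE flavor lets a caller fetch register state without
 * knowing the target task's bitness, e.g.
 *
 *      x86_thread_state_t ts;
 *      mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *
 *      kern_return_t kr = thread_get_state(target_thread,
 *              x86_THREAD_STATE, (thread_state_t)&ts, &count);
 *      if (kr == KERN_SUCCESS && ts.tsh.flavor == x86_THREAD_STATE64)
 *              use(ts.uts.ts64.rip);
 *
 * where target_thread and use() stand in for caller context.  The
 * kernel ends of such calls are machine_thread_get_state() and
 * machine_thread_set_state() below.
 */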

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     */
    ts->cs = USER_CS;
#ifdef __i386__
    if (ts->ss == 0) ts->ss = USER_DS;
    if (ts->ds == 0) ts->ds = USER_DS;
    if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
    /*
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     * manually any more.
     */
    ts->ss = USER_DS;
    ts->ds = USER_DS;
    ts->es = USER_DS;
#endif

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
                                      ts->ss,
                                      ts->ds,
                                      ts->es,
                                      ts->fs,
                                      ts->gs))
        return(KERN_INVALID_ARGUMENT);

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
        saved_state->cs = SYSENTER_TF_CS;

    return(KERN_SUCCESS);
}
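
/*
 * The EFL_USER_CLEAR / EFL_USER_SET scrub above is the security-relevant
 * step: privileged bits in the user-supplied flags image (such as IOPL)
 * are stripped, and mandatory bits (such as EFL_IF, going by this
 * kernel's <i386/eflags.h> definitions) are forced on.  For example, a
 * request of eflags == 0 still resumes with interrupts enabled, so a
 * thread cannot hand itself a flags image that disables interrupts or
 * raises its I/O privilege level.
 */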

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip))
        return(KERN_INVALID_ARGUMENT);

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;
    saved_state->isf.cs = USER64_CS;
    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;

    return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor) {
    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
                                          state->ss,
                                          state->ds,
                                          state->es,
                                          state->fs,
                                          state->gs))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
            state->cs = SYSENTER_TF_CS;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs))
            return KERN_INVALID_ARGUMENT;

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        /*
         * General registers
         */
        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (count != x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (count != x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        }
        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        return(KERN_INVALID_ARGUMENT);
    }

    case x86_AVX_STATE32:
    {
        if (count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (count != x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
    }

    case x86_THREAD_STATE64:
    {
        if (count != x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
                   state->tsh.count == x86_THREAD_STATE32_COUNT &&
                   !thread_is_64bit(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);
        } else
            return(KERN_INVALID_ARGUMENT);
    }

    case x86_DEBUG_STATE32:
    {
        x86_debug_state32_t *state;
        kern_return_t ret;

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state32_t *)tstate;

        ret = set_debug_state32(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE64:
    {
        x86_debug_state64_t *state;
        kern_return_t ret;

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state64_t *)tstate;

        ret = set_debug_state64(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;
        kern_return_t ret = KERN_INVALID_ARGUMENT;

        if (count != x86_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;
        if (state->dsh.flavor == x86_DEBUG_STATE64 &&
            state->dsh.count == x86_DEBUG_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            ret = set_debug_state64(thr_act, &state->uds.ds64);
        } else
            if (state->dsh.flavor == x86_DEBUG_STATE32 &&
                state->dsh.count == x86_DEBUG_STATE32_COUNT &&
                !thread_is_64bit(thr_act)) {
                ret = set_debug_state32(thr_act, &state->uds.ds32);
            }
        return ret;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor)  {
    case THREAD_STATE_FLAVOR_LIST:
    {
        if (*count < 3)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

        *count = 3;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_NEW:
    {
        if (*count < 4)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

        *count = 4;
        break;
    }

    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;
        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (*count < x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (*count < x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t   *state;
        kern_return_t       kret;

        if (*count < x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count  = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        } else {
            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count  = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        *count = x86_FLOAT_STATE_COUNT;

        return(kret);
    }

    case x86_AVX_STATE32:
    {
        if (*count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (*count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (*count < x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        break;
    }

    case x86_THREAD_STATE64:
    {
        if (*count < x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
        break;
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count  = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64);
        } else {
            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count  = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);
        }
        *count = x86_THREAD_STATE_COUNT;

        break;
    }

    case x86_EXCEPTION_STATE32:
    {
        if (*count < x86_EXCEPTION_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE64:
    {
        if (*count < x86_EXCEPTION_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE:
    {
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);
        } else {
            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);
        }
        *count = x86_EXCEPTION_STATE_COUNT;

        break;
    }
    case x86_DEBUG_STATE32:
    {
        if (*count < x86_DEBUG_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

        break;
    }
    case x86_DEBUG_STATE64:
    {
        if (*count < x86_DEBUG_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

        break;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count  = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);
        } else {
            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count  = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);
        }
        *count = x86_DEBUG_STATE_COUNT;
        break;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
    thread_t        thread,
    thread_flavor_t     flavor,
    thread_state_t      tstate,
    mach_msg_type_number_t  *count)
{
    x86_saved_state_t   *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL)
        return KERN_FAILURE;

    switch (flavor) {
    case x86_THREAD_STATE32: {
        x86_thread_state32_t    *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE64: {
        x86_thread_state64_t    *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;
        } else {
            panic("unknown thread state");
        }

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;
    }
    }
    return KERN_FAILURE;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
#if defined(__i386__)
        if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
            pmap_load_kernel_cr3();
#endif /* defined(__i386) */
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);
    }
    enable_preemption();
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
    current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
    if (cpu_mode_is64bit()) {
        assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
        iss_zone = zinit(sizeof(x86_sframe64_t),
                thread_max * sizeof(x86_sframe64_t),
                THREAD_CHUNK * sizeof(x86_sframe64_t),
                "x86_64 saved state");

        ids_zone = zinit(sizeof(x86_debug_state64_t),
                thread_max * sizeof(x86_debug_state64_t),
                THREAD_CHUNK * sizeof(x86_debug_state64_t),
                "x86_64 debug state");

    } else {
        iss_zone = zinit(sizeof(x86_sframe32_t),
                thread_max * sizeof(x86_sframe32_t),
                THREAD_CHUNK * sizeof(x86_sframe32_t),
                "x86 saved state");
        ids_zone = zinit(sizeof(x86_debug_state32_t),
                thread_max * (sizeof(x86_debug_state32_t)),
                THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
                "x86 debug state");
    }

    fpu_module_init();
}

#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void dump_handlers(thread_t);
void        dump_regs(thread_t);
int         dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
    ReturnHandler *rhp = thr_act->handlers;
    int counter = 0;

    printf("\t");
    while (rhp) {
        if (rhp == &thr_act->special_handler){
            if (rhp->next)
                printf("[NON-Zero next ptr(%p)]", rhp->next);
            printf("special_handler()->");
            break;
        }
        printf("hdlr_%d(%p)->", counter, rhp->handler);
        rhp = rhp->next;
        if (++counter > 32) {
            printf("Aborting: HUGE handler chain\n");
            break;
        }
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *ssp;

        ssp = USER_REGS64(thr_act);

        panic("dump_regs: 64bit tasks not yet supported");

    } else {
        x86_saved_state32_t *ssp;

        ssp = USER_REGS32(thr_act);

        /*
         * Print out user register state
         */
        printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
            ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

        printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
            ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

        printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thread(%p)(%d): task=%p(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->task,
           thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
           thr_act->suspend_count, thr_act->user_stop_count,
           thr_act->active, thr_act->ast);
    printf("\tpcb=%p\n", &thr_act->machine);

    if (thr_act->kernel_stack) {
        vm_offset_t stack = thr_act->kernel_stack;

        printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
            (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
            STACK_IKS(stack)->k_esp, thr_act->machine.iss);
    }

    dump_handlers(thr_act);

    return((int)thr_act);
}
#endif

user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return(iss64->isf.rip);
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);

        return(iss32->eip);
    }
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
             (uintptr_t)thread_tid(thread), thread->priority,
             thread->sched_pri, 0,
             0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
    thread_t        thread,
    vm_offset_t     stack)
{
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
             (uintptr_t)thread_tid(thread), thread->priority,
             thread->sched_pri, 0, 0);

    assert(stack);
    thread->kernel_stack = stack;

    statep = STACK_IKS(stack);
#if defined(__x86_64__)
    statep->k_rip = (unsigned long) Thread_continue;
    statep->k_rbx = (unsigned long) thread_continue;
    statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) thread_continue;
    statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

    return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
              thread_t new)
{
    vm_offset_t stack;

    assert(new);
    assert(old);

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    PMAP_SWITCH_CONTEXT(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

    machine_set_current_thread(new);

    return;
}

struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};

void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
                (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
                        x86_DEBUG_STATE64,
                        (thread_state_t)&ic64->ds,
                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        return(ic64);
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
                (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
                        x86_DEBUG_STATE32,
                        (thread_state_t)&ic32->ds,
                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        return(ic32);
    }
}

void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
        return;

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
        }
        kfree(ic64, sizeof(struct x86_act_context64));
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
        }
        kfree(ic32, sizeof(struct x86_act_context32));
    }
}

void act_thread_cfree(__unused void *ctx)
{
    /* XXX - Unused */
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
    thread->machine.arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
    return (thread->machine.arg_store_valid);
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
        x86_debug_state32_t *src,
        x86_debug_state32_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
        x86_debug_state64_t *src,
        x86_debug_state64_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}

boolean_t is_useraddr64_canonical(uint64_t addr64);

boolean_t
is_useraddr64_canonical(uint64_t addr64)
{
    return IS_USERADDR64_CANONICAL(addr64);
}
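
/*
 * Background, for reference: x86-64 "canonical" form requires bits
 * 63:47 of a virtual address to be copies of bit 47.  For example,
 * 0x00007fffffffefff is canonical while 0x0000800000000000 is not;
 * user addresses are further range-limited, as the debug-register
 * checks against VM_MAX_PAGE_ADDRESS above also enforce.
 */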