/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>        /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */
    0,
    x86_THREAD_STATE32_COUNT,
    x86_FLOAT_STATE32_COUNT,
    x86_EXCEPTION_STATE32_COUNT,
    x86_THREAD_STATE64_COUNT,
    x86_FLOAT_STATE64_COUNT,
    x86_EXCEPTION_STATE64_COUNT,
    x86_THREAD_STATE_COUNT,
    x86_FLOAT_STATE_COUNT,
    x86_EXCEPTION_STATE_COUNT,
    0,
    x86_SAVED_STATE32_COUNT,
    x86_SAVED_STATE64_COUNT,
    x86_DEBUG_STATE32_COUNT,
    x86_DEBUG_STATE64_COUNT,
    x86_DEBUG_STATE_COUNT
};

zone_t iss_zone;    /* zone for saved_state area */
zone_t ids_zone;    /* zone for debug_state area */
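/*
 * Illustrative note (added, not in the original source): generic kernel code
 * such as the exception-delivery path typically sizes a state buffer by
 * flavor from this table before calling down into the machine-dependent
 * get/set routines, roughly:
 *
 *  mach_msg_type_number_t state_cnt = _MachineStateCount[x86_THREAD_STATE64];
 *  // state_cnt == x86_THREAD_STATE64_COUNT
 */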
extern void Thread_continue(void);
extern void Load_context(
    thread_t thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);
static inline boolean_t
machine_thread_pmc_eligible(thread_t t) {
    /*
     * NOTE: Task-level reservations are propagated to child threads via
     * thread_create_internal.  Any mutation of task reservations forces a
     * recalculate of t_chud (for the pmc flag) for all threads in that task.
     * Consequently, we can simply check the current thread's flag against
     * THREAD_PMC_FLAG.  If the result is non-zero, we SWI for a PMC switch.
     */
    return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
}

static inline void
pmc_swi(thread_t old, thread_t new) {
    current_cpu_datap()->csw_old_thread = old;
    current_cpu_datap()->csw_new_thread = new;
    pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
    if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
        pmc_swi(old, new);
    }
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
    *old = current_cpu_datap()->csw_old_thread;
    *new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate i/o reads and write
     */
    if (!(get_cr4() & CR4_DE))
        for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
                i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * len0-3 pattern "10B" is ok for len on Merom and newer processors
     * (it signifies an 8-byte wide region). We use the 64bit capability
     * of the processor in lieu of the more laborious model/family checks
     * as all 64-bit capable processors so far support this.
     * Reject an attempt to use this on 64-bit incapable processors.
     */
    if (current_cpu_datap()->cpu_is64bit == FALSE)
        for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
                i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++)
        if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
                ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
            return (FALSE);

    /*
     * Intel docs have these bits fixed.
     */
    *dr7 |= 0x1 << 10;    /* set bit 10 to 1 */
    *dr7 &= ~(0x1 << 11); /* set bit 11 to 0 */
    *dr7 &= ~(0x1 << 12); /* set bit 12 to 0 */
    *dr7 &= ~(0x1 << 14); /* set bit 14 to 0 */
    *dr7 &= ~(0x1 << 15); /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */
    if (*dr7 & 0x2)
        return (FALSE);

    if (*dr7 & (0x2<<2))
        return (FALSE);

    if (*dr7 & (0x2<<4))
        return (FALSE);

    if (*dr7 & (0x2<<6))
        return (FALSE);

    return (TRUE);
}
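/*
 * Illustrative note (added, not in the original source): DR7 keeps one
 * local/global enable bit pair per breakpoint in bits 0-7 and one R/W + LEN
 * nibble per breakpoint in bits 16-31, which is what the shifted masks above
 * walk.  A hypothetical caller requesting a single local execute breakpoint
 * in DR0 might do roughly:
 *
 *  uint32_t dr7 = 0x1;             // L0 set: local-enable breakpoint 0
 *                                  // R/W0 = 00B (execute), LEN0 = 00B
 *  if (dr7_is_valid(&dr7)) {
 *      // accepted: bit 10 is now forced on, bits 11, 12, 14 and 15 are
 *      // cleared, and no global-enable (G0-G3) bits are set
 *  }
 */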
static void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
    __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
    __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
    __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
    __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
    if (cpu_mode_is64bit())
        cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
    /*
     * We need to enter 64-bit mode in order to set the full
     * width of these registers
     */
    set_64bit_debug_regs(ds);
    cdp->cpu_dr7 = ds->dr7;
}
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
    if (!dr7_is_valid(&ds->dr7))
        return FALSE;

#if defined(__i386__)
    /*
     * Only allow local breakpoints and make sure they are not
     * in the trampoline code.
     */
    if (ds->dr7 & 0x1)
        if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<2))
        if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<4))
        if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;

    if (ds->dr7 & (0x1<<6))
        if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
            return FALSE;
#endif

    return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
    if (!dr7_is_valid((uint32_t *)&ds->dr7))
        return FALSE;

    /*
     * Don't allow the user to set debug addresses above their max
     * value
     */
    if (ds->dr7 & 0x1)
        if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<2))
        if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<4))
        if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    if (ds->dr7 & (0x1<<6))
        if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
            return FALSE;

    return TRUE;
}
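/*
 * Note (added, not in the original source): the VM_MAX_PAGE_ADDRESS bound
 * above matters because the hardware debug registers stay loaded while the
 * thread runs in kernel mode, so a user-supplied breakpoint address in the
 * kernel range could otherwise trigger debug exceptions on kernel accesses.
 */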
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid32(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
            ids = pcb->ids;
        }
    }

    copy_debug_state32(ds, ids, FALSE);

    return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *ids;
    pcb_t pcb;

    pcb = THREAD_TO_PCB(thread);
    ids = pcb->ids;

    if (debug_state_is_valid64(ds) != TRUE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (ids == NULL) {
        ids = zalloc(ids_zone);
        bzero(ids, sizeof *ids);

        simple_lock(&pcb->lock);
        /* make sure it wasn't already alloc()'d elsewhere */
        if (pcb->ids == NULL) {
            pcb->ids = ids;
            simple_unlock(&pcb->lock);
        } else {
            simple_unlock(&pcb->lock);
            zfree(ids_zone, ids);
            ids = pcb->ids;
        }
    }

    copy_debug_state64(ds, ids, FALSE);

    return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.ids;

    if (saved_state) {
        copy_debug_state32(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.ids;

    if (saved_state) {
        copy_debug_state64(saved_state, ds, TRUE);
    } else
        bzero(ds, sizeof *ds);
}
/*
 *	consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t new)
{
#if CONFIG_COUNTERS
    machine_pmc_cswitch(NULL, new);
#endif
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(NULL, new);
    Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
    thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif
    /*
     * Save FP registers if in use.
     */
    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Monitor the stack depth and report new max,
     * not worrying about races.
     */
    vm_offset_t depth = current_stack_depth();
    if (depth > kernel_stack_depth_max) {
        kernel_stack_depth_max = depth;
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
            (long) depth, 0, 0, 0, 0);
    }

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    PMAP_SWITCH_CONTEXT(old, new, cpu_number());

    /*
     * Load the rest of the user state for the new thread
     */
    act_machine_switch_pcb(old, new);

    return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
    thread_t thread,
    void (*doshutdown)(processor_t),
    processor_t processor)
{
    fpu_save_context(thread);
    PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
    return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
int
machine_thread_state_initialize(
    thread_t thread)
{
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're target, re-arm the no-fpu trap.
     */
    if (thread->machine.ifps) {
        (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

        if (thread == current_thread())
            clear_fpu();
    }

    if (thread->machine.ids) {
        zfree(ids_zone, thread->machine.ids);
        thread->machine.ids = NULL;
    }

    return 0;
}

uint32_t
get_eflags_exportmask(void)
{
    return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32       - internal save/restore general register state on 32/64 bit processors
 *                           for 32bit tasks only
 * x86_SAVED_STATE64       - internal save/restore general register state on 64 bit processors
 *                           for 64bit tasks only
 * x86_THREAD_STATE32      - external set/get general register state on 32/64 bit processors
 *                           for 32bit tasks only
 * x86_THREAD_STATE64      - external set/get general register state on 64 bit processors
 *                           for 64bit tasks only
 * x86_SAVED_STATE         - external set/get general register state on 32/64 bit processors
 *                           for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32       - internal/external save/restore float and xmm state on 32/64 bit processors
 *                           for 32bit tasks only
 * x86_FLOAT_STATE64       - internal/external save/restore float and xmm state on 64 bit processors
 *                           for 64bit tasks only
 * x86_FLOAT_STATE         - external save/restore float and xmm state on 32/64 bit processors
 *                           for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32   - external get exception state on 32/64 bit processors
 *                           for 32bit tasks only
 * x86_EXCEPTION_STATE64   - external get exception state on 64 bit processors
 *                           for 64bit tasks only
 * x86_EXCEPTION_STATE     - external get exception state on 32/64 bit processors
 *                           for either 32bit or 64bit tasks
 */
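/*
 * Illustrative sketch (added, not in the original source): a user-space
 * caller typically asks for the generic x86_THREAD_STATE flavor and then
 * branches on the returned header; thread_port is assumed to be a send
 * right to the target thread, and use64()/use32() are hypothetical helpers
 * named only for this sketch:
 *
 *  x86_thread_state_t ts;
 *  mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *
 *  if (thread_get_state(thread_port, x86_THREAD_STATE,
 *          (thread_state_t)&ts, &count) == KERN_SUCCESS) {
 *      if (ts.tsh.flavor == x86_THREAD_STATE64)
 *          use64(&ts.uts.ts64);    // 64-bit task registers
 *      else
 *          use32(&ts.uts.ts32);    // 32-bit task registers
 *  }
 */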
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     */
    ts->cs = USER_CS;
#ifdef __i386__
    if (ts->ss == 0) ts->ss = USER_DS;
    if (ts->ds == 0) ts->ds = USER_DS;
    if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
    /*
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     * manually any more.
     */
    ts->ss = USER_DS;
    ts->ds = USER_DS;
    ts->es = USER_DS;
#endif

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
                ts->ss,
                ts->ds,
                ts->es,
                ts->fs,
                ts->gs))
        return(KERN_INVALID_ARGUMENT);

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
        saved_state->cs = SYSENTER_TF_CS;

    return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip))
        return(KERN_INVALID_ARGUMENT);

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;
    saved_state->isf.cs = USER64_CS;
    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;

    return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}
/*
 *	act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor) {
    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
                    state->ss,
                    state->ds,
                    state->es,
                    state->fs,
                    state->gs))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
            state->cs = SYSENTER_TF_CS;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs))
            return KERN_INVALID_ARGUMENT;

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        /*
         * General registers
         */
        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked.  Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (count != x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (count != x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        }
        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        return(KERN_INVALID_ARGUMENT);
    }

    case x86_AVX_STATE32:
    {
        if (count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (count != x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
    }

    case x86_THREAD_STATE64:
    {
        if (count != x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
            state->tsh.count == x86_THREAD_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);
        } else
            return(KERN_INVALID_ARGUMENT);
    }

    case x86_DEBUG_STATE32:
    {
        x86_debug_state32_t *state;
        kern_return_t ret;

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state32_t *)tstate;

        ret = set_debug_state32(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE64:
    {
        x86_debug_state64_t *state;
        kern_return_t ret;

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state64_t *)tstate;

        ret = set_debug_state64(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;
        kern_return_t ret = KERN_INVALID_ARGUMENT;

        if (count != x86_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;
        if (state->dsh.flavor == x86_DEBUG_STATE64 &&
            state->dsh.count == x86_DEBUG_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            ret = set_debug_state64(thr_act, &state->uds.ds64);
        } else
        if (state->dsh.flavor == x86_DEBUG_STATE32 &&
            state->dsh.count == x86_DEBUG_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            ret = set_debug_state32(thr_act, &state->uds.ds32);
        }
        return ret;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}
/*
 *	Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {

    case THREAD_STATE_FLAVOR_LIST:
    {
        if (*count < 3)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

        *count = 3;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_NEW:
    {
        if (*count < 4)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

        *count = 4;
        break;
    }

    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;
        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (*count < x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (*count < x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;
        kern_return_t kret;

        if (*count < x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count  = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        } else {
            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count  = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        *count = x86_FLOAT_STATE_COUNT;

        return(kret);
    }

    case x86_AVX_STATE32:
    {
        if (*count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (*count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (*count < x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        break;
    }

    case x86_THREAD_STATE64:
    {
        if (*count < x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
        break;
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count  = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64);
        } else {
            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count  = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);
        }
        *count = x86_THREAD_STATE_COUNT;

        break;
    }

    case x86_EXCEPTION_STATE32:
    {
        if (*count < x86_EXCEPTION_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE64:
    {
        if (*count < x86_EXCEPTION_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE:
    {
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);
        } else {
            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);
        }
        *count = x86_EXCEPTION_STATE_COUNT;

        break;
    }
    case x86_DEBUG_STATE32:
    {
        if (*count < x86_DEBUG_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

        break;
    }
    case x86_DEBUG_STATE64:
    {
        if (*count < x86_DEBUG_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

        break;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count  = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);
        } else {
            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count  = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);
        }
        *count = x86_DEBUG_STATE_COUNT;
        break;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL)
        return KERN_FAILURE;

    switch (flavor) {
    case x86_THREAD_STATE32: {
        x86_thread_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE64: {
        x86_thread_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;
        } else {
            panic("unknown thread state");
        }

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;
    }
    }
    return KERN_FAILURE;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
#if defined(__i386__)
        if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
            pmap_load_kernel_cr3();
#endif /* defined(__i386) */
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);
    }
    enable_preemption();
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
    current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * This is called when a task is terminated, and also on exec().
 * Clear machine-dependent state that is stored on the task.
 */
void
machine_thread_terminate_self(void)
{
    task_t self_task = current_task();
    if (self_task) {
        user_ldt_t user_ldt = self_task->i386_ldt;
        if (user_ldt != 0) {
            self_task->i386_ldt = 0;
            user_ldt_free(user_ldt);
        }

        if (self_task->task_debug != NULL) {
            zfree(ids_zone, self_task->task_debug);
            self_task->task_debug = NULL;
        }
    }
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
    if (cpu_mode_is64bit()) {
        assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
        iss_zone = zinit(sizeof(x86_sframe64_t),
                thread_max * sizeof(x86_sframe64_t),
                THREAD_CHUNK * sizeof(x86_sframe64_t),
                "x86_64 saved state");

        ids_zone = zinit(sizeof(x86_debug_state64_t),
                thread_max * sizeof(x86_debug_state64_t),
                THREAD_CHUNK * sizeof(x86_debug_state64_t),
                "x86_64 debug state");
    } else {
        iss_zone = zinit(sizeof(x86_sframe32_t),
                thread_max * sizeof(x86_sframe32_t),
                THREAD_CHUNK * sizeof(x86_sframe32_t),
                "x86 saved state");
        ids_zone = zinit(sizeof(x86_debug_state32_t),
                thread_max * (sizeof(x86_debug_state32_t)),
                THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
                "x86 debug state");
    }
}
#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void dump_handlers(thread_t);
void dump_regs(thread_t);
int dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
    ReturnHandler *rhp = thr_act->handlers;
    int counter = 0;

    printf("\t");
    while (rhp) {
        if (rhp == &thr_act->special_handler){
            if (rhp->next)
                printf("[NON-Zero next ptr(%p)]", rhp->next);
            printf("special_handler()->");
            break;
        }
        printf("hdlr_%d(%p)->", counter, rhp->handler);
        rhp = rhp->next;
        if (++counter > 32) {
            printf("Aborting: HUGE handler chain\n");
            break;
        }
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *ssp;

        ssp = USER_REGS64(thr_act);

        panic("dump_regs: 64bit tasks not yet supported");
    } else {
        x86_saved_state32_t *ssp;

        ssp = USER_REGS32(thr_act);

        /*
         * Print out user register state
         */
        printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
            ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

        printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
            ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

        printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_t thr_act)
{
    if (thr_act == 0)
        return(0);

    printf("thread(%p)(%d): task=%p(%d)\n",
        thr_act, thr_act->ref_count,
        thr_act->task,
        thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
        thr_act->suspend_count, thr_act->user_stop_count,
        thr_act->active, thr_act->ast);
    printf("\tpcb=%p\n", &thr_act->machine);

    if (thr_act->kernel_stack) {
        vm_offset_t stack = thr_act->kernel_stack;

        printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
            (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
            STACK_IKS(stack)->k_esp, thr_act->machine.iss);
    }

    dump_handlers(thr_act);
    dump_regs(thr_act);
    return((int)thr_act);
}
#endif

user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return(iss64->isf.rip);
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);

        return(iss32->eip);
    }
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0,
        0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
        (uintptr_t)thread_tid(thread), thread->priority,
        thread->sched_pri, 0, 0);

    assert(stack);
    thread->kernel_stack = stack;

    statep = STACK_IKS(stack);
#if defined(__x86_64__)
    statep->k_rip = (unsigned long) Thread_continue;
    statep->k_rbx = (unsigned long) thread_continue;
    statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) thread_continue;
    statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

    return;
}
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
    thread_t new)
{
    vm_offset_t stack;

    assert(new);
    assert(old);

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    PMAP_SWITCH_CONTEXT(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

    machine_set_current_thread(new);

    return;
}
struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
                (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
                x86_DEBUG_STATE64,
                (thread_state_t)&ic64->ds,
                &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        return(ic64);
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
                (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
                x86_DEBUG_STATE32,
                (thread_state_t)&ic32->ds,
                &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        return(ic32);
    }
}
void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
        return;

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
        }
        kfree(ic64, sizeof(struct x86_act_context64));
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
        }
        kfree(ic32, sizeof(struct x86_act_context32));
    }
}
void act_thread_cfree(__unused void *ctx)
{
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
    thread->machine.arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
    return (thread->machine.arg_store_valid);
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
        x86_debug_state32_t *src,
        x86_debug_state32_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
        x86_debug_state64_t *src,
        x86_debug_state64_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}