/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/machine_routines.h>

#include <machine/commpage.h>
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT,
};
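
/*
 * Illustrative sketch (not from this file): the table above lets a caller
 * map a flavor constant to its word count generically, e.g.
 *
 *	mach_msg_type_number_t count = _MachineStateCount[flavor];
 *
 * rather than hard-coding the per-flavor _COUNT constants.
 */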
zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */
void		act_machine_throughcall(thread_t thr_act);
void		act_machine_return(int);

extern void	Thread_continue(void);
extern void	Load_context(thread_t thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * trap.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on 64-bit; on a 32-bit
	 * processor it is undefined and must be rejected.
	 */
	if (current_cpu_datap()->cpu_is64bit == FALSE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
				((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10; /* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11); /* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12); /* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14); /* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15); /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
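
/*
 * A sketch of the DR7 layout the checks above decode (these helper macros
 * are hypothetical, added only to make the shift arithmetic concrete):
 * bits 16+4*n hold R/W[n] (00B = instruction fetch, 01B = data write,
 * 10B = i/o read/write when CR4.DE is set, 11B = data read/write), bits
 * 18+4*n hold LEN[n], and the odd low bits 1, 3, 5, 7 are the global
 * enables G0-G3 that this function refuses to let user code set.
 */
#define DR7_RW(dr7, n)		(((dr7) >> (16 + (n)*4)) & 0x3)
#define DR7_LEN(dr7, n)		(((dr7) >> (18 + (n)*4)) & 0x3)
#define DR7_GLOBAL(dr7, n)	(((dr7) >> ((n)*2 + 1)) & 0x1)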
static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			/* someone else allocated it first; use theirs */
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	if (!dr7_is_valid(&ds->dr7))
		goto err;

	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			goto err;

	ids->dr0 = ds->dr0;
	ids->dr1 = ds->dr1;
	ids->dr2 = ds->dr2;
	ids->dr3 = ds->dr3;
	ids->dr4 = ds->dr4;
	ids->dr5 = ds->dr5;
	ids->dr6 = ds->dr6;
	ids->dr7 = ds->dr7;

	return (KERN_SUCCESS);

err:
	return (KERN_INVALID_ARGUMENT);
}
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			/* someone else allocated it first; use theirs */
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		goto err;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * valid address.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			goto err;

	ids->dr0 = ds->dr0;
	ids->dr1 = ds->dr1;
	ids->dr2 = ds->dr2;
	ids->dr3 = ds->dr3;
	ids->dr4 = ds->dr4;
	ids->dr5 = ds->dr5;
	ids->dr6 = ds->dr6;
	ids->dr7 = ds->dr7;

	return (KERN_SUCCESS);

err:
	return (KERN_INVALID_ARGUMENT);
}
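
/*
 * Usage sketch (caller's view, not part of this file): a debugger sets a
 * hardware watchpoint by filling in a debug-state flavor and letting
 * thread_set_state() funnel into the routines above, e.g. for a 32-bit
 * target:
 *
 *	x86_debug_state32_t dbg;
 *	bzero(&dbg, sizeof dbg);
 *	dbg.dr0 = watch_addr;                          (address is assumed)
 *	dbg.dr7 = 0x1 | (0x1 << 16) | (0x3 << 18);     L0, R/W0=write, LEN0=4
 *	thread_set_state(target_thread, x86_DEBUG_STATE32,
 *			 (thread_state_t)&dbg, x86_DEBUG_STATE32_COUNT);
 *
 * target_thread is assumed to be a thread port obtained elsewhere
 * (e.g. via task_threads()).
 */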
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.pcb->ids;

	if (saved_state) {
		ds->dr0 = saved_state->dr0;
		ds->dr1 = saved_state->dr1;
		ds->dr2 = saved_state->dr2;
		ds->dr3 = saved_state->dr3;
		ds->dr4 = saved_state->dr4;
		ds->dr5 = saved_state->dr5;
		ds->dr6 = saved_state->dr6;
		ds->dr7 = saved_state->dr7;
	} else
		bzero(ds, sizeof *ds);
}
static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

	if (saved_state) {
		ds->dr0 = saved_state->dr0;
		ds->dr1 = saved_state->dr1;
		ds->dr2 = saved_state->dr2;
		ds->dr3 = saved_state->dr3;
		ds->dr4 = saved_state->dr4;
		ds->dr5 = saved_state->dr5;
		ds->dr6 = saved_state->dr6;
		ds->dr7 = saved_state->dr7;
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}
extern void *get_bsduthreadarg(thread_t th);
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	vm_offset_t		pcb_stack_top;
	vm_offset_t		hi_pcb_stack_top;
	vm_offset_t		hi_iss;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);
	STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

	if (!cpu_mode_is64bit()) {
		x86_saved_state32_tagged_t	*hi_iss32;
		/*
		 * Save a pointer to the top of the "kernel" stack -
		 * actually the place in the PCB where a trap into
		 * kernel mode will push the registers.
		 */
		hi_iss = (vm_offset_t)((unsigned long)
			pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
			((unsigned long)pcb->iss & PAGE_MASK));

		cdp->cpu_hi_iss = (void *)hi_iss;

		pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
		pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

		hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
		assert(hi_iss32->tag == x86_SAVED_STATE32);

		hi_pcb_stack_top = (int) (hi_iss32 + 1);

		/*
		 * For fast syscall, top of interrupt stack points to pcb stack
		 */
		*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

		current_ktss()->esp0 = hi_pcb_stack_top;

	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not the task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = UBER64(isf);
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);
		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = UBER64(pcb_stack_top);

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (int) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = UBER64(isf);

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = UBER64(pcb_stack_top);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = UBER64((vm_offset_t)get_bsduthreadarg(new));
		cdp->cpu_uber_arg_store_valid = UBER64((vm_offset_t)&pcb->arg_store_valid);
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS; enable USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;
	}

	/*
	 * Set the thread`s cthread (a.k.a pthread).
	 * For 32-bit user this involves setting the USER_CTHREAD
	 * descriptor in the LDT to point to the cthread data.
	 * This involves copying in the pre-initialized descriptor.
	 */
	ldtp = (struct real_descriptor *)current_ldt();
	ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	if (pcb->uldt_selector != 0)
		ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;

	/*
	 * For 64-bit, we additionally set the 64-bit User GS base
	 * address. On return to 64-bit user, the GS.Base MSR will be written.
	 */
	cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t		new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(new);
	Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number())

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);

	return(Switch_context(old, continuation, new));
}
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.pcb->ifps) {
		(void) fpu_set_fxstate(thread, NULL);

		if (thread == current_thread())
			clear_fpu();
	}
	return KERN_SUCCESS;
}
unsigned int
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
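
/*
 * Usage sketch (illustrative, from the caller's side): the generic
 * x86_THREAD_STATE flavor lets one call work for both task widths:
 *
 *	x86_thread_state_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *	if (thread_get_state(target_thread, x86_THREAD_STATE,
 *			(thread_state_t)&ts, &count) == KERN_SUCCESS) {
 *		if (ts.tsh.flavor == x86_THREAD_STATE64)
 *			inspect64(ts.uts.ts64);      (hypothetical helper)
 *		else
 *			inspect32(ts.uts.ts32);      (hypothetical helper)
 *	}
 *
 * target_thread is assumed to be a thread port (e.g. from task_threads()).
 */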
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->err = saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	if (ts->cs != USER_CS) ts->cs = USER_CS;
	if (ts->ss == 0) ts->ss = USER_DS;
	if (ts->ds == 0) ts->ds = USER_DS;
	if (ts->es == 0) ts->es = USER_DS;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	return(KERN_SUCCESS);
}
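
/*
 * Note on the canonicality test above (architectural background, not code
 * from this file): x86-64 requires bits 63:47 of a linear address to be
 * copies of bit 47, and user addresses additionally keep those bits clear,
 * so roughly
 *
 *	canonical_user(va) := ((va >> 47) == 0)
 *
 * Loading a non-canonical rip/rsp would fault on the return to user mode,
 * hence the KERN_INVALID_ARGUMENT up front.
 */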
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
int
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	x86_thread_state32_t	*state;
	x86_saved_state32_t	*saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = USER_REGS32(thread);
	state = (x86_thread_state32_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->eip = state->eip;
	saved_state->eax = state->eax;
	saved_state->ebx = state->ebx;
	saved_state->ecx = state->ecx;
	saved_state->edx = state->edx;
	saved_state->edi = state->edi;
	saved_state->esi = state->esi;
	saved_state->uesp = state->esp;
	saved_state->efl = EFL_USER_SET;

	saved_state->cs = USER_CS;
	saved_state->ss = USER_DS;
	saved_state->ds = USER_DS;
	saved_state->es = USER_DS;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
int
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	x86_thread_state64_t	*state;
	x86_saved_state64_t	*saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = USER_REGS64(thread);
	state = (x86_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->rdi = state->rdi;
	saved_state->rsi = state->rsi;
	saved_state->rdx = state->rdx;
	saved_state->rcx = state->rcx;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;

	saved_state->isf.rip = state->rip;
	saved_state->isf.rsp = state->rsp;
	saved_state->isf.cs = USER64_CS;
	saved_state->isf.rflags = EFL_USER_SET;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
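
/*
 * Note (added observation): rdi/rsi/rdx/rcx/r8/r9 set above are exactly
 * the six integer argument registers of the x86-64 SysV calling
 * convention, so the new workqueue thread starts as if the function at
 * 'rip' had been called with those six arguments on the stack at 'rsp'.
 */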
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
					state->ss,
					state->ds,
					state->es,
					state->fs,
					state->gs))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;
		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;
		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}
	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		return(KERN_INVALID_ARGUMENT);
	}
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else
		if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
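
/*
 * Usage sketch (illustrative): single-stepping a 32-bit thread funnels
 * through the x86_THREAD_STATE32 case above, e.g. from a debugger:
 *
 *	x86_thread_state32_t ts;
 *	mach_msg_type_number_t cnt = x86_THREAD_STATE32_COUNT;
 *	thread_get_state(t, x86_THREAD_STATE32, (thread_state_t)&ts, &cnt);
 *	ts.eflags |= 0x100;                       (the EFL_TF trace flag)
 *	thread_set_state(t, x86_THREAD_STATE32, (thread_state_t)&ts, cnt);
 *
 * The SYSENTER_CS/SYSENTER_TF_CS fix-up in set_thread_state32() exists so
 * such a thread returns to user mode via iret rather than sysexit.
 */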
/*
 * machine_thread_get_state:
 *
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor)  {
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}
	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}
	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		break;
	}
	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;

		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || current_cpu_datap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *)tstate;

		assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
		saved_state = saved_state32(current_cpu_datap()->cpu_int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE:
	{
		// wrap a 32 bit thread state into a 32/64bit clean thread state
		x86_thread_state_t	*state;
		x86_saved_state32_t	*saved_state;

		if(*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;
		assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
		saved_state = saved_state32(current_cpu_datap()->cpu_int_state);

		state->tsh.flavor = x86_THREAD_STATE32;
		state->tsh.count  = x86_THREAD_STATE32_COUNT;

		/*
		 * General registers.
		 */
		state->uts.ts32.eax = saved_state->eax;
		state->uts.ts32.ebx = saved_state->ebx;
		state->uts.ts32.ecx = saved_state->ecx;
		state->uts.ts32.edx = saved_state->edx;
		state->uts.ts32.edi = saved_state->edi;
		state->uts.ts32.esi = saved_state->esi;
		state->uts.ts32.ebp = saved_state->ebp;
		state->uts.ts32.esp = saved_state->uesp;
		state->uts.ts32.eflags = saved_state->efl;
		state->uts.ts32.eip = saved_state->eip;
		state->uts.ts32.cs = saved_state->cs;
		state->uts.ts32.ss = saved_state->ss;
		state->uts.ts32.ds = saved_state->ds & 0xffff;
		state->uts.ts32.es = saved_state->es & 0xffff;
		state->uts.ts32.fs = saved_state->fs & 0xffff;
		state->uts.ts32.gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	pcb_t			pcb = &thread->machine.xxx_pcb;
	struct real_descriptor	*ldtp;
	pmap_paddr_t		paddr;
	x86_saved_state_t	*iss;

	inval_copy_windows(thread);

	thread->machine.physwindow_pte = 0;
	thread->machine.physwindow_busy = 0;

	/*
	 * Allocate pcb only if required.
	 */
	if (pcb->sf == NULL) {
		pcb->sf = zalloc(iss_zone);
		if (pcb->sf == NULL)
			panic("iss_zone");
	}
	if (task_has_64BitAddr(task)) {
		x86_sframe64_t		*sf64;

		sf64 = (x86_sframe64_t *) pcb->sf;

		bzero((char *)sf64, sizeof(x86_sframe64_t));

		iss = (x86_saved_state_t *) &sf64->ssf;
		iss->flavor = x86_SAVED_STATE64;
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_64.isf.rflags = EFL_USER_SET;
		iss->ss_64.isf.cs = USER64_CS;
		iss->ss_64.isf.ss = USER_DS;
		iss->ss_64.fs = USER_DS;
		iss->ss_64.gs = USER_DS;
	} else {
		if (cpu_mode_is64bit()) {
			x86_sframe_compat32_t	*sfc32;

			sfc32 = (x86_sframe_compat32_t *)pcb->sf;

			bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

			iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
			iss->flavor = x86_SAVED_STATE32;
#if DEBUG
			{
				x86_saved_state_compat32_t *xssc;

				xssc = (x86_saved_state_compat32_t *) iss;
				xssc->pad_for_16byte_alignment[0] = 0x64326432;
				xssc->pad_for_16byte_alignment[1] = 0x64326432;
			}
#endif
		} else {
			x86_sframe32_t		*sf32;

			sf32 = (x86_sframe32_t *) pcb->sf;

			bzero((char *)sf32, sizeof(x86_sframe32_t));

			iss = (x86_saved_state_t *) &sf32->ssf;
			iss->flavor = x86_SAVED_STATE32;
		}
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_32.cs = USER_CS;
		iss->ss_32.ss = USER_DS;
		iss->ss_32.ds = USER_DS;
		iss->ss_32.es = USER_DS;
		iss->ss_32.fs = USER_DS;
		iss->ss_32.gs = USER_DS;
		iss->ss_32.efl = EFL_USER_SET;
	}
	pcb->iss = iss;
	thread->machine.pcb = pcb;
	simple_lock_init(&pcb->lock, 0);

	ldtp = (struct real_descriptor *)pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
	pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
	pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
	pcb->uldt_selector = 0;

	pcb->iss_pte0 = (uint64_t)pte_kernel_rw(kvtophys((vm_offset_t)pcb->iss));
	pcb->arg_store_valid = 0;

	if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)(pcb->iss) + PAGE_SIZE))))
		pcb->iss_pte1 = INTEL_PTE_INVALID;
	else
		pcb->iss_pte1 = (uint64_t)pte_kernel_rw(paddr);

	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ifps != 0)
		fpu_free(pcb->ifps);
	if (pcb->sf != 0) {
		zfree(iss_zone, pcb->sf);
		pcb->sf = 0;
	}
	if (pcb->ids) {
		zfree(ids_zone, pcb->ids);
		pcb->ids = NULL;
	}
	thread->machine.pcb = (pcb_t)0;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread())
		act_machine_switch_pcb(thread);

	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread( thread_t thread )
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * This is called when a task is terminated.
 */
void
machine_thread_terminate_self(void)
{
	task_t self_task = current_task();

	if (self_task) {
		user_ldt_t user_ldt = self_task->i386_ldt;
		if (user_ldt != 0) {
			self_task->i386_ldt = 0;
			user_ldt_free(user_ldt);
		}
	}
}
void
act_machine_return(
#if CONFIG_NO_PANIC_STRINGS
		__unused int code
#else
		int code
#endif
		)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone = zinit(sizeof(x86_sframe64_t),
				THREAD_MAX * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		ids_zone = zinit(sizeof(x86_debug_state64_t),
				THREAD_MAX * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");
	} else {
		iss_zone = zinit(sizeof(x86_sframe32_t),
				THREAD_MAX * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone = zinit(sizeof(x86_debug_state32_t),
				THREAD_MAX * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86 debug state");
	}
	fpu_module_init();
}
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%p)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%p)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb == NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(%p)(%d): task=%p(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task,
	       thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%p\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %x  eip %x ebx %x esp %x iss %p\n",
		       stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);

	return((int)thr_act);
}
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thr_act->machine.pcb == NULL)
		return(0);

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     thread, thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state32 *statep;

	assert(stack);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     thread, thread->priority,
		     thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);
}
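
/*
 * Note (added observation): Switch_context() resumes a freshly attached
 * stack at k_eip, i.e. in the assembly stub Thread_continue, which then
 * invokes the C routine stashed in k_ebx (thread_continue), completing
 * the handoff into the scheduler's continuation path.
 */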
/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(new);

	machine_set_current_thread(new);
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);

		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
			if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
				machine_thread_set_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						x86_DEBUG_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}
void act_thread_cfree(__unused void *ctx)
{
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
	thread->machine.pcb->arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
	return (thread->machine.pcb->arg_store_valid);
}