/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */

#include <machine/commpage.h>

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};

zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */
void		act_machine_throughcall(thread_t thr_act);
void		act_machine_return(int);

extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);
static inline boolean_t
machine_thread_pmc_eligible(thread_t t) {
	/*
	 * NOTE: Task-level reservations are propagated to child threads via
	 * thread_create_internal.  Any mutation of task reservations forces a
	 * recalculate of t_chud (for the pmc flag) for all threads in that task.
	 * Consequently, we can simply check the current thread's flag against
	 * THREAD_PMC_FLAG.  If the result is non-zero, we SWI for a PMC switch.
	 */
	return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
}

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	__asm__ __volatile__("int %0"::"i"(LAPIC_PMC_SWI_VECTOR):"memory");
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * result in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on Merom and newer processors
	 * (it signifies an 8-byte wide region). We use the 64bit capability
	 * of the processor in lieu of the more laborious model/family checks
	 * as all 64-bit capable processors so far support this.
	 * Reject an attempt to use this on 64-bit incapable processors.
	 */
	if (current_cpu_datap()->cpu_is64bit == FALSE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
				((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10; /* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11); /* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12); /* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14); /* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15); /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
static void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

#if defined(__i386__)
	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;
#endif

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}
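
/*
 * Illustrative sketch only (not referenced by the kernel): a debug state
 * that the validators above would accept -- a single local, 4-byte,
 * break-on-write watchpoint in DR0.  The DR7 bit positions follow the
 * checks in dr7_is_valid(); the watched address is a placeholder.
 */
#if 0	/* example, compiled out */
static const x86_debug_state64_t example_watchpoint = {
	.dr0 = 0x10000ULL,	/* watched address, must be below VM_MAX_PAGE_ADDRESS */
	.dr7 = (0x1 << 0)	/* L0: local enable for breakpoint 0 (global bit must stay clear) */
	     | (0x1 << 16)	/* R/W0 = 01B: break on data writes */
	     | (0x3 << 18)	/* LEN0 = 11B: 4-byte wide region */
};
#endif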
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			ids = pcb->ids;
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

extern void *get_bsduthreadarg(thread_t th);
#if defined(__x86_64__)
void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	mach_vm_offset_t	pcb_stack_top;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);

	if (!cpu_mode_is64bit()) {
		panic("K64 is 64bit!");
	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = isf;
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = pcb_stack_top;

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

		/*
		 * Switch user's GS base if necessary
		 * by setting the Kernel's GS base MSR
		 * - this will become the user's on the swapgs when
		 * returning to user-space.
		 */
		if (cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) {
			cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
			wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
		}
	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (uintptr_t) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = isf;

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = pcb_stack_top;

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = (vm_offset_t)get_bsduthreadarg(new);
		cdp->cpu_uber_arg_store_valid = (vm_offset_t)&pcb->arg_store_valid;
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;

		/*
		 * Set the thread`s cthread (a.k.a pthread)
		 * For 32-bit user this involves setting the USER_CTHREAD
		 * descriptor in the LDT to point to the cthread data.
		 * The involves copying in the pre-initialized descriptor.
		 */
		ldtp = (struct real_descriptor *)current_ldt();
		ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
		if (pcb->uldt_selector != 0)
			ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
		cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
	}

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
#else /* !__x86_64__ */
void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	vm_offset_t		pcb_stack_top;
	vm_offset_t		hi_pcb_stack_top;
	vm_offset_t		hi_iss;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);
	STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

	if (!cpu_mode_is64bit()) {
		x86_saved_state32_tagged_t	*hi_iss32;
		/*
		 *	Save a pointer to the top of the "kernel" stack -
		 *	actually the place in the PCB where a trap into
		 *	kernel mode will push the registers.
		 */
		hi_iss = (vm_offset_t)((unsigned long)
			pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
			((unsigned long)pcb->iss & PAGE_MASK));

		cdp->cpu_hi_iss = (void *)hi_iss;

		pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
		pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

		hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
		assert(hi_iss32->tag == x86_SAVED_STATE32);

		hi_pcb_stack_top = (int) (hi_iss32 + 1);

		/*
		 * For fast syscall, top of interrupt stack points to pcb stack
		 */
		*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

		current_ktss()->esp0 = hi_pcb_stack_top;

	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = UBER64(isf);
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);
		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = UBER64(pcb_stack_top);

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (int) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = UBER64(isf);

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = UBER64(pcb_stack_top);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = UBER64((vm_offset_t)get_bsduthreadarg(new));
		cdp->cpu_uber_arg_store_valid = UBER64((vm_offset_t)&pcb->arg_store_valid);
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;
	}

	/*
	 * Set the thread`s cthread (a.k.a pthread)
	 * For 32-bit user this involves setting the USER_CTHREAD
	 * descriptor in the LDT to point to the cthread data.
	 * The involves copying in the pre-initialized descriptor.
	 */
	ldtp = (struct real_descriptor *)current_ldt();
	ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	if (pcb->uldt_selector != 0)
		ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;

	/*
	 * For 64-bit, we additionally set the 64-bit User GS base
	 * address. On return to 64-bit user, the GS.Base MSR will be written.
	 */
	cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
#endif /* !__x86_64__ */
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number())

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);

	return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.pcb->ifps) {
		(void) fpu_set_fxstate(thread, NULL);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.pcb->ids) {
		zfree(ids_zone, thread->machine.pcb->ids);
		thread->machine.pcb->ids = NULL;
	}

	return KERN_SUCCESS;
}
uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
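
/*
 * Illustrative sketch only: fetching general-purpose register state through
 * the width-independent x86_THREAD_STATE flavor described above.  The helper
 * below is hypothetical and exists purely to show the calling convention;
 * real callers normally go through thread_getstatus()/machine_thread_get_state().
 */
#if 0	/* example, compiled out */
static kern_return_t
example_get_gp_state(thread_t thread, x86_thread_state_t *out)
{
	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
	kern_return_t kr;

	kr = machine_thread_get_state(thread, x86_THREAD_STATE,
				      (thread_state_t)out, &count);
	if (kr != KERN_SUCCESS)
		return kr;
	/* out->tsh.flavor is now x86_THREAD_STATE32 or x86_THREAD_STATE64,
	 * matching the bitness of the target thread. */
	return KERN_SUCCESS;
}
#endif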
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
#if defined(__i386__)
	if (ts->ss == 0) ts->ss = USER_DS;
	if (ts->ds == 0) ts->ds = USER_DS;
	if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;
#endif

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
int
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	x86_thread_state32_t	*state;
	x86_saved_state32_t	*saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = USER_REGS32(thread);

	state = (x86_thread_state32_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->ebp = 0;
	saved_state->eip = state->eip;
	saved_state->eax = state->eax;
	saved_state->ebx = state->ebx;
	saved_state->ecx = state->ecx;
	saved_state->edx = state->edx;
	saved_state->edi = state->edi;
	saved_state->esi = state->esi;
	saved_state->uesp = state->esp;
	saved_state->efl = EFL_USER_SET;

	saved_state->cs = USER_CS;
	saved_state->ss = USER_DS;
	saved_state->ds = USER_DS;
	saved_state->es = USER_DS;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

int
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	x86_thread_state64_t	*state;
	x86_saved_state64_t	*saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = USER_REGS64(thread);
	state = (x86_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->rbp = 0;
	saved_state->rdi = state->rdi;
	saved_state->rsi = state->rsi;
	saved_state->rdx = state->rdx;
	saved_state->rcx = state->rcx;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;

	saved_state->isf.rip = state->rip;
	saved_state->isf.rsp = state->rsp;
	saved_state->isf.cs = USER64_CS;
	saved_state->isf.rflags = EFL_USER_SET;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}

	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else
		if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 * machine_thread_get_state:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor)  {

	    case THREAD_STATE_FLAVOR_LIST:
	    {
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	    }

	    case THREAD_STATE_FLAVOR_LIST_NEW:
	    {
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	    }

	    case x86_SAVED_STATE32:
	    {
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	    }

	    case x86_SAVED_STATE64:
	    {
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	    }

	    case x86_FLOAT_STATE32:
	    {
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	    }

	    case x86_FLOAT_STATE64:
	    {
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	    }

	    case x86_FLOAT_STATE:
	    {
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	    }

	    case x86_THREAD_STATE32:
	    {
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	    }

	    case x86_THREAD_STATE64:
	    {
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	    }

	    case x86_THREAD_STATE:
	    {
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	    }

	    case x86_EXCEPTION_STATE32:
	    {
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		break;
	    }

	    case x86_EXCEPTION_STATE64:
	    {
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		break;
	    }

	    case x86_EXCEPTION_STATE:
	    {
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	    }
	    case x86_DEBUG_STATE32:
	    {
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	    }
	    case x86_DEBUG_STATE64:
	    {
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	    }
	    case x86_DEBUG_STATE:
	    {
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	    }
	    default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	    case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	    }

	    case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	    }

	    case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	    }
	}
	return KERN_FAILURE;
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t	thread,
	task_t		task)
{
	pcb_t			pcb = &thread->machine.xxx_pcb;
	x86_saved_state_t	*iss;

#if NCOPY_WINDOWS > 0
	inval_copy_windows(thread);

	thread->machine.physwindow_pte = 0;
	thread->machine.physwindow_busy = 0;
#endif

	/*
	 * Allocate pcb only if required.
	 */
	if (pcb->sf == NULL) {
		pcb->sf = zalloc(iss_zone);
		if (pcb->sf == NULL)
			panic("iss_zone");
	}

	if (task_has_64BitAddr(task)) {
		x86_sframe64_t		*sf64;

		sf64 = (x86_sframe64_t *) pcb->sf;

		bzero((char *)sf64, sizeof(x86_sframe64_t));

		iss = (x86_saved_state_t *) &sf64->ssf;
		iss->flavor = x86_SAVED_STATE64;
		/*
		 *      Guarantee that the bootstrapped thread will be in user
		 *      mode.
		 */
		iss->ss_64.isf.rflags = EFL_USER_SET;
		iss->ss_64.isf.cs = USER64_CS;
		iss->ss_64.isf.ss = USER_DS;
		iss->ss_64.fs = USER_DS;
		iss->ss_64.gs = USER_DS;
	} else {
		if (cpu_mode_is64bit()) {
			x86_sframe_compat32_t	*sfc32;

			sfc32 = (x86_sframe_compat32_t *)pcb->sf;

			bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

			iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
			iss->flavor = x86_SAVED_STATE32;
#if defined(__i386__)
#if DEBUG
			{
				x86_saved_state_compat32_t *xssc;

				xssc = (x86_saved_state_compat32_t *) iss;

				xssc->pad_for_16byte_alignment[0] = 0x64326432;
				xssc->pad_for_16byte_alignment[1] = 0x64326432;
			}
#endif /* DEBUG */
		} else {
			x86_sframe32_t		*sf32;
			struct real_descriptor	*ldtp;
			pmap_paddr_t		paddr;

			sf32 = (x86_sframe32_t *) pcb->sf;

			bzero((char *)sf32, sizeof(x86_sframe32_t));

			iss = (x86_saved_state_t *) &sf32->ssf;
			iss->flavor = x86_SAVED_STATE32;
			pcb->iss_pte0 = pte_kernel_rw(kvtophys((vm_offset_t)iss));
			if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)iss + PAGE_SIZE))))
				pcb->iss_pte1 = INTEL_PTE_INVALID;
			else
				pcb->iss_pte1 = pte_kernel_rw(paddr);

			ldtp = (struct real_descriptor *)
				    pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
			pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
			pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
#endif /* __i386__ */
		}
		/*
		 *      Guarantee that the bootstrapped thread will be in user
		 *      mode.
		 */
		iss->ss_32.cs = USER_CS;
		iss->ss_32.ss = USER_DS;
		iss->ss_32.ds = USER_DS;
		iss->ss_32.es = USER_DS;
		iss->ss_32.fs = USER_DS;
		iss->ss_32.gs = USER_DS;
		iss->ss_32.efl = EFL_USER_SET;
	}
	pcb->iss = iss;

	thread->machine.pcb = pcb;
	simple_lock_init(&pcb->lock, 0);

	pcb->arg_store_valid = 0;
	pcb->cthread_self = 0;
	pcb->uldt_selector = 0;

	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t	thread)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ifps != 0)
		fpu_free(pcb->ifps);
	if (pcb->sf != 0) {
		zfree(iss_zone, pcb->sf);
		pcb->sf = 0;
	}
	if (pcb->ids) {
		zfree(ids_zone, pcb->ids);
		pcb->ids = NULL;
	}
	thread->machine.pcb = (pcb_t)0;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
#if defined(__i386__)
		if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
			pmap_load_kernel_cr3();
#endif /* defined(__i386__) */
		act_machine_switch_pcb(thread);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}
/*
 * This is called when a task is terminated, and also on exec().
 * Clear machine-dependent state that is stored on the task.
 */
void
machine_thread_terminate_self(void)
{
	task_t self_task = current_task();
	if (self_task) {
		user_ldt_t user_ldt = self_task->i386_ldt;
		if (user_ldt != 0) {
			self_task->i386_ldt = 0;
			user_ldt_free(user_ldt);
		}

		if (self_task->task_debug != NULL) {
			zfree(ids_zone, self_task->task_debug);
			self_task->task_debug = NULL;
		}
	}
}
void
act_machine_return(
	int code)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone = zinit(sizeof(x86_sframe64_t),
				thread_max * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		ids_zone = zinit(sizeof(x86_debug_state64_t),
				thread_max * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");

	} else {
		iss_zone = zinit(sizeof(x86_sframe32_t),
				thread_max * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone = zinit(sizeof(x86_debug_state32_t),
				thread_max * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86 debug state");
	}
	fpu_module_init();
}
#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%p)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%p)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb == NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(%p)(%d): task=%p(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task,
	       thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%p\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
		       (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);

	return((int)thr_act);
}
#endif /* __i386__ */
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thr_act->machine.pcb == NULL)
		return(0);

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IEL(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);
#endif

	return;
}
2408 machine_stack_handoff(thread_t old
,
2417 machine_pmc_cswitch(old
, new);
2420 stack
= old
->kernel_stack
;
2421 if (stack
== old
->reserved_stack
) {
2422 assert(new->reserved_stack
);
2423 old
->reserved_stack
= new->reserved_stack
;
2424 new->reserved_stack
= stack
;
2426 old
->kernel_stack
= 0;
2428 * A full call to machine_stack_attach() is unnecessry
2429 * because old stack is already initialized.
2431 new->kernel_stack
= stack
;
2433 fpu_save_context(old
);
2436 old
->machine
.specFlags
&= ~OnProc
;
2437 new->machine
.specFlags
|= OnProc
;
2439 PMAP_SWITCH_CONTEXT(old
, new, cpu_number());
2440 act_machine_switch_pcb(new);
2442 machine_set_current_thread(new);
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);

		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
			if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
				machine_thread_set_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						x86_DEBUG_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
	thread->machine.pcb->arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
	return (thread->machine.pcb->arg_store_valid);
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}