/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */

#include <machine/commpage.h>

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};

zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */
/* Forward */

void		act_machine_throughcall(thread_t thr_act);
void		act_machine_return(int);

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t		thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t t) {
	/*
	 * NOTE: Task-level reservations are propagated to child threads via
	 * thread_create_internal.  Any mutation of task reservations forces a
	 * recalculate of t_chud (for the pmc flag) for all threads in that task.
	 * Consequently, we can simply check the current thread's flag against
	 * THREAD_PMC_FLAG.  If the result is non-zero, we SWI for a PMC switch.
	 */
	return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
}

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	__asm__ __volatile__("int %0"::"i"(LAPIC_PMC_SWI_VECTOR):"memory");
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
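/*
 * Illustration (not part of the original source): pmc_swi() publishes the
 * outgoing/incoming thread pair in per-cpu data before raising the software
 * interrupt, and the vector's handler can retrieve that pair through
 * ml_get_csw_threads().  A minimal sketch of such a consumer follows; the
 * handler name and the way it would be wired to LAPIC_PMC_SWI_VECTOR are
 * assumptions for illustration only.
 */
#if 0
static void
pmc_swi_handler_sketch(void)
{
	thread_t old, new;

	ml_get_csw_threads(&old, &new);	/* pair published by pmc_swi() */
	/* ... save/restore performance-counter state for old/new here ... */
}
#endif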
/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * result in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and write
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on Merom and newer processors
	 * (it signifies an 8-byte wide region). We use the 64bit capability
	 * of the processor in lieu of the more laborious model/family checks
	 * as all 64-bit capable processors so far support this.
	 * Reject an attempt to use this on 64-bit incapable processors.
	 */
	if (current_cpu_datap()->cpu_is64bit == FALSE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
				((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
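/*
 * Worked example (not part of the original source): in DR7, breakpoint i
 * has a local-enable bit L[i] at bit 2*i, an R/W field at bits 16+4*i and
 * 17+4*i, and a LEN field at bits 18+4*i and 19+4*i.  A 4-byte write
 * watchpoint in DR0 therefore encodes as:
 *
 *	L0   = 1           -> bit 0       = 0x00000001
 *	R/W0 = 01B (write) -> bit 16      = 0x00010000
 *	LEN0 = 11B (4 byte)-> bits 18,19  = 0x000C0000
 *
 *	dr7 = 0x000D0001
 *
 * which passes the checks above: R/W0 is neither the reserved "10B"
 * pattern nor an execution break with a non-zero length, and no
 * global-enable bits are set.
 */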
static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers.
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}
boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

#if defined(__i386__)
	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;
#endif

	return TRUE;
}
boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

extern void *get_bsduthreadarg(thread_t th);
#if defined(__x86_64__)
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	mach_vm_offset_t	pcb_stack_top;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);

	if (!cpu_mode_is64bit()) {
		panic("K64 is 64bit!");
	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = isf;
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = pcb_stack_top;

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

		/*
		 * Switch user's GS base if necessary
		 * by setting the Kernel GS base MSR
		 * - this will become the user's on the swapgs when
		 * returning to user-space.  Avoid this for
		 * kernel threads (no user TLS support required)
		 * and verify the memory shadow of the segment base
		 * in the event it was altered in user space.
		 */
		if ((pcb->cthread_self != 0) || (new->task != kernel_task)) {
			if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
				cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
				wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
			}
		}
	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (uintptr_t) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = isf;

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = pcb_stack_top;

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = (vm_offset_t)get_bsduthreadarg(new);
		cdp->cpu_uber_arg_store_valid = (vm_offset_t)&pcb->arg_store_valid;
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;

		/*
		 * Set the thread`s cthread (a.k.a pthread)
		 * For 32-bit user this involves setting the USER_CTHREAD
		 * descriptor in the LDT to point to the cthread data.
		 * This involves copying in the pre-initialized descriptor.
		 */
		ldtp = (struct real_descriptor *)current_ldt();
		ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
		if (pcb->uldt_selector != 0)
			ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
		cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
	}

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
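/*
 * Illustration (not part of the original source): the scheduler generation
 * count bumped by commpage_sched_gen_inc() is exported through the commpage,
 * so user code can bracket a region with two reads and detect whether it was
 * preempted in between.  A hedged user-space sketch, assuming the
 * _COMM_PAGE_SCHED_GEN address constant from <machine/cpu_capabilities.h>:
 */
#if 0
static int
ran_without_preemption(void (*fn)(void))
{
	volatile uint32_t *sched_gen =
		(volatile uint32_t *)_COMM_PAGE_SCHED_GEN;
	uint32_t gen = *sched_gen;

	fn();
	return (*sched_gen == gen);	/* unchanged => no preemption observed */
}
#endif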
#else /* !__x86_64__ */
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	vm_offset_t		pcb_stack_top;
	vm_offset_t		hi_pcb_stack_top;
	vm_offset_t		hi_iss;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);
	STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

	if (!cpu_mode_is64bit()) {
		x86_saved_state32_tagged_t *hi_iss32;
		/*
		 *	Save a pointer to the top of the "kernel" stack -
		 *	actually the place in the PCB where a trap into
		 *	kernel mode will push the registers.
		 */
		hi_iss = (vm_offset_t)((unsigned long)
			pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
			((unsigned long)pcb->iss & PAGE_MASK));

		cdp->cpu_hi_iss = (void *)hi_iss;

		pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
		pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

		hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
		assert(hi_iss32->tag == x86_SAVED_STATE32);

		hi_pcb_stack_top = (int) (hi_iss32 + 1);

		/*
		 * For fast syscall, top of interrupt stack points to pcb stack
		 */
		*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

		current_ktss()->esp0 = hi_pcb_stack_top;

	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = UBER64(isf);
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);
		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = UBER64(pcb_stack_top);

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (int) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = UBER64(isf);

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = UBER64(pcb_stack_top);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = UBER64((vm_offset_t)get_bsduthreadarg(new));
		cdp->cpu_uber_arg_store_valid = UBER64((vm_offset_t)&pcb->arg_store_valid);
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;
	}

	/*
	 * Set the thread`s cthread (a.k.a pthread)
	 * For 32-bit user this involves setting the USER_CTHREAD
	 * descriptor in the LDT to point to the cthread data.
	 * This involves copying in the pre-initialized descriptor.
	 */
	ldtp = (struct real_descriptor *)current_ldt();
	ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	if (pcb->uldt_selector != 0)
		ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;

	/*
	 * For 64-bit, we additionally set the 64-bit User GS base
	 * address. On return to 64-bit user, the GS.Base MSR will be written.
	 */
	cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

	/*
	 * Set the thread`s LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
#endif /* __x86_64__ */
/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t		new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(new);
	Load_context(new);
}
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number())

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new);

	return(Switch_context(old, continuation, new));
}
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}

/*
 * act_machine_sv_free
 * release saveareas associated with an act. if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.pcb->ifps) {
		(void) fpu_set_fxstate(thread, NULL);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.pcb->ids) {
		zfree(ids_zone, thread->machine.pcb->ids);
		thread->machine.pcb->ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
#if defined(__i386__)
	if (ts->ss == 0) ts->ss = USER_DS;
	if (ts->ds == 0) ts->ds = USER_DS;
	if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * explicitly any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;
#endif

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}
void
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	x86_thread_state32_t	*state;
	x86_saved_state32_t	*saved_state;
	thread_t curth = current_thread();
	spl_t			s = 0;

	saved_state = USER_REGS32(thread);

	state = (x86_thread_state32_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->ebp = 0;
	saved_state->eip = state->eip;
	saved_state->eax = state->eax;
	saved_state->ebx = state->ebx;
	saved_state->ecx = state->ecx;
	saved_state->edx = state->edx;
	saved_state->edi = state->edi;
	saved_state->esi = state->esi;
	saved_state->uesp = state->esp;
	saved_state->efl = EFL_USER_SET;

	saved_state->cs = USER_CS;
	saved_state->ss = USER_DS;
	saved_state->ds = USER_DS;
	saved_state->es = USER_DS;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}
}
void
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	x86_thread_state64_t	*state;
	x86_saved_state64_t	*saved_state;
	thread_t curth = current_thread();
	spl_t			s = 0;

	saved_state = USER_REGS64(thread);
	state = (x86_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->rbp = 0;
	saved_state->rdi = state->rdi;
	saved_state->rsi = state->rsi;
	saved_state->rdx = state->rdx;
	saved_state->rcx = state->rcx;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;

	saved_state->isf.rip = state->rip;
	saved_state->isf.rsp = state->rsp;
	saved_state->isf.cs = USER64_CS;
	saved_state->isf.rflags = EFL_USER_SET;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}
}
/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		return(KERN_INVALID_ARGUMENT);
	}
	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else
			if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			    !thread_is_64bit(thr_act)) {
				ret = set_debug_state32(thr_act, &state->uds.ds32);
			}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor)  {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}
	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}
	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}
	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;
		break;
	}

	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;
		break;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	pcb_t			pcb = &thread->machine.xxx_pcb;
	x86_saved_state_t	*iss;

#if NCOPY_WINDOWS > 0
	inval_copy_windows(thread);

	thread->machine.physwindow_pte = 0;
	thread->machine.physwindow_busy = 0;
#endif

	/*
	 * Allocate pcb only if required.
	 */
	if (pcb->sf == NULL) {
		pcb->sf = zalloc(iss_zone);
		if (pcb->sf == NULL)
			panic("iss_zone");
	}

	if (task_has_64BitAddr(task)) {
		x86_sframe64_t		*sf64;

		sf64 = (x86_sframe64_t *) pcb->sf;

		bzero((char *)sf64, sizeof(x86_sframe64_t));

		iss = (x86_saved_state_t *) &sf64->ssf;
		iss->flavor = x86_SAVED_STATE64;
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_64.isf.rflags = EFL_USER_SET;
		iss->ss_64.isf.cs = USER64_CS;
		iss->ss_64.isf.ss = USER_DS;
		iss->ss_64.fs = USER_DS;
		iss->ss_64.gs = USER_DS;
	} else {
		if (cpu_mode_is64bit()) {
			x86_sframe_compat32_t	*sfc32;

			sfc32 = (x86_sframe_compat32_t *)pcb->sf;

			bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

			iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
			iss->flavor = x86_SAVED_STATE32;
#if defined(__i386__)
#if DEBUG
			{
				x86_saved_state_compat32_t *xssc;

				xssc = (x86_saved_state_compat32_t *) iss;

				xssc->pad_for_16byte_alignment[0] = 0x64326432;
				xssc->pad_for_16byte_alignment[1] = 0x64326432;
			}
#endif /* DEBUG */
		} else {
			x86_sframe32_t		*sf32;
			struct real_descriptor	*ldtp;
			pmap_paddr_t		paddr;

			sf32 = (x86_sframe32_t *) pcb->sf;

			bzero((char *)sf32, sizeof(x86_sframe32_t));

			iss = (x86_saved_state_t *) &sf32->ssf;
			iss->flavor = x86_SAVED_STATE32;

			pcb->iss_pte0 = pte_kernel_rw(kvtophys((vm_offset_t)iss));
			if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)iss + PAGE_SIZE))))
				pcb->iss_pte1 = INTEL_PTE_INVALID;
			else
				pcb->iss_pte1 = pte_kernel_rw(paddr);

			ldtp = (struct real_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
			pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
			pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
#endif /* __i386__ */
		}
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_32.cs = USER_CS;
		iss->ss_32.ss = USER_DS;
		iss->ss_32.ds = USER_DS;
		iss->ss_32.es = USER_DS;
		iss->ss_32.fs = USER_DS;
		iss->ss_32.gs = USER_DS;
		iss->ss_32.efl = EFL_USER_SET;
	}
	pcb->iss = iss;

	thread->machine.pcb = pcb;
	simple_lock_init(&pcb->lock, 0);

	pcb->arg_store_valid = 0;
	pcb->cthread_self = 0;
	pcb->uldt_selector = 0;

	/* Ensure that the "cthread" descriptor describes a valid
	 * segment.
	 */
	if ((pcb->cthread_desc.access & ACC_P) == 0) {
		struct real_descriptor *ldtp;
		ldtp = (struct real_descriptor *)current_ldt();
		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
	}

	return(KERN_SUCCESS);
}
/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ifps != 0)
		fpu_free(pcb->ifps);
	if (pcb->sf != 0) {
		zfree(iss_zone, pcb->sf);
		pcb->sf = 0;
	}
	if (pcb->ids) {
		zfree(ids_zone, pcb->ids);
		pcb->ids = NULL;
	}
	thread->machine.pcb = (pcb_t)0;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
#if defined(__i386__)
		if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
			pmap_load_kernel_cr3();
#endif /* defined(__i386__) */
		act_machine_switch_pcb(thread);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * This is called when a task is terminated, and also on exec().
 * Clear machine-dependent state that is stored on the task.
 */
void
machine_thread_terminate_self(void)
{
	task_t self_task = current_task();
	if (self_task) {
		user_ldt_t user_ldt = self_task->i386_ldt;
		if (user_ldt != 0) {
			self_task->i386_ldt = 0;
			user_ldt_free(user_ldt);
		}

		if (self_task->task_debug != NULL) {
			zfree(ids_zone, self_task->task_debug);
			self_task->task_debug = NULL;
		}
	}
}
void
act_machine_return(
		int code
		)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}
/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone = zinit(sizeof(x86_sframe64_t),
				thread_max * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		ids_zone = zinit(sizeof(x86_debug_state64_t),
				thread_max * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");

	} else {
		iss_zone = zinit(sizeof(x86_sframe32_t),
				thread_max * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone = zinit(sizeof(x86_debug_state32_t),
				thread_max * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86 debug state");
	}
	fpu_module_init();
}
#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler){
			if (rhp->next)
				printf("[NON-Zero next ptr(%p)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%p)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}
void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb == NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}
int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(%p)(%d): task=%p(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task,
	       thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%p\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
		    (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		    STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);

	return((int)thr_act);
}
#endif /* __i386__ */
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thr_act->machine.pcb == NULL)
		return (0);

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}
/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}
/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state *statep;

	assert(stack);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IEL(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);
#endif

	return;
}
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(new);

	machine_set_current_thread(new);

	return;
}
struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);

		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
			if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
				machine_thread_set_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						x86_DEBUG_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}
void act_thread_cfree(__unused void *ctx)
{
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
	thread->machine.pcb->arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
	return (thread->machine.pcb->arg_store_valid);
}
/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
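/*
 * Illustration (not part of the original source): setting a hardware
 * watchpoint from user space exercises the debug-state path above
 * (thread_set_state -> machine_thread_set_state -> set_debug_state64,
 * which validates dr7 via dr7_is_valid() before the state is copied in).
 * A hedged user-space sketch; set_write_watchpoint and addr are
 * placeholder names for illustration:
 */
#if 0
#include <string.h>
#include <mach/mach.h>

static kern_return_t
set_write_watchpoint(thread_act_t thread, uint64_t addr)
{
	x86_debug_state64_t ds;

	memset(&ds, 0, sizeof(ds));
	ds.dr0 = addr;		/* must be below VM_MAX_PAGE_ADDRESS */
	ds.dr7 = 0x000D0001;	/* L0, R/W0=write, LEN0=4 bytes (see the
				 * DR7 worked example near dr7_is_valid()) */

	return thread_set_state(thread, x86_DEBUG_STATE64,
				(thread_state_t)&ds, x86_DEBUG_STATE64_COUNT);
}
#endif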