/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#if defined(__i386__)
#include <i386/fpu.h>
#endif
#include <i386/seg.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#include <machine/commpage.h>

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};
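
/*
 * Note: the table above is meant to be consulted by thread-state
 * flavor value (e.g. _MachineStateCount[x86_THREAD_STATE32] yields
 * x86_THREAD_STATE32_COUNT), which is why the placeholder zero
 * entries are needed to keep the remaining entries at their flavor
 * indices.
 */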

zone_t		iss_zone;	/* zone for saved_state area */
zone_t		ids_zone;	/* zone for debug_state area */

/* Forward */

void		act_machine_throughcall(thread_t thr_act);
void		act_machine_return(int);

extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t t) {
	/*
	 * NOTE: Task-level reservations are propagated to child threads via
	 * thread_create_internal.  Any mutation of task reservations forces a
	 * recalculate of t_chud (for the pmc flag) for all threads in that task.
	 * Consequently, we can simply check the current thread's flag against
	 * THREAD_PMC_FLAG.  If the result is non-zero, we SWI for a PMC switch.
	 */
	return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
}

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	__asm__ __volatile__("int %0" : : "i" (LAPIC_PMC_SWI_VECTOR) : "memory");
}
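
/*
 * pmc_swi() above raises LAPIC_PMC_SWI_VECTOR synchronously on the
 * current CPU via a software "int" instruction; the handler for that
 * vector is expected to read back the old/new thread pair with
 * ml_get_csw_threads() below.
 */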

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */

/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
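/*
 * For reference (Intel SDM, vol. 3): DR7 bits 0-7 are the per-breakpoint
 * local/global enables (L0,G0 .. L3,G3); for each breakpoint n, a 2-bit
 * R/Wn condition field sits at bit 16+4n and a 2-bit LENn size field at
 * bit 18+4n.  The shifting masks below walk those 4-bit groups.
 */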
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate I/O reads and writes.
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on Merom and newer processors
	 * (it signifies an 8-byte wide region).  We use the 64bit capability
	 * of the processor in lieu of the more laborious model/family checks
	 * as all 64-bit capable processors so far support this.
	 * Reject an attempt to use this on 64-bit incapable processors.
	 */
	if (current_cpu_datap()->cpu_is64bit == FALSE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}
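
/*
 * Example: dr7 == 0x1 (L0 set, R/W0 == 00b "execute", LEN0 == 00b)
 * requests a local instruction breakpoint at the address in DR0 and
 * passes all the checks above (bit 10 is forced on, so 0x401 is what
 * actually gets stored); the global-enable variant, 0x2, is rejected.
 */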

static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : : "r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : : "r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : : "r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : : "r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}
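
/*
 * Note: %db0-%db3 name the DR0-DR3 debug address registers.  DR7
 * itself is not written here; on 64-bit capable CPUs it is staged in
 * the per-cpu cpu_dr7 shadow for whichever path actually loads the
 * hardware register.
 */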

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers.
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

#if defined(__i386__)
	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;
#endif

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value.
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			/* we lost the allocation race: copy into the installed block */
			ids = pcb->ids;
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}
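
/*
 * The zalloc/recheck/zfree sequence above is optimistic allocation:
 * the debug-state block is allocated outside the pcb lock and either
 * installed or, if another thread installed one first, freed again in
 * favor of the winner's block, which then receives the copy.
 */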

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = thread->machine.pcb;
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			/* we lost the allocation race: copy into the installed block */
			ids = pcb->ids;
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages.
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

extern void *get_bsduthreadarg(thread_t th);

#if defined(__x86_64__)
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	mach_vm_offset_t	pcb_stack_top;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);

	if (!cpu_mode_is64bit()) {
		panic("K64 is 64bit!");
	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not the task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = isf;
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = pcb_stack_top;

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

		/*
		 * Switch user's GS base if necessary
		 * by setting the Kernel GS base MSR
		 * - this will become the user's on the swapgs when
		 * returning to user-space.  Avoid this for
		 * kernel threads (no user TLS support required)
		 * and verify the memory shadow of the segment base
		 * in the event it was altered in user space.
		 */
		if ((pcb->cthread_self != 0) || (new->task != kernel_task)) {
			if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
				cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
				wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
			}
		}
	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (uintptr_t) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = isf;

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = pcb_stack_top;

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = pcb_stack_top;

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = (vm_offset_t)get_bsduthreadarg(new);
		cdp->cpu_uber_arg_store_valid = (vm_offset_t)&pcb->arg_store_valid;
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;

		/*
		 * Set the thread's cthread (a.k.a. pthread).
		 * For 32-bit user this involves setting the USER_CTHREAD
		 * descriptor in the LDT to point to the cthread data.
		 * This involves copying in the pre-initialized descriptor.
		 */
		ldtp = (struct real_descriptor *)current_ldt();
		ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
		if (pcb->uldt_selector != 0)
			ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
		cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

		/*
		 * Set the thread's LDT or LDT entry.
		 */
		if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
			/*
			 * Use system LDT.
			 */
			ml_cpu_set_ldt(KERNEL_LDT);
		} else {
			/*
			 * Task has its own LDT.
			 */
			user_ldt_set(new);
		}
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
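
/*
 * The commpage generation count bumped above gives user code a cheap
 * preemption probe: read it before and after a critical sequence, and
 * a changed value means the thread was rescheduled in between.
 */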
#else
static void
act_machine_switch_pcb( thread_t new )
{
	pcb_t			pcb = new->machine.pcb;
	struct real_descriptor	*ldtp;
	vm_offset_t		pcb_stack_top;
	vm_offset_t		hi_pcb_stack_top;
	vm_offset_t		hi_iss;
	cpu_data_t		*cdp = current_cpu_datap();

	assert(new->kernel_stack != 0);
	STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

	if (!cpu_mode_is64bit()) {
		x86_saved_state32_tagged_t	*hi_iss32;
		/*
		 * Save a pointer to the top of the "kernel" stack -
		 * actually the place in the PCB where a trap into
		 * kernel mode will push the registers.
		 */
		hi_iss = (vm_offset_t)((unsigned long)
			pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
			((unsigned long)pcb->iss & PAGE_MASK));

		cdp->cpu_hi_iss = (void *)hi_iss;

		pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
		pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

		hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
		assert(hi_iss32->tag == x86_SAVED_STATE32);

		hi_pcb_stack_top = (int) (hi_iss32 + 1);

		/*
		 * For fast syscall, top of interrupt stack points to pcb stack.
		 */
		*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

		current_ktss()->esp0 = hi_pcb_stack_top;

	} else if (is_saved_state64(pcb->iss)) {
		/*
		 * The test above is performed against the thread save state
		 * flavor and not the task's 64-bit feature flag because of the
		 * thread/task 64-bit state divergence that can arise in
		 * task_set_64bit() x86: the task state is changed before
		 * the individual thread(s).
		 */
		x86_saved_state64_tagged_t	*iss64;
		vm_offset_t			isf;

		assert(is_saved_state64(pcb->iss));

		iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by syscall and double-fault trap handlers.
		 */
		isf = (vm_offset_t) &iss64->state.isf;
		cdp->cpu_uber.cu_isf = UBER64(isf);
		pcb_stack_top = (vm_offset_t) (iss64 + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);
		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		/*
		 * Top of temporary sysenter stack points to pcb stack.
		 * Although this is not normally used by 64-bit users,
		 * it needs to be set in case a sysenter is attempted.
		 */
		*current_sstk64() = UBER64(pcb_stack_top);

		cdp->cpu_task_map = new->map->pmap->pm_task_map;

		/*
		 * Enable the 64-bit user code segment, USER64_CS.
		 * Disable the 32-bit user code segment, USER_CS.
		 */
		ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
		ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;

	} else {
		x86_saved_state_compat32_t	*iss32compat;
		vm_offset_t			isf;

		assert(is_saved_state32(pcb->iss));
		iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

		pcb_stack_top = (int) (iss32compat + 1);
		/* require 16-byte alignment */
		assert((pcb_stack_top & 0xF) == 0);

		/*
		 * Set pointer to PCB's interrupt stack frame in cpu data.
		 * Used by debug trap handler.
		 */
		isf = (vm_offset_t) &iss32compat->isf64;
		cdp->cpu_uber.cu_isf = UBER64(isf);

		/* Top of temporary sysenter stack points to pcb stack */
		*current_sstk64() = UBER64(pcb_stack_top);

		/* Interrupt stack is pcb */
		current_ktss64()->rsp0 = UBER64(pcb_stack_top);

		cdp->cpu_task_map = TASK_MAP_32BIT;
		/* Precalculate pointers to syscall argument store, for use
		 * in the trampolines.
		 */
		cdp->cpu_uber_arg_store = UBER64((vm_offset_t)get_bsduthreadarg(new));
		cdp->cpu_uber_arg_store_valid = UBER64((vm_offset_t)&pcb->arg_store_valid);
		pcb->arg_store_valid = 0;

		/*
		 * Disable USER64_CS
		 * Enable USER_CS
		 */
		ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
		ldt_desc_p(USER_CS)->access |= ACC_PL_U;
	}

	/*
	 * Set the thread's cthread (a.k.a. pthread).
	 * For 32-bit user this involves setting the USER_CTHREAD
	 * descriptor in the LDT to point to the cthread data.
	 * This involves copying in the pre-initialized descriptor.
	 */
	ldtp = (struct real_descriptor *)current_ldt();
	ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
	if (pcb->uldt_selector != 0)
		ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;

	/*
	 * For 64-bit, we additionally set the 64-bit User GS base
	 * address.  On return to 64-bit user, the GS.Base MSR will be written.
	 */
	cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

	/*
	 * Set the thread's LDT or LDT entry.
	 */
	if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
		/*
		 * Use system LDT.
		 */
		ml_cpu_set_ldt(KERNEL_LDT);
	} else {
		/*
		 * Task has its own LDT.
		 */
		user_ldt_set(new);
	}

	/*
	 * Bump the scheduler generation count in the commpage.
	 * This can be read by user code to detect its preemption.
	 */
	commpage_sched_gen_inc();
}
#endif

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number())

	/*
	 * Load the rest of the user state for the new thread.
	 */
	act_machine_switch_pcb(new);

	return(Switch_context(old, continuation, new));
}
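
/*
 * Switch_context() (machine-dependent assembly) performs the actual
 * kernel-stack switch; it returns the old thread once execution has
 * resumed on the new thread's stack.
 */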

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}

/*
 * act_machine_sv_free
 * Release saveareas associated with an act.  If flag is true, release
 * user-level savearea(s) too, else don't.
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.pcb->ifps) {
		(void) fpu_set_fxstate(thread, NULL);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.pcb->ids) {
		zfree(ids_zone, thread->machine.pcb->ids);
		thread->machine.pcb->ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
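
/*
 * Illustrative user-level use of one of these flavors (for reference
 * only; "thread_port" is a hypothetical thread port):
 *
 *	x86_thread_state64_t	ts;
 *	mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t		kr;
 *
 *	kr = thread_get_state(thread_port, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, &count);
 *
 * Such a call is dispatched to machine_thread_get_state() below.
 */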

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
#ifdef __i386__
	if (ts->ss == 0) ts->ss = USER_DS;
	if (ts->ds == 0) ts->ds = USER_DS;
	if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed.  This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;
#endif

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
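
/*
 * Note the eflags sanitization above: user-supplied flags are masked
 * with ~EFL_USER_CLEAR and or'd with EFL_USER_SET, so privileged bits
 * (IOPL and friends, per i386/eflags.h) cannot be smuggled in through
 * thread_set_state().
 */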

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

void
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	x86_thread_state32_t	*state;
	x86_saved_state32_t	*saved_state;
	thread_t		curth = current_thread();
	spl_t			s = 0;

	saved_state = USER_REGS32(thread);

	state = (x86_thread_state32_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->ebp = 0;
	saved_state->eip = state->eip;
	saved_state->eax = state->eax;
	saved_state->ebx = state->ebx;
	saved_state->ecx = state->ecx;
	saved_state->edx = state->edx;
	saved_state->edi = state->edi;
	saved_state->esi = state->esi;
	saved_state->uesp = state->esp;
	saved_state->efl = EFL_USER_SET;

	saved_state->cs = USER_CS;
	saved_state->ss = USER_DS;
	saved_state->ds = USER_DS;
	saved_state->es = USER_DS;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}
}


void
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	x86_thread_state64_t	*state;
	x86_saved_state64_t	*saved_state;
	thread_t		curth = current_thread();
	spl_t			s = 0;

	saved_state = USER_REGS64(thread);
	state = (x86_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	saved_state->rbp = 0;
	saved_state->rdi = state->rdi;
	saved_state->rsi = state->rsi;
	saved_state->rdx = state->rdx;
	saved_state->rcx = state->rcx;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;

	saved_state->isf.rip = state->rip;
	saved_state->isf.rsp = state->rsp;
	saved_state->isf.cs = USER64_CS;
	saved_state->isf.rflags = EFL_USER_SET;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}
}
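
/*
 * The six registers seeded above (rdi, rsi, rdx, rcx, r8, r9) are the
 * integer argument registers of the x86-64 SysV ABI, so this in effect
 * builds the argument list for the new workqueue thread's entry
 * function.
 */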



/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid.  In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);

		break;
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}


/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
		thread_t		thread,
		thread_flavor_t		flavor,
		thread_state_t		tstate,
		mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	    case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	    }

	    case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	    }

	    case x86_THREAD_STATE: {
		x86_thread_state_t	*state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	    }
	}
	return KERN_FAILURE;
}
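
/*
 * Illustrative sketch only (not compiled): how an interrupt-time sampler
 * might use machine_thread_get_kern_state() with the generic
 * x86_THREAD_STATE flavor, which adapts to whichever saved-state form the
 * interrupt pushed.  record_kernel_pc() is a hypothetical consumer.
 */
#if 0
static void
sample_interrupted_pc(void)
{
	x86_thread_state_t	ts;
	mach_msg_type_number_t	count = x86_THREAD_STATE_COUNT;

	if (machine_thread_get_kern_state(current_thread(), x86_THREAD_STATE,
	    (thread_state_t)&ts, &count) != KERN_SUCCESS)
		return;

	if (ts.tsh.flavor == x86_THREAD_STATE64)
		record_kernel_pc(ts.uts.ts64.rip);	/* hypothetical */
	else
		record_kernel_pc(ts.uts.ts32.eip);
}
#endif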


/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
	thread_t		thread,
	task_t			task)
{
	pcb_t			pcb = &thread->machine.xxx_pcb;
	x86_saved_state_t	*iss;

#if NCOPY_WINDOWS > 0
	inval_copy_windows(thread);

	thread->machine.physwindow_pte = 0;
	thread->machine.physwindow_busy = 0;
#endif

	/*
	 * Allocate pcb only if required.
	 */
	if (pcb->sf == NULL) {
		pcb->sf = zalloc(iss_zone);
		if (pcb->sf == NULL)
			panic("iss_zone");
	}

	if (task_has_64BitAddr(task)) {
		x86_sframe64_t	*sf64;

		sf64 = (x86_sframe64_t *) pcb->sf;

		bzero((char *)sf64, sizeof(x86_sframe64_t));

		iss = (x86_saved_state_t *) &sf64->ssf;
		iss->flavor = x86_SAVED_STATE64;
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_64.isf.rflags = EFL_USER_SET;
		iss->ss_64.isf.cs = USER64_CS;
		iss->ss_64.isf.ss = USER_DS;
		iss->ss_64.fs = USER_DS;
		iss->ss_64.gs = USER_DS;
	} else {
		if (cpu_mode_is64bit()) {
			x86_sframe_compat32_t	*sfc32;

			sfc32 = (x86_sframe_compat32_t *)pcb->sf;

			bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

			iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
			iss->flavor = x86_SAVED_STATE32;
#if defined(__i386__)
#if DEBUG
			{
				x86_saved_state_compat32_t *xssc;

				xssc = (x86_saved_state_compat32_t *) iss;

				xssc->pad_for_16byte_alignment[0] = 0x64326432;
				xssc->pad_for_16byte_alignment[1] = 0x64326432;
			}
#endif /* DEBUG */
		} else {
			x86_sframe32_t		*sf32;
			struct real_descriptor	*ldtp;
			pmap_paddr_t		paddr;

			sf32 = (x86_sframe32_t *) pcb->sf;

			bzero((char *)sf32, sizeof(x86_sframe32_t));

			iss = (x86_saved_state_t *) &sf32->ssf;
			iss->flavor = x86_SAVED_STATE32;
			pcb->iss_pte0 = pte_kernel_rw(kvtophys((vm_offset_t)iss));
			if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)iss + PAGE_SIZE))))
				pcb->iss_pte1 = INTEL_PTE_INVALID;
			else
				pcb->iss_pte1 = pte_kernel_rw(paddr);

			ldtp = (struct real_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
			pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
			pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
#endif /* __i386__ */
		}
		/*
		 * Guarantee that the bootstrapped thread will be in user
		 * mode.
		 */
		iss->ss_32.cs = USER_CS;
		iss->ss_32.ss = USER_DS;
		iss->ss_32.ds = USER_DS;
		iss->ss_32.es = USER_DS;
		iss->ss_32.fs = USER_DS;
		iss->ss_32.gs = USER_DS;
		iss->ss_32.efl = EFL_USER_SET;
	}
	pcb->iss = iss;

	thread->machine.pcb = pcb;
	simple_lock_init(&pcb->lock, 0);

	pcb->arg_store_valid = 0;
	pcb->cthread_self = 0;
	pcb->uldt_selector = 0;

	/* Ensure that the "cthread" descriptor describes a valid
	 * segment.
	 */
	if ((pcb->cthread_desc.access & ACC_P) == 0) {
		struct real_descriptor *ldtp;
		ldtp = (struct real_descriptor *)current_ldt();
		pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
	}

	return(KERN_SUCCESS);
}
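
/*
 * Note on machine_thread_create() above: the save frame comes in three
 * layouts - x86_sframe64_t for 64-bit tasks, the 16-byte-aligned
 * x86_sframe_compat32_t for 32-bit tasks on a 64-bit CPU, and a plain
 * x86_sframe32_t (plus the iss PTEs and LDT-derived descriptors) for
 * 32-bit tasks on a 32-bit CPU.  In every case pcb->iss is left pointing
 * at a saved state whose segment selectors and flags guarantee that the
 * bootstrapped thread first runs in user mode.
 */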

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
	thread_t		thread)
{
	register pcb_t	pcb = thread->machine.pcb;

	assert(pcb);

	if (pcb->ifps != 0)
		fpu_free(pcb->ifps);
	if (pcb->sf != 0) {
		zfree(iss_zone, pcb->sf);
		pcb->sf = 0;
	}
	if (pcb->ids) {
		zfree(ids_zone, pcb->ids);
		pcb->ids = NULL;
	}
	thread->machine.pcb = (pcb_t)0;

}

void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
#if defined(__i386__)
		if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
			pmap_load_kernel_cr3();
#endif /* defined(__i386__) */
		act_machine_switch_pcb(thread);
	}
	enable_preemption();
}
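
/*
 * Note: the machine_thread_create() call above reuses the thread's
 * existing save-frame allocation but re-zeroes it for the new address
 * space width, so any prior register state is discarded; callers are
 * presumably expected to repopulate it via machine_thread_set_state().
 */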



/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * This is called when a task is terminated, and also on exec().
 * Clear machine-dependent state that is stored on the task.
 */
void
machine_thread_terminate_self(void)
{
	task_t self_task = current_task();
	if (self_task) {
		user_ldt_t user_ldt = self_task->i386_ldt;
		if (user_ldt != 0) {
			self_task->i386_ldt = 0;
			user_ldt_free(user_ldt);
		}

		if (self_task->task_debug != NULL) {
			zfree(ids_zone, self_task->task_debug);
			self_task->task_debug = NULL;
		}
	}
}

void
act_machine_return(
	int code
	)
{
	/*
	 * This code is called with nothing locked.
	 * It also returns with nothing locked, if it returns.
	 *
	 * This routine terminates the current thread activation.
	 * If this is the only activation associated with its
	 * thread shuttle, then the entire thread (shuttle plus
	 * activation) is terminated.
	 */
	assert( code == KERN_TERMINATED );

	thread_terminate_self();

	/*NOTREACHED*/

	panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone = zinit(sizeof(x86_sframe64_t),
				thread_max * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		ids_zone = zinit(sizeof(x86_debug_state64_t),
				thread_max * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");

	} else {
		iss_zone = zinit(sizeof(x86_sframe32_t),
				thread_max * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone = zinit(sizeof(x86_debug_state32_t),
				thread_max * (sizeof(x86_debug_state32_t)),
				THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
				"x86 debug state");
	}
	fpu_module_init();
}


#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler *rhp = thr_act->handlers;
	int counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler) {
			if (rhp->next)
				printf("[NON-Zero next ptr(%p)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%p)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
	if (thr_act->machine.pcb == NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}

int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(%p)(%d): task=%p(%d)\n",
	    thr_act, thr_act->ref_count,
	    thr_act->task,
	    thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	    thr_act->suspend_count, thr_act->user_stop_count,
	    thr_act->active, thr_act->ast);
	printf("\tpcb=%p\n", thr_act->machine.pcb);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
		    (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		    STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
	}

	dump_handlers(thr_act);
	dump_regs(thr_act);
	return((int)thr_act);
}
#endif

user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thr_act->machine.pcb == NULL)
		return(0);

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IEL(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IEL(stack);
#endif

	return;
}
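
/*
 * The stack is primed so that the first dispatch onto it resumes in the
 * assembly trampoline Thread_continue with the C continuation routine
 * (thread_continue) parked in the saved ebx/rbx; the trampoline is
 * expected to recover it from there (see cswitch.s).
 */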

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);


	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(new);

	machine_set_current_thread(new);

	return;
}
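
/*
 * Note: this handoff path moves the kernel stack from 'old' to 'new'
 * without saving 'old's kernel register context, which is safe only
 * because 'old' is relinquishing the processor voluntarily; the FPU
 * context is the one piece flushed explicitly, via fpu_save_context()
 * above.
 */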




struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};



void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);

		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}
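
/*
 * act_thread_csave() above and act_thread_catt() below form a
 * save/restore pair: the opaque pointer returned by csave is consumed
 * and freed by catt.  Illustrative sketch only (not compiled); the
 * surrounding caller is hypothetical:
 */
#if 0
	void *ctx;

	ctx = act_thread_csave();	/* snapshot the current user state */
	/* ... temporarily mutate the thread's user state ... */
	act_thread_catt(ctx);		/* reinstall the snapshot; frees ctx */
#endif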


void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
			if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
				machine_thread_set_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						x86_DEBUG_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
	thread->machine.pcb->arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
	return (thread->machine.pcb->arg_store_valid);
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t	*src,
		x86_debug_state32_t	*target,
		boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t	*src,
		x86_debug_state64_t	*target,
		boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}
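
/*
 * Illustrative sketch only (not compiled): per the comments above,
 * callers installing user-supplied debug state pass all == FALSE so
 * that dr4/dr5 are never written through this path; pcb->ids is shown
 * as the destination by analogy with its use elsewhere in this file.
 */
#if 0
	copy_debug_state64(&new_ds, (x86_debug_state64_t *)pcb->ids, FALSE);
#endif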