/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#if defined(__i386__)
#include <i386/fpu.h>
#endif
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */
    0,
    x86_THREAD_STATE32_COUNT,
    x86_FLOAT_STATE32_COUNT,
    x86_EXCEPTION_STATE32_COUNT,
    x86_THREAD_STATE64_COUNT,
    x86_FLOAT_STATE64_COUNT,
    x86_EXCEPTION_STATE64_COUNT,
    x86_THREAD_STATE_COUNT,
    x86_FLOAT_STATE_COUNT,
    x86_EXCEPTION_STATE_COUNT,
    0,
    x86_SAVED_STATE32_COUNT,
    x86_SAVED_STATE64_COUNT,
    x86_DEBUG_STATE32_COUNT,
    x86_DEBUG_STATE64_COUNT,
    x86_DEBUG_STATE_COUNT
};

zone_t iss_zone;    /* zone for saved_state area */
zone_t ids_zone;    /* zone for debug_state area */

/* Forward */

extern void Thread_continue(void);
extern void Load_context(
    thread_t thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline boolean_t
machine_thread_pmc_eligible(thread_t t) {
    /*
     * NOTE: Task-level reservations are propagated to child threads via
     * thread_create_internal. Any mutation of task reservations forces a
     * recalculation of t_chud (for the pmc flag) for all threads in that
     * task. Consequently, we can simply check the current thread's flag
     * against THREAD_PMC_FLAG. If the result is non-zero, we SWI for a
     * PMC switch.
     */
    return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
}

static inline void
pmc_swi(thread_t old, thread_t new) {
    current_cpu_datap()->csw_old_thread = old;
    current_cpu_datap()->csw_new_thread = new;
    pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
    if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
        pmc_swi(old, new);
    }
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
    *old = current_cpu_datap()->csw_old_thread;
    *new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */
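
/*
 * Illustrative sketch (editorial, not in the original source): on a
 * context switch where either thread holds a PMC reservation,
 * machine_pmc_cswitch() stashes the pair in per-cpu data and raises the
 * software interrupt via pal_pmc_swi(); the SWI handler can then fetch
 * the pair and reprogram the performance counters:
 *
 *      thread_t old, new;
 *      ml_get_csw_threads(&old, &new);
 *      // swap counter configuration from old's to new's reservation
 */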

/*
 * Don't let an illegal value for dr7 get set. Specifically,
 * check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate I/O reads and writes
     */
    if (!(get_cr4() & CR4_DE))
        for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
                i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * len0-3 pattern "10B" is ok for len on Merom and newer processors
     * (it signifies an 8-byte wide region). We use the 64bit capability
     * of the processor in lieu of the more laborious model/family checks
     * as all 64-bit capable processors so far support this.
     * Reject an attempt to use this on 64-bit incapable processors.
     */
    if (current_cpu_datap()->cpu_is64bit == FALSE)
        for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
                i++, mask1 <<= 4, mask2 <<= 4)
            if ((*dr7 & mask1) == mask2)
                return (FALSE);

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++)
        if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
                ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
            return (FALSE);

    /*
     * Intel docs have these bits fixed.
     */
    *dr7 |= 0x1 << 10;      /* set bit 10 to 1 */
    *dr7 &= ~(0x1 << 11);   /* set bit 11 to 0 */
    *dr7 &= ~(0x1 << 12);   /* set bit 12 to 0 */
    *dr7 &= ~(0x1 << 14);   /* set bit 14 to 0 */
    *dr7 &= ~(0x1 << 15);   /* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */

    if (*dr7 & 0x2)
        return (FALSE);

    if (*dr7 & (0x2<<2))
        return (FALSE);

    if (*dr7 & (0x2<<4))
        return (FALSE);

    if (*dr7 & (0x2<<6))
        return (FALSE);

    return (TRUE);
}
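
/*
 * Illustrative example (editorial, not in the original source): a dr7
 * value that passes the checks above. A 4-byte data-write watchpoint in
 * DR0 sets L0 (bit 0), R/W0 = 01B (bits 16-17) and LEN0 = 11B
 * (bits 18-19), i.e. dr7 = 0x000D0001. dr7_is_valid() accepts it (only
 * the local-enable bit is set, R/W0 is not the forbidden "10B" pattern,
 * and len/r-w are consistent) and additionally forces bit 10 on, so the
 * stored value becomes 0x000D0401.
 */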
269
270static inline void
271set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
272{
273 __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
274 __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
275 __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
276 __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
277 if (cpu_mode_is64bit())
278 cdp->cpu_dr7 = ds->dr7;
279}
280
281extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
282
283static inline void
284set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
285{
286 /*
287 * We need to enter 64-bit mode in order to set the full
288 * width of these registers
289 */
290 set_64bit_debug_regs(ds);
291 cdp->cpu_dr7 = ds->dr7;
292}
293
b0d623f7
A
294boolean_t
295debug_state_is_valid32(x86_debug_state32_t *ds)
296{
297 if (!dr7_is_valid(&ds->dr7))
298 return FALSE;
299
300#if defined(__i386__)
301 /*
302 * Only allow local breakpoints and make sure they are not
303 * in the trampoline code.
304 */
305 if (ds->dr7 & 0x1)
306 if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
307 return FALSE;
308
309 if (ds->dr7 & (0x1<<2))
310 if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
311 return FALSE;
312
313 if (ds->dr7 & (0x1<<4))
314 if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
315 return FALSE;
316
317 if (ds->dr7 & (0x1<<6))
318 if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
319 return FALSE;
320#endif
321
322 return TRUE;
323}
324
325boolean_t
326debug_state_is_valid64(x86_debug_state64_t *ds)
327{
328 if (!dr7_is_valid((uint32_t *)&ds->dr7))
329 return FALSE;
330
331 /*
332 * Don't allow the user to set debug addresses above their max
333 * value
334 */
335 if (ds->dr7 & 0x1)
336 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
337 return FALSE;
338
339 if (ds->dr7 & (0x1<<2))
340 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
341 return FALSE;
342
343 if (ds->dr7 & (0x1<<4))
344 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
345 return FALSE;
346
347 if (ds->dr7 & (0x1<<6))
348 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
349 return FALSE;
350
351 return TRUE;
352}
353
354
0c530ab8
A
355static kern_return_t
356set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
357{
358 x86_debug_state32_t *ids;
359 pcb_t pcb;
360
6d2010ae 361 pcb = THREAD_TO_PCB(thread);
0c530ab8
A
362 ids = pcb->ids;
363
b0d623f7
A
364 if (debug_state_is_valid32(ds) != TRUE) {
365 return KERN_INVALID_ARGUMENT;
366 }
367
0c530ab8 368 if (ids == NULL) {
2d21ac55 369 ids = zalloc(ids_zone);
0c530ab8
A
370 bzero(ids, sizeof *ids);
371
372 simple_lock(&pcb->lock);
373 /* make sure it wasn't already alloc()'d elsewhere */
374 if (pcb->ids == NULL) {
375 pcb->ids = ids;
376 simple_unlock(&pcb->lock);
377 } else {
378 simple_unlock(&pcb->lock);
2d21ac55 379 zfree(ids_zone, ids);
0c530ab8
A
380 }
381 }
382
0c530ab8 383
b0d623f7 384 copy_debug_state32(ds, ids, FALSE);
0c530ab8
A
385
386 return (KERN_SUCCESS);
0c530ab8
A
387}
388
389static kern_return_t
390set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
391{
392 x86_debug_state64_t *ids;
393 pcb_t pcb;
394
6d2010ae 395 pcb = THREAD_TO_PCB(thread);
0c530ab8
A
396 ids = pcb->ids;
397
b0d623f7
A
398 if (debug_state_is_valid64(ds) != TRUE) {
399 return KERN_INVALID_ARGUMENT;
400 }
401
0c530ab8 402 if (ids == NULL) {
2d21ac55 403 ids = zalloc(ids_zone);
0c530ab8
A
404 bzero(ids, sizeof *ids);
405
406 simple_lock(&pcb->lock);
407 /* make sure it wasn't already alloc()'d elsewhere */
408 if (pcb->ids == NULL) {
409 pcb->ids = ids;
410 simple_unlock(&pcb->lock);
411 } else {
412 simple_unlock(&pcb->lock);
2d21ac55 413 zfree(ids_zone, ids);
0c530ab8
A
414 }
415 }
416
b0d623f7 417 copy_debug_state64(ds, ids, FALSE);
0c530ab8
A
418
419 return (KERN_SUCCESS);
0c530ab8
A
420}
421
422static void
423get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
424{
425 x86_debug_state32_t *saved_state;
426
6d2010ae 427 saved_state = thread->machine.ids;
0c530ab8
A
428
429 if (saved_state) {
b0d623f7 430 copy_debug_state32(saved_state, ds, TRUE);
0c530ab8
A
431 } else
432 bzero(ds, sizeof *ds);
433}
434
435static void
436get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
437{
438 x86_debug_state64_t *saved_state;
439
6d2010ae 440 saved_state = (x86_debug_state64_t *)thread->machine.ids;
0c530ab8
A
441
442 if (saved_state) {
b0d623f7 443 copy_debug_state64(saved_state, ds, TRUE);
0c530ab8
A
444 } else
445 bzero(ds, sizeof *ds);
446}
447
1c79356b
A
448/*
449 * consider_machine_collect:
450 *
451 * Try to collect machine-dependent pages
452 */
453void
91447636 454consider_machine_collect(void)
1c79356b
A
455{
456}
457
1c79356b 458void
91447636 459consider_machine_adjust(void)
1c79356b 460{
1c79356b 461}
1c79356b 462
1c79356b
A
463/*
464 * Switch to the first thread on a CPU.
465 */
466void
55e303ae 467machine_load_context(
1c79356b
A
468 thread_t new)
469{
b0d623f7
A
470#if CONFIG_COUNTERS
471 machine_pmc_cswitch(NULL, new);
472#endif
0c530ab8 473 new->machine.specFlags |= OnProc;
6d2010ae 474 act_machine_switch_pcb(NULL, new);
91447636 475 Load_context(new);
1c79356b
A
476}
477
478/*
479 * Switch to a new thread.
480 * Save the old thread`s kernel state or continuation,
481 * and return it.
482 */
483thread_t
55e303ae 484machine_switch_context(
91447636
A
485 thread_t old,
486 thread_continue_t continuation,
487 thread_t new)
1c79356b 488{
1c79356b 489#if MACH_RT
91447636 490 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
1c79356b 491#endif
b0d623f7
A
492#if CONFIG_COUNTERS
493 machine_pmc_cswitch(old, new);
494#endif
1c79356b
A
495 /*
496 * Save FP registers if in use.
497 */
498 fpu_save_context(old);
499
0c530ab8
A
500 old->machine.specFlags &= ~OnProc;
501 new->machine.specFlags |= OnProc;
502
b0d623f7
A
503 /*
504 * Monitor the stack depth and report new max,
505 * not worrying about races.
506 */
507 vm_offset_t depth = current_stack_depth();
508 if (depth > kernel_stack_depth_max) {
509 kernel_stack_depth_max = depth;
510 KERNEL_DEBUG_CONSTANT(
511 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
512 (long) depth, 0, 0, 0, 0);
513 }
514
1c79356b
A
515 /*
516 * Switch address maps if need be, even if not switching tasks.
517 * (A server activation may be "borrowing" a client map.)
518 */
6d2010ae 519 PMAP_SWITCH_CONTEXT(old, new, cpu_number());
1c79356b
A
520
521 /*
522 * Load the rest of the user state for the new thread
523 */
6d2010ae 524 act_machine_switch_pcb(old, new);
2d21ac55 525
1c79356b
A
526 return(Switch_context(old, continuation, new));
527}
528
b0d623f7
A
529thread_t
530machine_processor_shutdown(
531 thread_t thread,
532 void (*doshutdown)(processor_t),
533 processor_t processor)
534{
535#if CONFIG_VMX
536 vmx_suspend();
537#endif
538 fpu_save_context(thread);
539 PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
540 return(Shutdown_context(thread, doshutdown, processor));
541}
542
91447636
A
543
544/*
545 * This is where registers that are not normally specified by the mach-o
546 * file on an execve would be nullified, perhaps to avoid a covert channel.
547 */
548kern_return_t
549machine_thread_state_initialize(
550 thread_t thread)
551{
2d21ac55
A
552 /*
553 * If there's an fpu save area, free it.
554 * The initialized state will then be lazily faulted-in, if required.
555 * And if we're target, re-arm the no-fpu trap.
556 */
6d2010ae 557 if (thread->machine.ifps) {
060df5ea 558 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
4452a7af 559
b0d623f7
A
560 if (thread == current_thread())
561 clear_fpu();
562 }
563
6d2010ae
A
564 if (thread->machine.ids) {
565 zfree(ids_zone, thread->machine.ids);
566 thread->machine.ids = NULL;
b0d623f7
A
567 }
568
569 return KERN_SUCCESS;
4452a7af 570}
0c530ab8
A
571
572uint32_t
573get_eflags_exportmask(void)
574{
575 return EFL_USER_SET;
576}
577
578/*
579 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
580 * for 32bit tasks only
581 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
582 * for 64bit tasks only
583 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
584 * for 32bit tasks only
585 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
586 * for 64bit tasks only
587 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
588 * for either 32bit or 64bit tasks
589 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
590 * for 32bit tasks only
591 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
592 * for 64bit tasks only
593 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
594 * for either 32bit or 64bit tasks
595 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
596 * for 32bit tasks only
597 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
598 * for 64bit tasks only
599 * x86_EXCEPTION_STATE - external get exception state on 323/64 bit processors
600 * for either 32bit or 64bit tasks
601 */
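
/*
 * Illustrative sketch (editorial, not in the original source): an
 * external caller such as a debugger typically uses the combined flavor
 * and lets the kernel pick the width. Assuming "port" names the target
 * thread's Mach port:
 *
 *      x86_thread_state_t ts;
 *      mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *      kern_return_t kr = thread_get_state(port, x86_THREAD_STATE,
 *              (thread_state_t)&ts, &count);
 *      // on success, ts.tsh.flavor is x86_THREAD_STATE32 or ..64
 *
 * That request lands in machine_thread_get_state() below, which
 * dispatches on the flavor exactly as documented above.
 */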


static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->cpu = saved_state->isf.cpu;
    es->err = (typeof(es->err))saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->cpu = saved_state->cpu;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS32(thread);

    /*
     * Scrub segment selector values:
     */
    ts->cs = USER_CS;
#ifdef __i386__
    if (ts->ss == 0) ts->ss = USER_DS;
    if (ts->ds == 0) ts->ds = USER_DS;
    if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
    /*
     * On a 64 bit kernel, we always override the data segments,
     * as the actual selector numbers have changed. This also
     * means that we don't support setting the data segments
     * manually any more.
     */
    ts->ss = USER_DS;
    ts->ds = USER_DS;
    ts->es = USER_DS;
#endif

    /* Check segment selectors are safe */
    if (!valid_user_segment_selectors(ts->cs,
                                      ts->ss,
                                      ts->ds,
                                      ts->es,
                                      ts->fs,
                                      ts->gs))
        return(KERN_INVALID_ARGUMENT);

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs;
    saved_state->ss = ts->ss;
    saved_state->ds = ts->ds;
    saved_state->es = ts->es;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
        saved_state->cs = SYSENTER_TF_CS;

    return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, DIRTY);

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
        !IS_USERADDR64_CANONICAL(ts->rip))
        return(KERN_INVALID_ARGUMENT);

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;
    saved_state->isf.cs = USER64_CS;
    saved_state->fs = (uint32_t)ts->fs;
    saved_state->gs = (uint32_t)ts->gs;

    return(KERN_SUCCESS);
}



static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t *saved_state;

    pal_register_cache_state(thread, VALID);

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}


/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor) {
    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;

        /* Check segment selectors are safe */
        if (!valid_user_segment_selectors(state->cs,
                                          state->ss,
                                          state->ds,
                                          state->es,
                                          state->fs,
                                          state->gs))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS32(thr_act);

        /*
         * General registers
         */
        saved_state->edi = state->edi;
        saved_state->esi = state->esi;
        saved_state->ebp = state->ebp;
        saved_state->uesp = state->uesp;
        saved_state->ebx = state->ebx;
        saved_state->edx = state->edx;
        saved_state->ecx = state->ecx;
        saved_state->eax = state->eax;
        saved_state->eip = state->eip;

        saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
            state->cs = SYSENTER_TF_CS;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked. Others will be reset by 'iret'
         * if they are not valid.
         */
        saved_state->cs = state->cs;
        saved_state->ss = state->ss;
        saved_state->ds = state->ds;
        saved_state->es = state->es;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *) tstate;

        /* Verify that the supplied code segment selector is
         * valid. In 64-bit mode, the FS and GS segment overrides
         * use the FS.base and GS.base MSRs to calculate
         * base addresses, and the trampolines don't directly
         * restore the segment registers--hence they are no
         * longer relevant for validation.
         */
        if (!valid_user_code_selector(state->isf.cs))
            return KERN_INVALID_ARGUMENT;

        /* Check pc and stack are canonical addresses */
        if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
            !IS_USERADDR64_CANONICAL(state->isf.rip))
            return KERN_INVALID_ARGUMENT;

        pal_register_cache_state(thr_act, DIRTY);

        saved_state = USER_REGS64(thr_act);

        /*
         * General registers
         */
        saved_state->r8 = state->r8;
        saved_state->r9 = state->r9;
        saved_state->r10 = state->r10;
        saved_state->r11 = state->r11;
        saved_state->r12 = state->r12;
        saved_state->r13 = state->r13;
        saved_state->r14 = state->r14;
        saved_state->r15 = state->r15;
        saved_state->rdi = state->rdi;
        saved_state->rsi = state->rsi;
        saved_state->rbp = state->rbp;
        saved_state->rbx = state->rbx;
        saved_state->rdx = state->rdx;
        saved_state->rcx = state->rcx;
        saved_state->rax = state->rax;
        saved_state->isf.rsp = state->isf.rsp;
        saved_state->isf.rip = state->isf.rip;

        saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

        /*
         * User setting segment registers.
         * Code and stack selectors have already been
         * checked. Others will be reset by 'sys'
         * if they are not valid.
         */
        saved_state->isf.cs = state->isf.cs;
        saved_state->isf.ss = state->isf.ss;
        saved_state->fs = state->fs;
        saved_state->gs = state->gs;

        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (count != x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (count != x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;

        if (count != x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;
        if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        }
        if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
            !thread_is_64bit(thr_act)) {
            return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        return(KERN_INVALID_ARGUMENT);
    }

    case x86_AVX_STATE32:
    {
        if (count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return fpu_set_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (count != x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
    }

    case x86_THREAD_STATE64:
    {
        if (count != x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
    }
    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (count != x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        if (state->tsh.flavor == x86_THREAD_STATE64 &&
            state->tsh.count == x86_THREAD_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            return set_thread_state64(thr_act, &state->uts.ts64);
        } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
                   state->tsh.count == x86_THREAD_STATE32_COUNT &&
                   !thread_is_64bit(thr_act)) {
            return set_thread_state32(thr_act, &state->uts.ts32);
        } else
            return(KERN_INVALID_ARGUMENT);
    }
    case x86_DEBUG_STATE32:
    {
        x86_debug_state32_t *state;
        kern_return_t ret;

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state32_t *)tstate;

        ret = set_debug_state32(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE64:
    {
        x86_debug_state64_t *state;
        kern_return_t ret;

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state64_t *)tstate;

        ret = set_debug_state64(thr_act, state);

        return ret;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;
        kern_return_t ret = KERN_INVALID_ARGUMENT;

        if (count != x86_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;
        if (state->dsh.flavor == x86_DEBUG_STATE64 &&
            state->dsh.count == x86_DEBUG_STATE64_COUNT &&
            thread_is_64bit(thr_act)) {
            ret = set_debug_state64(thr_act, &state->uds.ds64);
        } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
                   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
                   !thread_is_64bit(thr_act)) {
            ret = set_debug_state32(thr_act, &state->uds.ds32);
        }
        return ret;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}



/*
 * thread_getstatus:
 *
 *	Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {

    case THREAD_STATE_FLAVOR_LIST:
    {
        if (*count < 3)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = i386_THREAD_STATE;
        tstate[1] = i386_FLOAT_STATE;
        tstate[2] = i386_EXCEPTION_STATE;

        *count = 3;
        break;
    }

    case THREAD_STATE_FLAVOR_LIST_NEW:
    {
        if (*count < 4)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = x86_THREAD_STATE;
        tstate[1] = x86_FLOAT_STATE;
        tstate[2] = x86_EXCEPTION_STATE;
        tstate[3] = x86_DEBUG_STATE;

        *count = 4;
        break;
    }

    case x86_SAVED_STATE32:
    {
        x86_saved_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (*count < x86_SAVED_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state32_t *) tstate;
        saved_state = USER_REGS32(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE32_COUNT;
        break;
    }

    case x86_SAVED_STATE64:
    {
        x86_saved_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (*count < x86_SAVED_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        state = (x86_saved_state64_t *)tstate;
        saved_state = USER_REGS64(thr_act);

        /*
         * First, copy everything:
         */
        *state = *saved_state;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_SAVED_STATE64_COUNT;
        break;
    }

    case x86_FLOAT_STATE32:
    {
        if (*count < x86_FLOAT_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE64:
    {
        if (*count < x86_FLOAT_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_FLOAT_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_FLOAT_STATE:
    {
        x86_float_state_t *state;
        kern_return_t kret;

        if (*count < x86_FLOAT_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_float_state_t *)tstate;

        /*
         * no need to bzero... currently
         * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
         */
        if (thread_is_64bit(thr_act)) {
            state->fsh.flavor = x86_FLOAT_STATE64;
            state->fsh.count  = x86_FLOAT_STATE64_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
        } else {
            state->fsh.flavor = x86_FLOAT_STATE32;
            state->fsh.count  = x86_FLOAT_STATE32_COUNT;

            kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
        }
        *count = x86_FLOAT_STATE_COUNT;

        return(kret);
    }

    case x86_AVX_STATE32:
    {
        if (*count != x86_AVX_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE32_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_AVX_STATE64:
    {
        if (*count != x86_AVX_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_AVX_STATE64_COUNT;

        return fpu_get_fxstate(thr_act, tstate, flavor);
    }

    case x86_THREAD_STATE32:
    {
        if (*count < x86_THREAD_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE32_COUNT;

        get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        break;
    }

    case x86_THREAD_STATE64:
    {
        if (*count < x86_THREAD_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_THREAD_STATE64_COUNT;

        get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
        break;
    }

    case x86_THREAD_STATE:
    {
        x86_thread_state_t *state;

        if (*count < x86_THREAD_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *)tstate;

        bzero((char *)state, sizeof(x86_thread_state_t));

        if (thread_is_64bit(thr_act)) {
            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count  = x86_THREAD_STATE64_COUNT;

            get_thread_state64(thr_act, &state->uts.ts64);
        } else {
            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count  = x86_THREAD_STATE32_COUNT;

            get_thread_state32(thr_act, &state->uts.ts32);
        }
        *count = x86_THREAD_STATE_COUNT;

        break;
    }


    case x86_EXCEPTION_STATE32:
    {
        if (*count < x86_EXCEPTION_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE32_COUNT;

        get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state32_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE64:
    {
        if (*count < x86_EXCEPTION_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if ( !thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        *count = x86_EXCEPTION_STATE64_COUNT;

        get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
        /*
         * Suppress the cpu number for binary compatibility
         * of this deprecated state.
         */
        ((x86_exception_state64_t *)tstate)->cpu = 0;
        break;
    }

    case x86_EXCEPTION_STATE:
    {
        x86_exception_state_t *state;

        if (*count < x86_EXCEPTION_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_exception_state_t *)tstate;

        bzero((char *)state, sizeof(x86_exception_state_t));

        if (thread_is_64bit(thr_act)) {
            state->esh.flavor = x86_EXCEPTION_STATE64;
            state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

            get_exception_state64(thr_act, &state->ues.es64);
        } else {
            state->esh.flavor = x86_EXCEPTION_STATE32;
            state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

            get_exception_state32(thr_act, &state->ues.es32);
        }
        *count = x86_EXCEPTION_STATE_COUNT;

        break;
    }
    case x86_DEBUG_STATE32:
    {
        if (*count < x86_DEBUG_STATE32_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

        *count = x86_DEBUG_STATE32_COUNT;

        break;
    }
    case x86_DEBUG_STATE64:
    {
        if (*count < x86_DEBUG_STATE64_COUNT)
            return(KERN_INVALID_ARGUMENT);

        if (!thread_is_64bit(thr_act))
            return(KERN_INVALID_ARGUMENT);

        get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

        *count = x86_DEBUG_STATE64_COUNT;

        break;
    }
    case x86_DEBUG_STATE:
    {
        x86_debug_state_t *state;

        if (*count < x86_DEBUG_STATE_COUNT)
            return(KERN_INVALID_ARGUMENT);

        state = (x86_debug_state_t *)tstate;

        bzero(state, sizeof *state);

        if (thread_is_64bit(thr_act)) {
            state->dsh.flavor = x86_DEBUG_STATE64;
            state->dsh.count  = x86_DEBUG_STATE64_COUNT;

            get_debug_state64(thr_act, &state->uds.ds64);
        } else {
            state->dsh.flavor = x86_DEBUG_STATE32;
            state->dsh.count  = x86_DEBUG_STATE32_COUNT;

            get_debug_state32(thr_act, &state->uds.ds32);
        }
        *count = x86_DEBUG_STATE_COUNT;
        break;
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || int_state == NULL)
        return KERN_FAILURE;

    switch (flavor) {
    case x86_THREAD_STATE32: {
        x86_thread_state32_t *state;
        x86_saved_state32_t *saved_state;

        if (!is_saved_state32(int_state) ||
            *count < x86_THREAD_STATE32_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state32_t *) tstate;

        saved_state = saved_state32(int_state);
        /*
         * General registers.
         */
        state->eax = saved_state->eax;
        state->ebx = saved_state->ebx;
        state->ecx = saved_state->ecx;
        state->edx = saved_state->edx;
        state->edi = saved_state->edi;
        state->esi = saved_state->esi;
        state->ebp = saved_state->ebp;
        state->esp = saved_state->uesp;
        state->eflags = saved_state->efl;
        state->eip = saved_state->eip;
        state->cs = saved_state->cs;
        state->ss = saved_state->ss;
        state->ds = saved_state->ds & 0xffff;
        state->es = saved_state->es & 0xffff;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;

        *count = x86_THREAD_STATE32_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE64: {
        x86_thread_state64_t *state;
        x86_saved_state64_t *saved_state;

        if (!is_saved_state64(int_state) ||
            *count < x86_THREAD_STATE64_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state64_t *) tstate;

        saved_state = saved_state64(int_state);
        /*
         * General registers.
         */
        state->rax = saved_state->rax;
        state->rbx = saved_state->rbx;
        state->rcx = saved_state->rcx;
        state->rdx = saved_state->rdx;
        state->rdi = saved_state->rdi;
        state->rsi = saved_state->rsi;
        state->rbp = saved_state->rbp;
        state->rsp = saved_state->isf.rsp;
        state->r8 = saved_state->r8;
        state->r9 = saved_state->r9;
        state->r10 = saved_state->r10;
        state->r11 = saved_state->r11;
        state->r12 = saved_state->r12;
        state->r13 = saved_state->r13;
        state->r14 = saved_state->r14;
        state->r15 = saved_state->r15;

        state->rip = saved_state->isf.rip;
        state->rflags = saved_state->isf.rflags;
        state->cs = saved_state->isf.cs;
        state->fs = saved_state->fs & 0xffff;
        state->gs = saved_state->gs & 0xffff;
        *count = x86_THREAD_STATE64_COUNT;

        return KERN_SUCCESS;
    }

    case x86_THREAD_STATE: {
        x86_thread_state_t *state = NULL;

        if (*count < x86_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (x86_thread_state_t *) tstate;

        if (is_saved_state32(int_state)) {
            x86_saved_state32_t *saved_state = saved_state32(int_state);

            state->tsh.flavor = x86_THREAD_STATE32;
            state->tsh.count = x86_THREAD_STATE32_COUNT;

            /*
             * General registers.
             */
            state->uts.ts32.eax = saved_state->eax;
            state->uts.ts32.ebx = saved_state->ebx;
            state->uts.ts32.ecx = saved_state->ecx;
            state->uts.ts32.edx = saved_state->edx;
            state->uts.ts32.edi = saved_state->edi;
            state->uts.ts32.esi = saved_state->esi;
            state->uts.ts32.ebp = saved_state->ebp;
            state->uts.ts32.esp = saved_state->uesp;
            state->uts.ts32.eflags = saved_state->efl;
            state->uts.ts32.eip = saved_state->eip;
            state->uts.ts32.cs = saved_state->cs;
            state->uts.ts32.ss = saved_state->ss;
            state->uts.ts32.ds = saved_state->ds & 0xffff;
            state->uts.ts32.es = saved_state->es & 0xffff;
            state->uts.ts32.fs = saved_state->fs & 0xffff;
            state->uts.ts32.gs = saved_state->gs & 0xffff;
        } else if (is_saved_state64(int_state)) {
            x86_saved_state64_t *saved_state = saved_state64(int_state);

            state->tsh.flavor = x86_THREAD_STATE64;
            state->tsh.count = x86_THREAD_STATE64_COUNT;

            /*
             * General registers.
             */
            state->uts.ts64.rax = saved_state->rax;
            state->uts.ts64.rbx = saved_state->rbx;
            state->uts.ts64.rcx = saved_state->rcx;
            state->uts.ts64.rdx = saved_state->rdx;
            state->uts.ts64.rdi = saved_state->rdi;
            state->uts.ts64.rsi = saved_state->rsi;
            state->uts.ts64.rbp = saved_state->rbp;
            state->uts.ts64.rsp = saved_state->isf.rsp;
            state->uts.ts64.r8 = saved_state->r8;
            state->uts.ts64.r9 = saved_state->r9;
            state->uts.ts64.r10 = saved_state->r10;
            state->uts.ts64.r11 = saved_state->r11;
            state->uts.ts64.r12 = saved_state->r12;
            state->uts.ts64.r13 = saved_state->r13;
            state->uts.ts64.r14 = saved_state->r14;
            state->uts.ts64.r15 = saved_state->r15;

            state->uts.ts64.rip = saved_state->isf.rip;
            state->uts.ts64.rflags = saved_state->isf.rflags;
            state->uts.ts64.cs = saved_state->isf.cs;
            state->uts.ts64.fs = saved_state->fs & 0xffff;
            state->uts.ts64.gs = saved_state->gs & 0xffff;
        } else {
            panic("unknown thread state");
        }

        *count = x86_THREAD_STATE_COUNT;
        return KERN_SUCCESS;
    }
    }
    return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
    /*
     * We don't want to be preempted until we're done
     * - particularly if we're switching the current thread
     */
    disable_preemption();

    /*
     * Reset the state saveareas. As we're resetting, we anticipate no
     * memory allocations in this path.
     */
    machine_thread_create(thread, thread->task);

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread()) {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
#if defined(__i386__)
        if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
            pmap_load_kernel_cr3();
#endif /* defined(__i386) */
        act_machine_switch_pcb(NULL, thread);
        ml_set_interrupts_enabled(istate);
    }
    enable_preemption();
}



/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
    current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * This is called when a task is terminated, and also on exec().
 * Clear machine-dependent state that is stored on the task.
 */
void
machine_thread_terminate_self(void)
{
    task_t self_task = current_task();
    if (self_task) {
        user_ldt_t user_ldt = self_task->i386_ldt;
        if (user_ldt != 0) {
            self_task->i386_ldt = 0;
            user_ldt_free(user_ldt);
        }

        if (self_task->task_debug != NULL) {
            zfree(ids_zone, self_task->task_debug);
            self_task->task_debug = NULL;
        }
    }
}

/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
    if (cpu_mode_is64bit()) {
        assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
        iss_zone = zinit(sizeof(x86_sframe64_t),
                         thread_max * sizeof(x86_sframe64_t),
                         THREAD_CHUNK * sizeof(x86_sframe64_t),
                         "x86_64 saved state");

        ids_zone = zinit(sizeof(x86_debug_state64_t),
                         thread_max * sizeof(x86_debug_state64_t),
                         THREAD_CHUNK * sizeof(x86_debug_state64_t),
                         "x86_64 debug state");

    } else {
        iss_zone = zinit(sizeof(x86_sframe32_t),
                         thread_max * sizeof(x86_sframe32_t),
                         THREAD_CHUNK * sizeof(x86_sframe32_t),
                         "x86 saved state");
        ids_zone = zinit(sizeof(x86_debug_state32_t),
                         thread_max * (sizeof(x86_debug_state32_t)),
                         THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
                         "x86 debug state");
    }
    fpu_module_init();
}


#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void dump_handlers(thread_t);
void dump_regs(thread_t);
int dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
    ReturnHandler *rhp = thr_act->handlers;
    int counter = 0;

    printf("\t");
    while (rhp) {
        if (rhp == &thr_act->special_handler){
            if (rhp->next)
                printf("[NON-Zero next ptr(%p)]", rhp->next);
            printf("special_handler()->");
            break;
        }
        printf("hdlr_%d(%p)->", counter, rhp->handler);
        rhp = rhp->next;
        if (++counter > 32) {
            printf("Aborting: HUGE handler chain\n");
            break;
        }
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *ssp;

        ssp = USER_REGS64(thr_act);

        panic("dump_regs: 64bit tasks not yet supported");

    } else {
        x86_saved_state32_t *ssp;

        ssp = USER_REGS32(thr_act);

        /*
         * Print out user register state
         */
        printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
            ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

        printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
            ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

        printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thread(%p)(%d): task=%p(%d)\n",
           thr_act, thr_act->ref_count,
           thr_act->task,
           thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
           thr_act->suspend_count, thr_act->user_stop_count,
           thr_act->active, thr_act->ast);
    printf("\tpcb=%p\n", &thr_act->machine);

    if (thr_act->kernel_stack) {
        vm_offset_t stack = thr_act->kernel_stack;

        printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
            (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
            STACK_IKS(stack)->k_esp, thr_act->machine.iss);
    }

    dump_handlers(thr_act);
    dump_regs(thr_act);
    return((int)thr_act);
}
#endif

user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thr_act);

        return(iss64->isf.rip);
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thr_act);

        return(iss32->eip);
    }
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority,
                 thread->sched_pri, 0,
                 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct x86_kernel_state *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority,
                 thread->sched_pri, 0, 0);

    assert(stack);
    thread->kernel_stack = stack;

    statep = STACK_IKS(stack);
#if defined(__x86_64__)
    statep->k_rip = (unsigned long) Thread_continue;
    statep->k_rbx = (unsigned long) thread_continue;
    statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) thread_continue;
    statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

    return;
}
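
/*
 * Editorial note (not in the original source): the three fields seeded
 * above follow the Switch_context convention used in this file. The
 * saved instruction pointer is aimed at the assembly trampoline
 * Thread_continue, which expects the C continuation dispatcher
 * (thread_continue) in %rbx/%ebx, and the saved stack pointer is parked
 * just below the x86_kernel_state save area at the top of the kernel
 * stack (STACK_IKS(stack) - 1), so a freshly attached stack "resumes"
 * straight into its continuation.
 */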

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
                      thread_t new)
{
    vm_offset_t stack;

    assert(new);
    assert(old);

#if CONFIG_COUNTERS
    machine_pmc_cswitch(old, new);
#endif

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because the old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    PMAP_SWITCH_CONTEXT(old, new, cpu_number());
    act_machine_switch_pcb(old, new);

    machine_set_current_thread(new);

    return;
}




struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};



void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

        if (ic64 == (struct x86_act_context64 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        val = x86_FLOAT_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
                (thread_state_t) &ic64->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }

        val = x86_DEBUG_STATE64_COUNT;
        kret = machine_thread_get_state(thr_act,
                                        x86_DEBUG_STATE64,
                                        (thread_state_t)&ic64->ds,
                                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic64, sizeof(struct x86_act_context64));
            return((void *)0);
        }
        return(ic64);

    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

        if (ic32 == (struct x86_act_context32 *)NULL)
            return((void *)0);

        val = x86_SAVED_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        val = x86_FLOAT_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
                (thread_state_t) &ic32->fs, &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }

        val = x86_DEBUG_STATE32_COUNT;
        kret = machine_thread_get_state(thr_act,
                                        x86_DEBUG_STATE32,
                                        (thread_state_t)&ic32->ds,
                                        &val);
        if (kret != KERN_SUCCESS) {
            kfree(ic32, sizeof(struct x86_act_context32));
            return((void *)0);
        }
        return(ic32);
    }
}


void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
        return;

    if (thread_is_64bit(thr_act)) {
        struct x86_act_context64 *ic64;

        ic64 = (struct x86_act_context64 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
                (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
        if (kret == KERN_SUCCESS) {
            machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
        }
        kfree(ic64, sizeof(struct x86_act_context64));
    } else {
        struct x86_act_context32 *ic32;

        ic32 = (struct x86_act_context32 *)ctx;

        kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
                (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
        if (kret == KERN_SUCCESS) {
            (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
        }
        kfree(ic32, sizeof(struct x86_act_context32));
    }
}


void act_thread_cfree(__unused void *ctx)
{
    /* XXX - Unused */
}
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
    thread->machine.arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
    return (thread->machine.arg_store_valid);
}

/*
 * Duplicate one x86_debug_state32_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
        x86_debug_state32_t *src,
        x86_debug_state32_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another. "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
        x86_debug_state64_t *src,
        x86_debug_state64_t *target,
        boolean_t all)
{
    if (all) {
        target->dr4 = src->dr4;
        target->dr5 = src->dr5;
    }

    target->dr0 = src->dr0;
    target->dr1 = src->dr1;
    target->dr2 = src->dr2;
    target->dr3 = src->dr3;
    target->dr6 = src->dr6;
    target->dr7 = src->dr7;
}
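
/*
 * Usage note (editorial, not in the original source): set_debug_state32()
 * and set_debug_state64() above invoke these copiers with all == FALSE,
 * so user-supplied dr4/dr5 never reach the PCB shadow state, while
 * get_debug_state32()/get_debug_state64() use all == TRUE so a debugger
 * reads back the complete saved state.
 */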