/* osfmk/i386/pcb.c (apple/xnu, tag xnu-3248.60.10) */
/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if KPERF
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
};

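/*
 * Illustrative sketch (an assumption about callers, not code from this
 * file): flavor-generic paths can bounds-check an incoming count against
 * this table before dispatching, along the lines of:
 *
 *	if (count < _MachineStateCount[flavor])
 *		return (KERN_INVALID_ARGUMENT);
 *
 * with flavor itself range-checked first, since the table is indexed
 * directly.
 */
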
zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif

/*
 * Don't let an illegal value for the lower 32 bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate I/O reads and writes.
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7d & mask1) == mask2)
				return (FALSE);

	/*
	 * If we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B".
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;		/* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);		/* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);		/* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);		/* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);		/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7d & 0x2)
		return (FALSE);

	if (*dr7d & (0x2<<2))
		return (FALSE);

	if (*dr7d & (0x2<<4))
		return (FALSE);

	if (*dr7d & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

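/*
 * Worked example (illustrative only, derived from the checks above):
 * dr7 = 0x000D0401 is accepted: bit 0 (L0) locally enables DR0,
 * bit 10 is the fixed must-be-one bit, R/W0 = 01B (bits 17:16) breaks
 * on data writes, and LEN0 = 11B (bits 19:18) selects a 4-byte range.
 * The same value with bit 1 (G0) set would be rejected, since global
 * breakpoints are refused above.
 */
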
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

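/*
 * Illustrative user-space counterpart (assumes only the standard Mach
 * thread_set_state() API; none of this appears in the file itself).
 * A debugger setting a 4-byte write watchpoint would build a state the
 * validation above accepts:
 *
 *	x86_debug_state64_t dbg = { 0 };
 *	dbg.dr0 = (uint64_t)watch_addr;	// must be < VM_MAX_PAGE_ADDRESS
 *	dbg.dr7 = 0x1			// L0: locally enable DR0
 *		| (0x1 << 16)		// R/W0 = 01B: break on data writes
 *		| (0x3 << 18);		// LEN0 = 11B: 4-byte length
 *	thread_set_state(th, x86_DEBUG_STATE64,
 *	    (thread_state_t)&dbg, x86_DEBUG_STATE64_COUNT);
 *
 * Global-enable bits (G0-G3) must stay clear or set_debug_state64()
 * rejects the request.
 */
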
/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if KPERF
	kperf_kpc_cswitch(old, new);
#endif
	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 *	Switch address maps if need be, even if not switching tasks.
	 *	(A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number());

	/*
	 *	Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}

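/*
 * (Descriptive aside on the sequence above, inferred from the code
 * rather than stated by it: the live FPU content is spilled before the
 * address space and PCB are switched, so by the time Switch_context()
 * runs, everything the old thread owns has been parked and the new
 * thread can be resumed from its saved kernel state or continuation.)
 */
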
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */

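/*
 * Illustrative sketch of the universal-flavor convention (assumed
 * caller-side code, not part of this file): a width-independent caller
 * passes x86_THREAD_STATE and then branches on the embedded header
 * that machine_thread_get_state() fills in:
 *
 *	x86_thread_state_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *
 *	if (machine_thread_get_state(thread, x86_THREAD_STATE,
 *	    (thread_state_t)&ts, &count) == KERN_SUCCESS) {
 *		if (ts.tsh.flavor == x86_THREAD_STATE64)
 *			use64(&ts.uts.ts64);	// hypothetical consumer
 *		else
 *			use32(&ts.uts.ts32);	// hypothetical consumer
 *	}
 */
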
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);
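	/*
	 * Illustration (assuming the common 48-bit implementation): a
	 * canonical user address has bits 63:47 all zero, so e.g.
	 * 0x00007fffffffe000 passes the check above while
	 * 0x0000800000000000 would be rejected.
	 */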

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}


static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


/*
 *	act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
					state->ss,
					state->ds,
					state->es,
					state->fs,
					state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked. Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t *state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_FLOAT_STATE64);
		}
		if (state->ash.flavor == x86_FLOAT_STATE32 &&
		    state->ash.count  == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);

	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t *state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);

		break;
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		}
		else
		if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}


/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{

	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

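/*
 * Illustrative sketch (assumed debugger-side usage, not code from this
 * file): the THREAD_STATE_FLAVOR_LIST* pseudo-flavors let a client
 * discover which flavors to snapshot:
 *
 *	thread_state_flavor_t flavors[8];
 *	mach_msg_type_number_t n = 8;
 *
 *	if (machine_thread_get_state(th, THREAD_STATE_FLAVOR_LIST_NEW,
 *	    (thread_state_t)flavors, &n) == KERN_SUCCESS) {
 *		// n == 4: x86_THREAD_STATE, x86_FLOAT_STATE,
 *		// x86_EXCEPTION_STATE, x86_DEBUG_STATE
 *	}
 */
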
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}


/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			 thread_max * sizeof(x86_saved_state_t),
			 THREAD_CHUNK * sizeof(x86_saved_state_t),
			 "x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			 thread_max * sizeof(x86_debug_state64_t),
			 THREAD_CHUNK * sizeof(x86_debug_state64_t),
			 "x86_64 debug state");

	fpu_module_init();
}

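/*
 * (For reference, and based on the zalloc interface elsewhere in xnu
 * rather than anything in this file: zinit()'s arguments are element
 * size, maximum zone memory in bytes, allocation chunk size, and a
 * descriptive name.)
 */
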

user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}

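/*
 * (Descriptive aside, inferred from the context-switch assembly rather
 * than stated here: when this stack is first switched to, the saved
 * k_rsp/k_rip above are reloaded so execution begins in Thread_continue,
 * which picks up the continuation function parked in k_rbx/k_ebx.)
 */
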
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
	      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

#if KPERF
	kperf_kpc_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);

	return;
}



struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};

A
1821void *
1822act_thread_csave(void)
1823{
2d21ac55 1824 kern_return_t kret;
0c530ab8 1825 mach_msg_type_number_t val;
2d21ac55 1826 thread_t thr_act = current_thread();
0c530ab8 1827
2d21ac55
A
1828 if (thread_is_64bit(thr_act)) {
1829 struct x86_act_context64 *ic64;
0b4e3aa0 1830
2d21ac55 1831 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
0c530ab8
A
1832
1833 if (ic64 == (struct x86_act_context64 *)NULL)
2d21ac55 1834 return((void *)0);
0c530ab8
A
1835
1836 val = x86_SAVED_STATE64_COUNT;
1837 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
2d21ac55 1838 (thread_state_t) &ic64->ss, &val);
0c530ab8 1839 if (kret != KERN_SUCCESS) {
2d21ac55 1840 kfree(ic64, sizeof(struct x86_act_context64));
0c530ab8
A
1841 return((void *)0);
1842 }
1843 val = x86_FLOAT_STATE64_COUNT;
1844 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
2d21ac55 1845 (thread_state_t) &ic64->fs, &val);
0c530ab8 1846 if (kret != KERN_SUCCESS) {
2d21ac55 1847 kfree(ic64, sizeof(struct x86_act_context64));
0c530ab8
A
1848 return((void *)0);
1849 }
0b4e3aa0 1850
0c530ab8
A
1851 val = x86_DEBUG_STATE64_COUNT;
1852 kret = machine_thread_get_state(thr_act,
1853 x86_DEBUG_STATE64,
1854 (thread_state_t)&ic64->ds,
55e303ae 1855 &val);
0b4e3aa0 1856 if (kret != KERN_SUCCESS) {
0c530ab8
A
1857 kfree(ic64, sizeof(struct x86_act_context64));
1858 return((void *)0);
1859 }
1860 return(ic64);
1861
1862 } else {
2d21ac55 1863 struct x86_act_context32 *ic32;
0c530ab8 1864
2d21ac55 1865 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
0c530ab8
A
1866
1867 if (ic32 == (struct x86_act_context32 *)NULL)
2d21ac55 1868 return((void *)0);
0c530ab8
A
1869
1870 val = x86_SAVED_STATE32_COUNT;
1871 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2d21ac55 1872 (thread_state_t) &ic32->ss, &val);
0c530ab8 1873 if (kret != KERN_SUCCESS) {
2d21ac55 1874 kfree(ic32, sizeof(struct x86_act_context32));
0c530ab8 1875 return((void *)0);
0b4e3aa0 1876 }
0c530ab8
A
1877 val = x86_FLOAT_STATE32_COUNT;
1878 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2d21ac55 1879 (thread_state_t) &ic32->fs, &val);
0c530ab8 1880 if (kret != KERN_SUCCESS) {
2d21ac55 1881 kfree(ic32, sizeof(struct x86_act_context32));
0c530ab8
A
1882 return((void *)0);
1883 }
1884
1885 val = x86_DEBUG_STATE32_COUNT;
1886 kret = machine_thread_get_state(thr_act,
1887 x86_DEBUG_STATE32,
1888 (thread_state_t)&ic32->ds,
55e303ae 1889 &val);
0b4e3aa0 1890 if (kret != KERN_SUCCESS) {
0c530ab8
A
1891 kfree(ic32, sizeof(struct x86_act_context32));
1892 return((void *)0);
0b4e3aa0 1893 }
0c530ab8
A
1894 return(ic32);
1895 }
0b4e3aa0 1896}
0c530ab8
A
1897
1898
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

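/*
 * Usage note drawn from the callers in this file: the setters pass
 * all == FALSE so a caller-supplied dr4/dr5 is never installed, while
 * the getters pass all == TRUE to report the complete saved area:
 *
 *	copy_debug_state32(ds, ids, FALSE);		// set_debug_state32()
 *	copy_debug_state32(saved_state, ds, TRUE);	// get_debug_state32()
 */
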
/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}